distribution-2.3.0/000077500000000000000000000000001265472114500142425ustar00rootroot00000000000000distribution-2.3.0/.drone.yml000066400000000000000000000016261265472114500161570ustar00rootroot00000000000000image: dmp42/go:stable script: # To be spoofed back into the test image - go get github.com/modocache/gover - go get -t ./... # Go fmt - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" # Go lint - test -z "$(golint ./... | tee /dev/stderr)" # Go vet - go vet ./... # Go test - go test -v -race -cover ./... # Helper to concatenate reports - gover # Send to coverall - goveralls -service drone.io -coverprofile=gover.coverprofile -repotoken {{COVERALLS_TOKEN}} # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" # http://labix.org/gocheck notify: email: recipients: - distribution@docker.com slack: team: docker channel: "#dt" username: mom token: {{SLACK_TOKEN}} on_success: true on_failure: true distribution-2.3.0/.gitignore000066400000000000000000000006611265472114500162350ustar00rootroot00000000000000# Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof # never checkin from the bin file (for now) bin/* # Test key files *.pem # Cover profiles *.out # Editor/IDE specific files. *.sublime-project *.sublime-workspace distribution-2.3.0/.mailmap000066400000000000000000000022631265472114500156660ustar00rootroot00000000000000Stephen J Day Stephen Day Stephen J Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland Brian Bland Brian Bland Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern Andrew Meredith Andrew Meredith harche harche Jessie Frazelle Sharif Nassar Sharif Nassar Sven Dowideit Sven Dowideit Vincent Giersch Vincent Giersch davidli davidli distribution-2.3.0/AUTHORS000066400000000000000000000100751265472114500153150ustar00rootroot00000000000000Aaron Lehmann Aaron Vinson Adam Enger Adrian Mouat Ahmet Alp Balkan Alex Chan Alex Elman amitshukla Amy Lindburg Andrew Meredith Andrey Kostov Andy Goldstein Anton Tiurin Antonio Mercado Arnaud Porterie Arthur Baars Avi Miller Ayose Cazorla BadZen Ben Firshman bin liu Brian Bland burnettk Carson A Chris Dillon Daisuke Fujita Darren Shepherd Dave Trombley Dave Tucker David Lawrence David Verhasselt David Xia davidli Dejan Golja Derek McGowan Diogo Mónica DJ Enriquez Donald Huang Doug Davis farmerworking Florentin Raud Frederick F. Kautz IV gabriell nascimento harche Henri Gomez Hu Keping Hua Wang Ian Babrou Jack Griffin Jason Freidman Jeff Nickoloff Jessie Frazelle Jianqing Wang Jon Poler Jonathan Boulle Jordan Liggitt Josh Hawn Julien Fernandez Kelsey Hightower Kenneth Lim Kenny Leung Li Yi Liu Hua Louis Kottmann Luke Carpenter Mary Anthony Matt Bentley Matt Moore Matt Robenolt Michael Prokop Miquel Sabaté Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques Patrick Devine Philip Misiowiec Richard Scothern Rodolfo Carvalho Rusty Conover Sean Boran Sebastiaan van Stijn Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart Stephen J Day Sungho Moon Sven Dowideit Sylvain Baubeau Ted Reed tgic Thomas Sjögren Tianon Gravi Tibor Vass Tonis Tiigi Trevor Pounds Troels Thomsen Vincent Batts Vincent Demeester Vincent Giersch W. 
Trevor King weiyuan.yl xg.song xiekeyang Yann ROBERT yuzou distribution-2.3.0/CONTRIBUTING.md000066400000000000000000000163001265472114500164730ustar00rootroot00000000000000# Contributing to the registry ## Before reporting an issue... ### If your problem is with... - automated builds - your account on the [Docker Hub](https://hub.docker.com/) - any other [Docker Hub](https://hub.docker.com/) issue Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) ### If you... - need help setting up your registry - can't figure out something - are not sure what's going on or what your problem is Then please do not open an issue here yet - you should first try one of the following support forums: - irc: #docker-distribution on freenode - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. - search the bugtracker for an already reported issue ### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - comment if you have some new, technical and relevant information to add to the case - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. ### If you have not found an existing issue that describes your problem: 1. create a new issue, with a succinct title that describes your issue: - bad title: "It doesn't work with my docker" - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" 2. copy the output of: - `docker version` - `docker info` - `docker exec registry -version` 3. copy the command line you used to launch your Registry 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) 5. reproduce your problem and get your docker daemon logs showing the error 6. if relevant, copy your registry logs that show the error 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry ## Contributing a patch for a known bug, or a small correction You should follow the basic GitHub workflow: 1. fork 2. commit a change 3. make sure the tests pass 4. PR Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - sign your commits using `-s`: `git commit -s -m "My commit"` Some simple rules to ensure quick merge: - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - if you need to amend your PR following comments, please squash instead of adding more commits ## Contributing new features You are heavily encouraged to first discuss what you want to do. 
You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. Then you should submit your implementation, clearly linking to the issue (and possible proposal). Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. It's mandatory to: - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - address maintainers' comments and modify your submission accordingly - write tests for any new code Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. Have a look at a great, succesful contribution: the [Ceph driver PR](https://github.com/docker/distribution/pull/443) ## Coding Style Unless explicitly stated, we follow all coding guidelines from the Go community. While some of these standards may seem arbitrary, they somehow seem to result in a solid, consistent codebase. It is possible that the code base does not currently comply with these guidelines. We are not looking for a massive PR that fixes this, since that goes against the spirit of the guidelines. All new contributions should make a best effort to clean up and make the code base better than they left it. Obviously, apply your best judgement. Remember, the goal here is to make the code base easier for humans to navigate and understand. Always keep that in mind when nudging others to comply. The rules: 1. All code should be formatted with `gofmt -s`. 2. All code should pass the default levels of [`golint`](https://github.com/golang/lint). 3. All code should follow the guidelines covered in [Effective Go](http://golang.org/doc/effective_go.html) and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments). 4. Comment the code. Tell us the why, the history and the context. 5. Document _all_ declarations and methods, even private ones. Declare expectations, caveats and anything else that may be important. If a type gets exported, having the comments already there will ensure it's ready. 6. Variable name length should be proportional to its context and no longer. `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. In practice, short methods will have short variable names and globals will have longer names. 7. No underscores in package names. If you need a compound name, step back, and re-examine why you need a compound name. If you still think you need a compound name, lose the underscore. 8. No utils or helpers packages. If a function is not general enough to warrant its own package, it has not been written generally enough to be a part of a util package. Just leave it unexported and well-documented. 9. All tests should run with `go test` and outside tooling should not be required. No, we don't need another unit testing framework. Assertion packages are acceptable if they provide _real_ incremental value. 10. Even though we call these "rules" above, they are actually just guidelines. 
Since you've read all the rules, you now know that. If you are having trouble getting into the mood of idiomatic Go, we recommend reading through [Effective Go](http://golang.org/doc/effective_go.html). The [Go Blog](http://blog.golang.org/) is also a great resource. Drinking the kool-aid is a lot easier than going thirsty. distribution-2.3.0/Dockerfile000066400000000000000000000010631265472114500162340ustar00rootroot00000000000000FROM golang:1.5.3 RUN apt-get update && \ apt-get install -y librados-dev apache2-utils && \ rm -rf /var/lib/apt/lists/* ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH ENV DOCKER_BUILDTAGS include_rados include_oss include_gcs WORKDIR $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml RUN make PREFIX=/go clean binaries VOLUME ["/var/lib/registry"] EXPOSE 5000 ENTRYPOINT ["registry"] CMD ["/etc/docker/registry/config.yml"] distribution-2.3.0/Godeps/000077500000000000000000000000001265472114500154635ustar00rootroot00000000000000distribution-2.3.0/Godeps/Godeps.json000066400000000000000000000114461265472114500176050ustar00rootroot00000000000000{ "ImportPath": "github.com/docker/distribution", "GoVersion": "go1.4.2", "Packages": [ "./..." ], "Deps": [ { "ImportPath": "golang.org/x/oauth2", "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" }, { "ImportPath": "golang.org/x/oauth2/google", "Rev": "8914e5017ca260f2a3a1575b1e6868874050d95e" }, { "ImportPath": "google.golang.org/api/storage/v1", "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" }, { "ImportPath": "google.golang.org/cloud", "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" }, { "ImportPath": "google.golang.org/api", "Rev": "18450f4e95c7e76ce3a5dc3a8cb7178ab6d56121" }, { "ImportPath": "google.golang.org/grpc", "Rev": "91c8b79535eb6045d70ec671d302213f88a3ab95" }, { "ImportPath": "github.com/bradfitz/http2", "Rev": "f8202bc903bda493ebba4aa54922d78430c2c42f" }, { "ImportPath": "github.com/golang/protobuf", "Rev": "0f7a9caded1fb3c9cc5a9b4bcf2ff633cc8ae644" }, { "ImportPath": "google.golang.org/cloud/storage", "Rev": "2400193c85c3561d13880d34e0e10c4315bb02af" }, { "ImportPath": "github.com/AdRoll/goamz/aws", "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" }, { "ImportPath": "github.com/AdRoll/goamz/cloudfront", "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" }, { "ImportPath": "github.com/AdRoll/goamz/s3", "Rev": "aa6e716d710a0c7941cb2075cfbb9661f16d21f1" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc" }, { "ImportPath": "github.com/Sirupsen/logrus", "Comment": "v0.7.3", "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, { "ImportPath": "github.com/bugsnag/bugsnag-go", "Comment": "v1.0.2-5-gb1d1530", "Rev": "b1d153021fcd90ca3f080db36bec96dc690fb274" }, { "ImportPath": "github.com/bugsnag/osext", "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" }, { "ImportPath": "github.com/bugsnag/panicwrap", "Rev": "e2c28503fcd0675329da73bf48b33404db873782" }, { "ImportPath": "github.com/denverdino/aliyungo/oss", "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" }, { "ImportPath": "github.com/denverdino/aliyungo/util", "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" }, { "ImportPath": "github.com/denverdino/aliyungo/common", "Rev": "6ffb587da9da6d029d0ce517b85fecc82172d502" }, { "ImportPath": "github.com/docker/libtrust", "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" }, { "ImportPath": "github.com/garyburd/redigo/internal", 
"Rev": "535138d7bcd717d6531c701ef5933d98b1866257" }, { "ImportPath": "github.com/garyburd/redigo/redis", "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" }, { "ImportPath": "github.com/gorilla/context", "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" }, { "ImportPath": "github.com/gorilla/handlers", "Rev": "60c7bfde3e33c201519a200a4507a158cc03a17b" }, { "ImportPath": "github.com/gorilla/mux", "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" }, { "ImportPath": "github.com/inconshreveable/mousetrap", "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" }, { "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" }, { "ImportPath": "github.com/ncw/swift", "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3" }, { "ImportPath": "github.com/noahdesu/go-ceph/rados", "Comment": "v.0.3.0-29-gb15639c", "Rev": "b15639c44c05368348355229070361395d9152ee" }, { "ImportPath": "github.com/spf13/cobra", "Rev": "312092086bed4968099259622145a0c9ae280064" }, { "ImportPath": "github.com/spf13/pflag", "Rev": "5644820622454e71517561946e3d94b9f9db6842" }, { "ImportPath": "github.com/stevvooe/resumable", "Rev": "51ad44105773cafcbe91927f70ac68e1bf78f8b4" }, { "ImportPath": "github.com/yvasiyarov/go-metrics", "Rev": "57bccd1ccd43f94bb17fdd8bf3007059b802f85e" }, { "ImportPath": "github.com/yvasiyarov/gorelic", "Comment": "v0.0.6-8-ga9bba5b", "Rev": "a9bba5b9ab508a086f9a12b8c51fab68478e2128" }, { "ImportPath": "github.com/yvasiyarov/newrelic_platform_go", "Rev": "b21fdbd4370f3717f3bbd2bf41c223bc273068e6" }, { "ImportPath": "golang.org/x/crypto/bcrypt", "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" }, { "ImportPath": "golang.org/x/crypto/blowfish", "Rev": "c10c31b5e94b6f7a0283272dc2bb27163dcea24b" }, { "ImportPath": "golang.org/x/net/context", "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" }, { "ImportPath": "golang.org/x/net/trace", "Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5" }, { "ImportPath": "gopkg.in/check.v1", "Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673" }, { "ImportPath": "gopkg.in/yaml.v2", "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" } ] } distribution-2.3.0/Godeps/Readme000066400000000000000000000002101265472114500165740ustar00rootroot00000000000000This directory tree is generated automatically by godep. Please do not edit. See https://github.com/tools/godep for more information. distribution-2.3.0/LICENSE000066400000000000000000000260751265472114500152610ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. distribution-2.3.0/MAINTAINERS000066400000000000000000000026731265472114500157470ustar00rootroot00000000000000# Distribution maintainers file # # This file describes who runs the docker/distribution project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. # [Org] [Org."Core maintainers"] people = [ "aaronlehmann", "dmcgowan", "dmp42", "richardscothern", "shykes", "stevvooe", ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. 
# ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.aaronlehmann] Name = "Aaron Lehmann" Email = "aaron.lehmann@docker.com" GitHub = "aaronlehmann" [people.brianbland] Name = "Brian Bland" Email = "brian.bland@docker.com" GitHub = "BrianBland" [people.dmcgowan] Name = "Derek McGowan" Email = "derek@mcgstyle.net" GitHub = "dmcgowan" [people.dmp42] Name = "Olivier Gambier" Email = "olivier@docker.com" GitHub = "dmp42" [people.richardscothern] Name = "Richard Scothern" Email = "richard.scothern@gmail.com" GitHub = "richardscothern" [people.shykes] Name = "Solomon Hykes" Email = "solomon@docker.com" GitHub = "shykes" [people.stevvooe] Name = "Stephen Day" Email = "stephen.day@docker.com" GitHub = "stevvooe" distribution-2.3.0/Makefile000066400000000000000000000043551265472114500157110ustar00rootroot00000000000000# Set an output prefix, which is the local directory if not specified PREFIX?=$(shell pwd) # Used to populate version variable in main package. VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) # Allow turning off function inlining and variable registerization ifeq (${DISABLE_OPTIMIZATION},true) GO_GCFLAGS=-gcflags "-N -l" VERSION:="$(VERSION)-noopt" endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" .PHONY: clean all fmt vet lint build test binaries .DEFAULT: default all: AUTHORS clean fmt vet fmt lint build test binaries AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ # This only needs to be generated by hand when cutting full releases. version/version.go: ./version/version.sh > $@ ${PREFIX}/bin/registry: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry ${PREFIX}/bin/digest: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest ${PREFIX}/bin/registry-api-descriptor-template: version/version.go $(shell find . -type f -name '*.go') @echo "+ $@" @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template ./bin/registry-api-descriptor-template $< > $@ # Depends on binaries because vet will silently fail if it can't load compiled # imports vet: binaries @echo "+ $@" @go vet ./... fmt: @echo "+ $@" @test -z "$$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" || \ echo "+ please format Go code with 'gofmt -s'" lint: @echo "+ $@" @test -z "$$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" build: @echo "+ $@" @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} ./... test: @echo "+ $@" @go test -test.short -tags "${DOCKER_BUILDTAGS}" ./... test-full: @echo "+ $@" @go test ./... binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template @echo "+ $@" clean: @echo "+ $@" @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/registry-api-descriptor-template" distribution-2.3.0/README.md000066400000000000000000000125571265472114500155330ustar00rootroot00000000000000# Distribution The Docker toolset to pack, ship, store, and deliver content. This repository's main product is the Docker Registry 2.0 implementation for storing and distributing Docker images. It supersedes the [docker/docker-registry](https://github.com/docker/docker-registry) project with a new API design, focused around security and performance. 
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) [![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | ### How does this integrate with Docker engine? This project should provide an implementation to a V2 API for use in the [Docker core project](https://github.com/docker/docker). The API should be embeddable and simplify the process of securely pulling and pushing content from `docker` daemons. ### What are the long term goals of the Distribution project? The _Distribution_ project has the further long term goal of providing a secure tool chain for distributing content. The specifications, APIs and tools should be as useful with Docker as they are without. Our goal is to design a professional grade and extensible content distribution system that allow users to: * Enjoy an efficient, secured and reliable way to store, manage, package and exchange content * Hack/roll their own on top of healthy open-source components * Implement their own home made solution through good specs, and solid extensions mechanism. ## More about Registry 2.0 The new registry implementation provides the following benefits: - faster push and pull - new, more efficient implementation - simplified deployment - pluggable storage backend - webhook notifications For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). ### Who needs to deploy a registry? By default, Docker users pull images from Docker's public registry instance. [Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, if they have a [Docker Hub](https://hub.docker.com/) account. For some users and even companies, this default behavior is sufficient. For others, it is not. For example, users with their own software products may want to maintain a registry for private, company images. Also, you may wish to deploy your own image repository for images used to test or in continuous integration. For these use cases and others, [deploying your own registry instance](docs/deploying.md) may be the better choice. ### Migration to Registry 2.0 For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required. A tool to assist with migration efforts has been created. For more information see [docker/migrator] (https://github.com/docker/migrator). 
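Whether you are migrating an existing deployment or standing up a new registry, a quick way to confirm that an endpoint actually speaks the Registry 2.0 API is to probe its `/v2/` base endpoint, which V2 registries answer with a 200 (or 401 when authentication is required) and an API version header. The sketch below does this with Go's standard library only; the `localhost:5000` address is a placeholder for your own registry:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder address: substitute your own registry host and port.
	resp, err := http.Get("http://localhost:5000/v2/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// A V2 registry answers the base endpoint with 200 (or 401 when auth is
	// required) and advertises its API version in a response header.
	fmt.Println("status:", resp.Status)
	fmt.Println("api version:", resp.Header.Get("Docker-Distribution-Api-Version"))
}
```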
## Contribute Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. If you are contributing code, see the instructions for [building a development environment](docs/building.md). ## Support If any issues are encountered while using the _Distribution_ project, several avenues are available for support:
- IRC: #docker-distribution on FreeNode
- Issue Tracker: github.com/docker/distribution/issues
- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
- Mailing List: docker@dockerproject.org
## License This project is distributed under [Apache License, Version 2.0](LICENSE.md). distribution-2.3.0/ROADMAP.md000066400000000000000000000322011265472114500156450ustar00rootroot00000000000000# Roadmap The Distribution Project consists of several components, some of which are still being defined. This document defines the high-level goals of the project, identifies the current components, and defines the release- relationship to the Docker Platform. * [Distribution Goals](#distribution-goals) * [Distribution Components](#distribution-components) * [Project Planning](#project-planning): release-relationship to the Docker Platform. This road map is a living document, providing an overview of the goals and considerations made in respect of the future of the project. ## Distribution Goals - Replace the existing [docker registry](github.com/docker/docker-registry) implementation as the primary implementation. - Replace the existing push and pull code in the docker engine with the distribution package. - Define a strong data model for distributing docker images - Provide a flexible distribution tool kit for use in the docker platform - Unlock new distribution models ## Distribution Components Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming features and bugfixes for a component will be added to the relevant milestone. If a feature or bugfix is not part of a milestone, it is currently unscheduled for implementation. * [Registry](#registry) * [Distribution Package](#distribution-package) *** ### Registry The new Docker registry is the main portion of the distribution repository. Registry 2.0 is the first release of the next-generation registry. This was primarily focused on implementing the [new registry API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), with a focus on security and performance. Following from the Distribution project goals above, we have a set of goals for registry v2 that we would like to follow in the design. New features should be compared against these goals. #### Data Storage and Distribution First The registry's first goal is to provide a reliable, consistent storage location for Docker images. The registry should only provide the minimal amount of indexing required to fetch image data and no more. This means we should be selective in new features and API additions, including those that may require expensive, ever growing indexes. Requests should be servable in "constant time". #### Content Addressability All data objects used in the registry API should be content addressable. Content identifiers should be secure and verifiable. This provides a secure, reliable base from which to build more advanced content distribution systems. #### Content Agnostic In the past, changes to the image format would require large changes in Docker and the Registry. By decoupling the distribution and image format, we can allow the formats to progress without having to coordinate between the two. This means that we should be focused on decoupling Docker from the registry just as much as decoupling the registry from Docker. Such an approach will allow us to unlock new distribution models that haven't been possible before. We can take this further by saying that the new registry should be content agnostic. The registry provides a model of names, tags, manifests and content addresses and that model can be used to work with content. 
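In concrete terms, the content addresses in that model are simply collision-resistant hashes of the raw bytes, rendered as `<algorithm>:<hex>`, so the same blob always yields the same identifier no matter which repository, tag or manifest refers to it, and any party can recompute the hash over the bytes it received to verify them. The following is a minimal illustrative sketch using only Go's standard library (it deliberately does not use this project's `digest` package, and the sample payload is invented for the example):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// contentAddress returns a registry-style identifier for a blob of bytes.
// The result depends only on the content, never on a name or a tag.
func contentAddress(blob []byte) string {
	sum := sha256.Sum256(blob)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	layer := []byte("example layer contents") // hypothetical payload
	fmt.Println(contentAddress(layer))
	// A client that fetched this blob by the printed identifier can verify
	// it by recomputing contentAddress over the bytes it received.
}
```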
#### Simplicity The new registry should be closer to a microservice component than its predecessor. This means it should have a narrower API and a low number of service dependencies. It should be easy to deploy. This means that other solutions should be explored before changing the API or adding extra dependencies. If functionality is required, can it be added as an extension or companion service. #### Extensibility The registry should provide extension points to add functionality. By keeping the scope narrow, but providing the ability to add functionality. Features like search, indexing, synchronization and registry explorers fall into this category. No such feature should be added unless we've found it impossible to do through an extension. #### Active Feature Discussions The following are feature discussions that are currently active. If you don't see your favorite, unimplemented feature, feel free to contact us via IRC or the mailing list and we can talk about adding it. The goal here is to make sure that new features go through a rigid design process before landing in the registry. ##### Proxying to other Registries A _pull-through caching_ mode exists for the registry, but is restricted from within the docker client to only mirror the official Docker Hub. This functionality can be expanded when image provenance has been specified and implemented in the distribution project. ##### Metadata storage Metadata for the registry is currently stored with the manifest and layer data on the storage backend. While this is a big win for simplicity and reliably maintaining state, it comes with the cost of consistency and high latency. The mutable registry metadata operations should be abstracted behind an API which will allow ACID compliant storage systems to handle metadata. ##### Peer to Peer transfer Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit ##### Indexing, Search and Discovery The original registry provided some implementation of search for use with private registries. Support has been elided from V2 since we'd like to both decouple search functionality from the registry. The makes the registry simpler to deploy, especially in use cases where search is not needed, and let's us decouple the image format from the registry. There are explorations into using the catalog API and notification system to build external indexes. The current line of thought is that we will define a common search API to index and query docker images. Such a system could be run as a companion to a registry or set of registries to power discovery. The main issue with search and discovery is that there are so many ways to accomplish it. There are two aspects to this project. The first is deciding on how it will be done, including an API definition that can work with changing data formats. The second is the process of integrating with `docker search`. We expect that someone attempts to address the problem with the existing tools and propose it as a standard search API or uses it to inform a standardization process. Once this has been explored, we integrate with the docker client. Please see the following for more detail: - https://github.com/docker/distribution/issues/206 ##### Deletes > __NOTE:__ Deletes are a much asked for feature. Before requesting this feature or participating in discussion, we ask that you read this section in full and understand the problems behind deletes. 
While, at first glance, implementing deleting seems simple, there are a number mitigating factors that make many solutions not ideal or even pathological in the context of a registry. The following paragraph discuss the background and approaches that could be applied to a arrive at a solution. The goal of deletes in any system is to remove unused or unneeded data. Only data requested for deletion should be removed and no other data. Removing unintended data is worse than _not_ removing data that was requested for removal but ideally, both are supported. Generally, according to this rule, we err on holding data longer than needed, ensuring that it is only removed when we can be certain that it can be removed. With the current behavior, we opt to hold onto the data forever, ensuring that data cannot be incorrectly removed. To understand the problems with implementing deletes, one must understand the data model. All registry data is stored in a filesystem layout, implemented on a "storage driver", effectively a _virtual file system_ (VFS). The storage system must assume that this VFS layer will be eventually consistent and has poor read- after-write consistency, since this is the lower common denominator among the storage drivers. This is mitigated by writing values in reverse- dependent order, but makes wider transactional operations unsafe. Layered on the VFS model is a content-addressable _directed, acyclic graph_ (DAG) made up of blobs. Manifests reference layers. Tags reference manifests. Since the same data can be referenced by multiple manifests, we only store data once, even if it is in different repositories. Thus, we have a set of blobs, referenced by tags and manifests. If we want to delete a blob we need to be certain that it is no longer referenced by another manifest or tag. When we delete a manifest, we also can try to delete the referenced blobs. Deciding whether or not a blob has an active reference is the crux of the problem. Conceptually, deleting a manifest and its resources is quite simple. Just find all the manifests, enumerate the referenced blobs and delete the blobs not in that set. An astute observer will recognize this as a garbage collection problem. As with garbage collection in programming languages, this is very simple when one always has a consistent view. When one adds parallelism and an inconsistent view of data, it becomes very challenging. A simple example can demonstrate this. Let's say we are deleting a manifest _A_ in one process. We scan the manifest and decide that all the blobs are ready for deletion. Concurrently, we have another process accepting a new manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ is accepted and all the blobs are considered present, so the operation proceeds. The original process then deletes the referenced blobs, assuming they were unreferenced. The manifest _B_, which we thought had all of its data present, can no longer be served by the registry, since the dependent data has been deleted. Deleting data from the registry safely requires some way to coordinate this operation. The following approaches are being considered: - _Reference Counting_ - Maintain a count of references to each blob. This is challenging for a number of reasons: 1. maintaining a consistent consensus of reference counts across a set of Registries and 2. Building the initial list of reference counts for an existing registry. 
These challenges can be met with a consensus protocol like Paxos or Raft in the first case and a necessary but simple scan in the second.. - _Lock the World GC_ - Halt all writes to the data store. Walk the data store and find all blob references. Delete all unreferenced blobs. This approach is very simple but requires disabling writes for a period of time while the service reads all data. This is slow and expensive but very accurate and effective. - _Generational GC_ - Do something similar to above but instead of blocking writes, writes are sent to another storage backend while reads are broadcast to the new and old backends. GC is then performed on the read-only portion. Because writes land in the new backend, the data in the read-only section can be safely deleted. The main drawbacks of this approach are complexity and coordination. - _Centralized Oracle_ - Using a centralized, transactional database, we can know exactly which data is referenced at any given time. This avoids coordination problem by managing this data in a single location. We trade off metadata scalability for simplicity and performance. This is a very good option for most registry deployments. This would create a bottleneck for registry metadata. However, metadata is generally not the main bottleneck when serving images. Please let us know if other solutions exist that we have yet to enumerate. Note that for any approach, implementation is a massive consideration. For example, a mark-sweep based solution may seem simple but the amount of work in coordination offset the extra work it might take to build a _Centralized Oracle_. We'll accept proposals for any solution but please coordinate with us before dropping code. At this time, we have traded off simplicity and ease of deployment for disk space. Simplicity and ease of deployment tend to reduce developer involvement, which is currently the most expensive resource in software engineering. Taking on any solution for deletes will greatly effect these factors, trading off very cheap disk space for a complex deployment and operational story. Please see the following issues for more detail: - https://github.com/docker/distribution/issues/422 - https://github.com/docker/distribution/issues/461 - https://github.com/docker/distribution/issues/462 ### Distribution Package At its core, the Distribution Project is a set of Go packages that make up Distribution Components. At this time, most of these packages make up the Registry implementation. The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. For feature additions, please see the Registry section. In the future, we may break out a separate Roadmap for distribution-specific features that apply to more than just the registry. *** ### Project Planning An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. 
distribution-2.3.0/blobs.go000066400000000000000000000211451265472114500156750ustar00rootroot00000000000000package distribution import ( "errors" "fmt" "io" "net/http" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" ) var ( // ErrBlobExists returned when blob already exists ErrBlobExists = errors.New("blob exists") // ErrBlobDigestUnsupported when blob digest is an unsupported version. ErrBlobDigestUnsupported = errors.New("unsupported blob digest") // ErrBlobUnknown when blob is not found. ErrBlobUnknown = errors.New("unknown blob") // ErrBlobUploadUnknown returned when upload is not found. ErrBlobUploadUnknown = errors.New("blob upload unknown") // ErrBlobInvalidLength returned when the blob has an expected length on // commit, meaning mismatched with the descriptor or an invalid value. ErrBlobInvalidLength = errors.New("blob invalid length") ) // ErrBlobInvalidDigest returned when digest check fails. type ErrBlobInvalidDigest struct { Digest digest.Digest Reason error } func (err ErrBlobInvalidDigest) Error() string { return fmt.Sprintf("invalid digest for referenced layer: %v, %v", err.Digest, err.Reason) } // ErrBlobMounted returned when a blob is mounted from another repository // instead of initiating an upload session. type ErrBlobMounted struct { From reference.Canonical Descriptor Descriptor } func (err ErrBlobMounted) Error() string { return fmt.Sprintf("blob mounted from: %v to: %v", err.From, err.Descriptor) } // Descriptor describes targeted content. Used in conjunction with a blob // store, a descriptor can be used to fetch, store and target any kind of // blob. The struct also describes the wire protocol format. Fields should // only be added but never changed. type Descriptor struct { // MediaType describe the type of the content. All text based formats are // encoded as utf-8. MediaType string `json:"mediaType,omitempty"` // Size in bytes of content. Size int64 `json:"size,omitempty"` // Digest uniquely identifies the content. A byte stream can be verified // against against this digest. Digest digest.Digest `json:"digest,omitempty"` // NOTE: Before adding a field here, please ensure that all // other options have been exhausted. Much of the type relationships // depend on the simplicity of this type. } // Descriptor returns the descriptor, to make it satisfy the Describable // interface. Note that implementations of Describable are generally objects // which can be described, not simply descriptors; this exception is in place // to make it more convenient to pass actual descriptors to functions that // expect Describable objects. func (d Descriptor) Descriptor() Descriptor { return d } // BlobStatter makes blob descriptors available by digest. The service may // provide a descriptor of a different digest if the provided digest is not // canonical. type BlobStatter interface { // Stat provides metadata about a blob identified by the digest. If the // blob is unknown to the describer, ErrBlobUnknown will be returned. Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) } // BlobDeleter enables deleting blobs from storage. type BlobDeleter interface { Delete(ctx context.Context, dgst digest.Digest) error } // BlobDescriptorService manages metadata about a blob by digest. Most // implementations will not expose such an interface explicitly. Such mappings // should be maintained by interacting with the BlobIngester. Hence, this is // left off of BlobService and BlobStore. 
type BlobDescriptorService interface { BlobStatter // SetDescriptor assigns the descriptor to the digest. The provided digest and // the digest in the descriptor must map to identical content but they may // differ on their algorithm. The descriptor must have the canonical // digest of the content and the digest algorithm must match the // annotators canonical algorithm. // // Such a facility can be used to map blobs between digest domains, with // the restriction that the algorithm of the descriptor must match the // canonical algorithm (ie sha256) of the annotator. SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error // Clear enables descriptors to be unlinked Clear(ctx context.Context, dgst digest.Digest) error } // ReadSeekCloser is the primary reader type for blob data, combining // io.ReadSeeker with io.Closer. type ReadSeekCloser interface { io.ReadSeeker io.Closer } // BlobProvider describes operations for getting blob data. type BlobProvider interface { // Get returns the entire blob identified by digest along with the descriptor. Get(ctx context.Context, dgst digest.Digest) ([]byte, error) // Open provides a ReadSeekCloser to the blob identified by the provided // descriptor. If the blob is not known to the service, an error will be // returned. Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) } // BlobServer can serve blobs via http. type BlobServer interface { // ServeBlob attempts to serve the blob, identifed by dgst, via http. The // service may decide to redirect the client elsewhere or serve the data // directly. // // This handler only issues successful responses, such as 2xx or 3xx, // meaning it serves data or issues a redirect. If the blob is not // available, an error will be returned and the caller may still issue a // response. // // The implementation may serve the same blob from a different digest // domain. The appropriate headers will be set for the blob, unless they // have already been set by the caller. ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error } // BlobIngester ingests blob data. type BlobIngester interface { // Put inserts the content p into the blob service, returning a descriptor // or an error. Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) // Create allocates a new blob writer to add a blob to this service. The // returned handle can be written to and later resumed using an opaque // identifier. With this approach, one can Close and Resume a BlobWriter // multiple times until the BlobWriter is committed or cancelled. Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) // Resume attempts to resume a write to a blob, identified by an id. Resume(ctx context.Context, id string) (BlobWriter, error) } // BlobCreateOption is a general extensible function argument for blob creation // methods. A BlobIngester may choose to honor any or none of the given // BlobCreateOptions, which can be specific to the implementation of the // BlobIngester receiving them. // TODO (brianbland): unify this with ManifestServiceOption in the future type BlobCreateOption interface { Apply(interface{}) error } // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be // recovered with the id. 
type BlobWriter interface { io.WriteSeeker io.ReaderFrom io.Closer // ID returns the identifier for this writer. The ID can be used with the // Blob service to later resume the write. ID() string // StartedAt returns the time this blob write was started. StartedAt() time.Time // Commit completes the blob writer process. The content is verified // against the provided provisional descriptor, which may result in an // error. Depending on the implementation, written data may be validated // against the provisional descriptor fields. If MediaType is not present, // the implementation may reject the commit or assign "application/octet- // stream" to the blob. The returned descriptor may have a different // digest depending on the blob store, referred to as the canonical // descriptor. Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) // Cancel ends the blob write without storing any data and frees any // associated resources. Any data written thus far will be lost. Cancel // implementations should allow multiple calls even after a commit that // result in a no-op. This allows use of Cancel in a defer statement, // increasing the assurance that it is correctly called. Cancel(ctx context.Context) error // Get a reader to the blob being written by this BlobWriter Reader() (io.ReadCloser, error) } // BlobService combines the operations to access, read and write blobs. This // can be used to describe remote blob services. type BlobService interface { BlobStatter BlobProvider BlobIngester } // BlobStore represent the entire suite of blob related operations. Such an // implementation can access, read, write, delete and serve blobs. type BlobStore interface { BlobService BlobServer BlobDeleter } distribution-2.3.0/circle.yml000066400000000000000000000056311265472114500162330ustar00rootroot00000000000000# Pony-up! machine: pre: # Install gvm - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) # Install ceph to test rados driver & create pool - sudo -i ~/distribution/contrib/ceph/ci-setup.sh - ceph osd pool create docker-distribution 1 # Install codecov for coverage - pip install --user codecov post: # go - gvm install go1.5.3 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME # Trick circle brainflat "no absolute path" behavior BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" # Ceph config RADOS_POOL: "docker-distribution" hosts: # Not used yet fancy: 127.0.0.1 dependencies: pre: # Copy the code to the gopath of all go versions - > gvm use stable && mkdir -p "$(dirname $BASE_STABLE)" && cp -R "$CHECKOUT" "$BASE_STABLE" override: # Install dependencies for every copied clone/go version - gvm use stable && go get github.com/tools/godep: pwd: $BASE_STABLE post: # For the stable go version, additionally install linting tools - > gvm use stable && go get github.com/axw/gocov/gocov github.com/golang/lint/golint test: pre: # Output the go versions we are going to test # - gvm use old && go version - gvm use stable && go version # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). 
- gvm use stable && godep go install ./...: pwd: $BASE_STABLE # FMT - gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": pwd: $BASE_STABLE # VET - gvm use stable && go vet ./...: pwd: $BASE_STABLE # LINT - gvm use stable && test -z "$(golint ./... | grep -v Godeps/_workspace/src/ | tee /dev/stderr)": pwd: $BASE_STABLE override: # Test stable, and report - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': timeout: 600 pwd: $BASE_STABLE post: # Report to codecov - bash <(curl -s https://codecov.io/bash): pwd: $BASE_STABLE ## Notes # Disabled the -race detector due to massive memory usage. # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" # http://labix.org/gocheck distribution-2.3.0/cmd/000077500000000000000000000000001265472114500150055ustar00rootroot00000000000000distribution-2.3.0/cmd/digest/000077500000000000000000000000001265472114500162645ustar00rootroot00000000000000distribution-2.3.0/cmd/digest/main.go000066400000000000000000000033161265472114500175420ustar00rootroot00000000000000package main import ( "flag" "fmt" "io" "log" "os" "github.com/docker/distribution/digest" "github.com/docker/distribution/version" ) var ( algorithm = digest.Canonical showVersion bool ) type job struct { name string reader io.Reader } func init() { flag.Var(&algorithm, "a", "select the digest algorithm (shorthand)") flag.Var(&algorithm, "algorithm", "select the digest algorithm") flag.BoolVar(&showVersion, "version", false, "show the version and exit") log.SetFlags(0) log.SetPrefix(os.Args[0] + ": ") } func usage() { fmt.Fprintf(os.Stderr, "usage: %s [files...]\n", os.Args[0]) fmt.Fprintf(os.Stderr, ` Calculate the digest of one or more input files, emitting the result to standard out. If no files are provided, the digest of stdin will be calculated. `) flag.PrintDefaults() } func unsupported() { log.Fatalf("unsupported digest algorithm: %v", algorithm) } func main() { var jobs []job flag.Usage = usage flag.Parse() if showVersion { version.PrintVersion() return } var fail bool // if we fail on one item, foul the exit code if flag.NArg() > 0 { for _, path := range flag.Args() { fp, err := os.Open(path) if err != nil { log.Printf("%s: %v", path, err) fail = true continue } defer fp.Close() jobs = append(jobs, job{name: path, reader: fp}) } } else { // just read stdin jobs = append(jobs, job{name: "-", reader: os.Stdin}) } digestFn := algorithm.FromReader if !algorithm.Available() { unsupported() } for _, job := range jobs { dgst, err := digestFn(job.reader) if err != nil { log.Printf("%s: %v", job.name, err) fail = true continue } fmt.Printf("%v\t%s\n", dgst, job.name) } if fail { os.Exit(1) } } distribution-2.3.0/cmd/registry-api-descriptor-template/000077500000000000000000000000001265472114500234115ustar00rootroot00000000000000distribution-2.3.0/cmd/registry-api-descriptor-template/main.go000066400000000000000000000053531265472114500246720ustar00rootroot00000000000000// registry-api-descriptor-template uses the APIDescriptor defined in the // api/v2 package to execute templates passed to the command line. 
// // For example, to generate a new API specification, one would execute the // following command from the repo root: // // $ registry-api-descriptor-template docs/spec/api.md.tmpl > docs/spec/api.md // // The templates are passed in the api/v2.APIDescriptor object. Please see the // package documentation for fields available on that object. The template // syntax is from Go's standard library text/template package. For information // on Go's template syntax, please see golang.org/pkg/text/template. package main import ( "log" "net/http" "os" "path/filepath" "regexp" "text/template" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" ) var spaceRegex = regexp.MustCompile(`\n\s*`) func main() { if len(os.Args) != 2 { log.Fatalln("please specify a template to execute.") } path := os.Args[1] filename := filepath.Base(path) funcMap := template.FuncMap{ "removenewlines": func(s string) string { return spaceRegex.ReplaceAllString(s, " ") }, "statustext": http.StatusText, "prettygorilla": prettyGorillaMuxPath, } tmpl := template.Must(template.New(filename).Funcs(funcMap).ParseFiles(path)) data := struct { RouteDescriptors []v2.RouteDescriptor ErrorDescriptors []errcode.ErrorDescriptor }{ RouteDescriptors: v2.APIDescriptor.RouteDescriptors, ErrorDescriptors: append(errcode.GetErrorCodeGroup("registry.api.v2"), // The following are part of the specification but provided by errcode default. errcode.ErrorCodeUnauthorized.Descriptor(), errcode.ErrorCodeDenied.Descriptor(), errcode.ErrorCodeUnsupported.Descriptor()), } if err := tmpl.Execute(os.Stdout, data); err != nil { log.Fatalln(err) } } // prettyGorillaMuxPath removes the regular expressions from a gorilla/mux // route string, making it suitable for documentation. func prettyGorillaMuxPath(s string) string { // Stateful parser that removes regular expressions from gorilla // routes. It correctly handles balanced bracket pairs. 
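// Illustrative example (the route shown here is hypothetical, not the
// actual v2 route table): a mux path such as
//	/v2/{name:[a-zA-Z0-9/_-]+}/blobs/{digest}
// is rewritten to
//	/v2/<name>/blobs/<digest>
// by stripping each regular expression and wrapping the label in <>.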
var output string var label string var level int start: if s[0] == '{' { s = s[1:] level++ goto capture } output += string(s[0]) s = s[1:] goto end capture: switch s[0] { case '{': level++ case '}': level-- if level == 0 { s = s[1:] goto label } case ':': s = s[1:] goto skip default: label += string(s[0]) } s = s[1:] goto capture skip: switch s[0] { case '{': level++ case '}': level-- } s = s[1:] if level == 0 { goto label } goto skip label: if label != "" { output += "<" + label + ">" label = "" } end: if s != "" { goto start } return output } distribution-2.3.0/cmd/registry/000077500000000000000000000000001265472114500166555ustar00rootroot00000000000000distribution-2.3.0/cmd/registry/config-cache.yml000066400000000000000000000022471265472114500217130ustar00rootroot00000000000000version: 0.1 log: level: debug fields: service: registry environment: development storage: cache: blobdescriptor: redis filesystem: rootdirectory: /var/lib/registry-cache maintenance: uploadpurging: enabled: false http: addr: :5000 secret: asecretforlocaldevelopment debug: addr: localhost:5001 headers: X-Content-Type-Options: [nosniff] redis: addr: localhost:6379 pool: maxidle: 16 maxactive: 64 idletimeout: 300s dialtimeout: 10ms readtimeout: 10ms writetimeout: 10ms notifications: endpoints: - name: local-8082 url: http://localhost:5003/callback headers: Authorization: [Bearer ] timeout: 1s threshold: 10 backoff: 1s disabled: true - name: local-8083 url: http://localhost:8083/callback timeout: 1s threshold: 10 backoff: 1s disabled: true proxy: remoteurl: https://registry-1.docker.io username: username password: password health: storagedriver: enabled: true interval: 10s threshold: 3 distribution-2.3.0/cmd/registry/config-dev.yml000066400000000000000000000025551265472114500214300ustar00rootroot00000000000000version: 0.1 log: level: debug fields: service: registry environment: development hooks: - type: mail disabled: true levels: - panic options: smtp: addr: mail.example.com:25 username: mailuser password: password insecure: true from: sender@example.com to: - errors@example.com storage: delete: enabled: true cache: blobdescriptor: redis filesystem: rootdirectory: /var/lib/registry maintenance: uploadpurging: enabled: false http: addr: :5000 debug: addr: localhost:5001 headers: X-Content-Type-Options: [nosniff] redis: addr: localhost:6379 pool: maxidle: 16 maxactive: 64 idletimeout: 300s dialtimeout: 10ms readtimeout: 10ms writetimeout: 10ms notifications: endpoints: - name: local-5003 url: http://localhost:5003/callback headers: Authorization: [Bearer ] timeout: 1s threshold: 10 backoff: 1s disabled: true - name: local-8083 url: http://localhost:8083/callback timeout: 1s threshold: 10 backoff: 1s disabled: true health: storagedriver: enabled: true interval: 10s threshold: 3 distribution-2.3.0/cmd/registry/config-example.yml000066400000000000000000000004731265472114500223020ustar00rootroot00000000000000version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 distribution-2.3.0/cmd/registry/main.go000066400000000000000000000016011265472114500201260ustar00rootroot00000000000000package main import ( _ "net/http/pprof" "github.com/docker/distribution/registry" _ "github.com/docker/distribution/registry/auth/htpasswd" _ "github.com/docker/distribution/registry/auth/silly" _ "github.com/docker/distribution/registry/auth/token" 
_ "github.com/docker/distribution/registry/proxy" _ "github.com/docker/distribution/registry/storage/driver/azure" _ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/gcs" _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" _ "github.com/docker/distribution/registry/storage/driver/oss" _ "github.com/docker/distribution/registry/storage/driver/s3" _ "github.com/docker/distribution/registry/storage/driver/swift" ) func main() { registry.Cmd.Execute() } distribution-2.3.0/cmd/registry/rados.go000066400000000000000000000001571265472114500203170ustar00rootroot00000000000000// +build include_rados package main import _ "github.com/docker/distribution/registry/storage/driver/rados" distribution-2.3.0/configuration/000077500000000000000000000000001265472114500171115ustar00rootroot00000000000000distribution-2.3.0/configuration/configuration.go000066400000000000000000000453661265472114500223250ustar00rootroot00000000000000package configuration import ( "fmt" "io" "io/ioutil" "net/http" "reflect" "strings" "time" ) // Configuration is a versioned registry configuration, intended to be provided by a yaml file, and // optionally modified by environment variables. // // Note that yaml field names should never include _ characters, since this is the separator used // in environment variable names. type Configuration struct { // Version is the version which defines the format of the rest of the configuration Version Version `yaml:"version"` // Log supports setting various parameters related to the logging // subsystem. Log struct { // Level is the granularity at which registry operations are logged. Level Loglevel `yaml:"level"` // Formatter overrides the default formatter with another. Options // include "text", "json" and "logstash". Formatter string `yaml:"formatter,omitempty"` // Fields allows users to specify static string fields to include in // the logger context. Fields map[string]interface{} `yaml:"fields,omitempty"` // Hooks allows users to configurate the log hooks, to enabling the // sequent handling behavior, when defined levels of log message emit. Hooks []LogHook `yaml:"hooks,omitempty"` } // Loglevel is the level at which registry operations are logged. This is // deprecated. Please use Log.Level in the future. Loglevel Loglevel `yaml:"loglevel,omitempty"` // Storage is the configuration for the registry's storage driver Storage Storage `yaml:"storage"` // Auth allows configuration of various authorization methods that may be // used to gate requests. Auth Auth `yaml:"auth,omitempty"` // Middleware lists all middlewares to be used by the registry. Middleware map[string][]Middleware `yaml:"middleware,omitempty"` // Reporting is the configuration for error reporting Reporting Reporting `yaml:"reporting,omitempty"` // HTTP contains configuration parameters for the registry's http // interface. HTTP struct { // Addr specifies the bind address for the registry instance. Addr string `yaml:"addr,omitempty"` // Net specifies the net portion of the bind address. A default empty value means tcp. Net string `yaml:"net,omitempty"` // Host specifies an externally-reachable address for the registry, as a fully // qualified URL. Host string `yaml:"host,omitempty"` Prefix string `yaml:"prefix,omitempty"` // Secret specifies the secret key which HMAC tokens are created with. 
Secret string `yaml:"secret,omitempty"` // TLS instructs the http server to listen with a TLS configuration. // This only support simple tls configuration with a cert and key. // Mostly, this is useful for testing situations or simple deployments // that require tls. If more complex configurations are required, use // a proxy or make a proposal to add support here. TLS struct { // Certificate specifies the path to an x509 certificate file to // be used for TLS. Certificate string `yaml:"certificate,omitempty"` // Key specifies the path to the x509 key file, which should // contain the private portion for the file specified in // Certificate. Key string `yaml:"key,omitempty"` // Specifies the CA certs for client authentication // A file may contain multiple CA certificates encoded as PEM ClientCAs []string `yaml:"clientcas,omitempty"` } `yaml:"tls,omitempty"` // Headers is a set of headers to include in HTTP responses. A common // use case for this would be security headers such as // Strict-Transport-Security. The map keys are the header names, and // the values are the associated header payloads. Headers http.Header `yaml:"headers,omitempty"` // Debug configures the http debug interface, if specified. This can // include services such as pprof, expvar and other data that should // not be exposed externally. Left disabled by default. Debug struct { // Addr specifies the bind address for the debug server. Addr string `yaml:"addr,omitempty"` } `yaml:"debug,omitempty"` } `yaml:"http,omitempty"` // Notifications specifies configuration about various endpoint to which // registry events are dispatched. Notifications Notifications `yaml:"notifications,omitempty"` // Redis configures the redis pool available to the registry webapp. Redis struct { // Addr specifies the the redis instance available to the application. Addr string `yaml:"addr,omitempty"` // Password string to use when making a connection. Password string `yaml:"password,omitempty"` // DB specifies the database to connect to on the redis instance. DB int `yaml:"db,omitempty"` DialTimeout time.Duration `yaml:"dialtimeout,omitempty"` // timeout for connect ReadTimeout time.Duration `yaml:"readtimeout,omitempty"` // timeout for reads of data WriteTimeout time.Duration `yaml:"writetimeout,omitempty"` // timeout for writes of data // Pool configures the behavior of the redis connection pool. Pool struct { // MaxIdle sets the maximum number of idle connections. MaxIdle int `yaml:"maxidle,omitempty"` // MaxActive sets the maximum number of connections that should be // opened before blocking a connection request. MaxActive int `yaml:"maxactive,omitempty"` // IdleTimeout sets the amount time to wait before closing // inactive connections. IdleTimeout time.Duration `yaml:"idletimeout,omitempty"` } `yaml:"pool,omitempty"` } `yaml:"redis,omitempty"` Health Health `yaml:"health,omitempty"` Proxy Proxy `yaml:"proxy,omitempty"` } // LogHook is composed of hook Level and Type. // After hooks configuration, it can execute the next handling automatically, // when defined levels of log message emitted. // Example: hook can sending an email notification when error log happens in app. type LogHook struct { // Disable lets user select to enable hook or not. Disabled bool `yaml:"disabled,omitempty"` // Type allows user to select which type of hook handler they want. Type string `yaml:"type,omitempty"` // Levels set which levels of log message will let hook executed. 
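// For example, cmd/registry/config-dev.yml above lists only "panic" under
// levels for its (disabled) mail hook.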
Levels []string `yaml:"levels,omitempty"` // MailOptions allows user to configure email parameters. MailOptions MailOptions `yaml:"options,omitempty"` } // MailOptions provides the configuration options for the mail hook handler. type MailOptions struct { SMTP struct { // Addr defines the smtp host address Addr string `yaml:"addr,omitempty"` // Username defines the user name for the smtp host Username string `yaml:"username,omitempty"` // Password defines the password of the login user Password string `yaml:"password,omitempty"` // Insecure defines whether the smtp login skips certificate verification. Insecure bool `yaml:"insecure,omitempty"` } `yaml:"smtp,omitempty"` // From defines the mail sending address From string `yaml:"from,omitempty"` // To defines the mail receiving addresses To []string `yaml:"to,omitempty"` } // FileChecker is a type of entry in the health section for checking files. type FileChecker struct { // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` // File is the path to check File string `yaml:"file,omitempty"` // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` } // HTTPChecker is a type of entry in the health section for checking HTTP URIs. type HTTPChecker struct { // Timeout is the duration to wait before timing out the HTTP request Timeout time.Duration `yaml:"timeout,omitempty"` // StatusCode is the expected status code StatusCode int // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` // URI is the HTTP URI to check URI string `yaml:"uri,omitempty"` // Headers lists static headers that should be added to all requests Headers http.Header `yaml:"headers"` // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` } // TCPChecker is a type of entry in the health section for checking TCP servers. type TCPChecker struct { // Timeout is the duration to wait before timing out the TCP connection Timeout time.Duration `yaml:"timeout,omitempty"` // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` // Addr is the TCP address to check Addr string `yaml:"addr,omitempty"` // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` } // Health provides the configuration section for health checks.
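//
// The StorageDriver checker below corresponds to the yaml already used in
// cmd/registry/config-example.yml above:
//
//	health:
//	  storagedriver:
//	    enabled: true
//	    interval: 10s
//	    threshold: 3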
type Health struct { // FileCheckers is a list of paths to check FileCheckers []FileChecker `yaml:"file,omitempty"` // HTTPCheckers is a list of URIs to check HTTPCheckers []HTTPChecker `yaml:"http,omitempty"` // TCPCheckers is a list of URIs to check TCPCheckers []TCPChecker `yaml:"tcp,omitempty"` // StorageDriver configures a health check on the configured storage // driver StorageDriver struct { // Enabled turns on the health check for the storage driver Enabled bool `yaml:"enabled,omitempty"` // Interval is the duration in between checks Interval time.Duration `yaml:"interval,omitempty"` // Threshold is the number of times a check must fail to trigger an // unhealthy state Threshold int `yaml:"threshold,omitempty"` } `yaml:"storagedriver,omitempty"` } // v0_1Configuration is a Version 0.1 Configuration struct // This is currently aliased to Configuration, as it is the current version type v0_1Configuration Configuration // UnmarshalYAML implements the yaml.Unmarshaler interface // Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent uints func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { var versionString string err := unmarshal(&versionString) if err != nil { return err } newVersion := Version(versionString) if _, err := newVersion.major(); err != nil { return err } if _, err := newVersion.minor(); err != nil { return err } *version = newVersion return nil } // CurrentVersion is the most recent Version that can be parsed var CurrentVersion = MajorMinorVersion(0, 1) // Loglevel is the level at which operations are logged // This can be error, warn, info, or debug type Loglevel string // UnmarshalYAML implements the yaml.Umarshaler interface // Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a // valid loglevel func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error { var loglevelString string err := unmarshal(&loglevelString) if err != nil { return err } loglevelString = strings.ToLower(loglevelString) switch loglevelString { case "error", "warn", "info", "debug": default: return fmt.Errorf("Invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString) } *loglevel = Loglevel(loglevelString) return nil } // Parameters defines a key-value parameters mapping type Parameters map[string]interface{} // Storage defines the configuration for registry object storage type Storage map[string]Parameters // Type returns the storage driver type, such as filesystem or s3 func (storage Storage) Type() string { var storageType []string // Return only key in this map for k := range storage { switch k { case "maintenance": // allow configuration of maintenance case "cache": // allow configuration of caching case "delete": // allow configuration of delete case "redirect": // allow configuration of redirect default: storageType = append(storageType, k) } } if len(storageType) > 1 { panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", ")) } if len(storageType) == 1 { return storageType[0] } return "" } // Parameters returns the Parameters map for a Storage configuration func (storage Storage) Parameters() Parameters { return storage[storage.Type()] } // setParameter changes the parameter at the provided key to the new value func (storage Storage) setParameter(key string, value interface{}) { storage[storage.Type()][key] = value } // UnmarshalYAML implements the yaml.Unmarshaler interface // 
Unmarshals a single item map into a Storage or a string into a Storage type with no parameters func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error { var storageMap map[string]Parameters err := unmarshal(&storageMap) if err == nil { if len(storageMap) > 1 { types := make([]string, 0, len(storageMap)) for k := range storageMap { switch k { case "maintenance": // allow for configuration of maintenance case "cache": // allow configuration of caching case "delete": // allow configuration of delete case "redirect": // allow configuration of redirect default: types = append(types, k) } } if len(types) > 1 { return fmt.Errorf("Must provide exactly one storage type. Provided: %v", types) } } *storage = storageMap return nil } var storageType string err = unmarshal(&storageType) if err == nil { *storage = Storage{storageType: Parameters{}} return nil } return err } // MarshalYAML implements the yaml.Marshaler interface func (storage Storage) MarshalYAML() (interface{}, error) { if storage.Parameters() == nil { return storage.Type(), nil } return map[string]Parameters(storage), nil } // Auth defines the configuration for registry authorization. type Auth map[string]Parameters // Type returns the storage driver type, such as filesystem or s3 func (auth Auth) Type() string { // Return only key in this map for k := range auth { return k } return "" } // Parameters returns the Parameters map for an Auth configuration func (auth Auth) Parameters() Parameters { return auth[auth.Type()] } // setParameter changes the parameter at the provided key to the new value func (auth Auth) setParameter(key string, value interface{}) { auth[auth.Type()][key] = value } // UnmarshalYAML implements the yaml.Unmarshaler interface // Unmarshals a single item map into a Storage or a string into a Storage type with no parameters func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error { var m map[string]Parameters err := unmarshal(&m) if err == nil { if len(m) > 1 { types := make([]string, 0, len(m)) for k := range m { types = append(types, k) } // TODO(stevvooe): May want to change this slightly for // authorization to allow multiple challenges. return fmt.Errorf("must provide exactly one type. Provided: %v", types) } *auth = m return nil } var authType string err = unmarshal(&authType) if err == nil { *auth = Auth{authType: Parameters{}} return nil } return err } // MarshalYAML implements the yaml.Marshaler interface func (auth Auth) MarshalYAML() (interface{}, error) { if auth.Parameters() == nil { return auth.Type(), nil } return map[string]Parameters(auth), nil } // Notifications configures multiple http endpoints. type Notifications struct { // Endpoints is a list of http configurations for endpoints that // respond to webhook notifications. In the future, we may allow other // kinds of endpoints, such as external queues. Endpoints []Endpoint `yaml:"endpoints,omitempty"` } // Endpoint describes the configuration of an http webhook notification // endpoint. type Endpoint struct { Name string `yaml:"name"` // identifies the endpoint in the registry instance. Disabled bool `yaml:"disabled"` // disables the endpoint URL string `yaml:"url"` // post url for the endpoint. 
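// (The notifications.endpoints entries in cmd/registry/config-dev.yml and
// config-cache.yml above show a complete set of these fields.)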
Headers http.Header `yaml:"headers"` // static headers that should be added to all requests Timeout time.Duration `yaml:"timeout"` // HTTP timeout Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure Backoff time.Duration `yaml:"backoff"` // backoff duration } // Reporting defines error reporting methods. type Reporting struct { // Bugsnag configures error reporting for Bugsnag (bugsnag.com). Bugsnag BugsnagReporting `yaml:"bugsnag,omitempty"` // NewRelic configures error reporting for NewRelic (newrelic.com) NewRelic NewRelicReporting `yaml:"newrelic,omitempty"` } // BugsnagReporting configures error reporting for Bugsnag (bugsnag.com). type BugsnagReporting struct { // APIKey is the Bugsnag api key. APIKey string `yaml:"apikey,omitempty"` // ReleaseStage tracks where the registry is deployed. // Examples: production, staging, development ReleaseStage string `yaml:"releasestage,omitempty"` // Endpoint is used for specifying an enterprise Bugsnag endpoint. Endpoint string `yaml:"endpoint,omitempty"` } // NewRelicReporting configures error reporting for NewRelic (newrelic.com) type NewRelicReporting struct { // LicenseKey is the NewRelic user license key LicenseKey string `yaml:"licensekey,omitempty"` // Name is the component name of the registry in NewRelic Name string `yaml:"name,omitempty"` // Verbose configures debug output to STDOUT Verbose bool `yaml:"verbose,omitempty"` } // Middleware configures named middlewares to be applied at injection points. type Middleware struct { // Name the middleware registers itself as Name string `yaml:"name"` // Flag to disable middleware easily Disabled bool `yaml:"disabled,omitempty"` // Map of parameters that will be passed to the middleware's initialization function Options Parameters `yaml:"options"` } // Proxy configures the registry as a pull through cache type Proxy struct { // RemoteURL is the URL of the remote registry RemoteURL string `yaml:"remoteurl"` // Username of the hub user Username string `yaml:"username"` // Password of the hub user Password string `yaml:"password"` } // Parse parses an input configuration yaml document into a Configuration struct // This should generally be capable of handling old configuration format versions // // Environment variables may be used to override configuration parameters other than version, // following the scheme below: // Configuration.Abc may be replaced by the value of REGISTRY_ABC, // Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth func Parse(rd io.Reader) (*Configuration, error) { in, err := ioutil.ReadAll(rd) if err != nil { return nil, err } p := NewParser("registry", []VersionedParseInfo{ { Version: MajorMinorVersion(0, 1), ParseAs: reflect.TypeOf(v0_1Configuration{}), ConversionFunc: func(c interface{}) (interface{}, error) { if v0_1, ok := c.(*v0_1Configuration); ok { if v0_1.Loglevel == Loglevel("") { v0_1.Loglevel = Loglevel("info") } if v0_1.Storage.Type() == "" { return nil, fmt.Errorf("No storage configuration provided") } return (*Configuration)(v0_1), nil } return nil, fmt.Errorf("Expected *v0_1Configuration, received %#v", c) }, }, }) config := new(Configuration) err = p.Parse(in, config) if err != nil { return nil, err } return config, nil } distribution-2.3.0/configuration/configuration_test.go000066400000000000000000000411201265472114500233440ustar00rootroot00000000000000package configuration import ( "bytes" "net/http" "os" "reflect" "strings" "testing" . 
"gopkg.in/check.v1" "gopkg.in/yaml.v2" ) // Hook up gocheck into the "go test" runner func Test(t *testing.T) { TestingT(t) } // configStruct is a canonical example configuration, which should map to configYamlV0_1 var configStruct = Configuration{ Version: "0.1", Log: struct { Level Loglevel `yaml:"level"` Formatter string `yaml:"formatter,omitempty"` Fields map[string]interface{} `yaml:"fields,omitempty"` Hooks []LogHook `yaml:"hooks,omitempty"` }{ Fields: map[string]interface{}{"environment": "test"}, }, Loglevel: "info", Storage: Storage{ "s3": Parameters{ "region": "us-east-1", "bucket": "my-bucket", "rootdirectory": "/registry", "encrypt": true, "secure": false, "accesskey": "SAMPLEACCESSKEY", "secretkey": "SUPERSECRET", "host": nil, "port": 42, }, }, Auth: Auth{ "silly": Parameters{ "realm": "silly", "service": "silly", }, }, Reporting: Reporting{ Bugsnag: BugsnagReporting{ APIKey: "BugsnagApiKey", }, }, Notifications: Notifications{ Endpoints: []Endpoint{ { Name: "endpoint-1", URL: "http://example.com", Headers: http.Header{ "Authorization": []string{"Bearer "}, }, }, }, }, HTTP: struct { Addr string `yaml:"addr,omitempty"` Net string `yaml:"net,omitempty"` Host string `yaml:"host,omitempty"` Prefix string `yaml:"prefix,omitempty"` Secret string `yaml:"secret,omitempty"` TLS struct { Certificate string `yaml:"certificate,omitempty"` Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` } `yaml:"tls,omitempty"` Headers http.Header `yaml:"headers,omitempty"` Debug struct { Addr string `yaml:"addr,omitempty"` } `yaml:"debug,omitempty"` }{ TLS: struct { Certificate string `yaml:"certificate,omitempty"` Key string `yaml:"key,omitempty"` ClientCAs []string `yaml:"clientcas,omitempty"` }{ ClientCAs: []string{"/path/to/ca.pem"}, }, Headers: http.Header{ "X-Content-Type-Options": []string{"nosniff"}, }, }, } // configYamlV0_1 is a Version 0.1 yaml document representing configStruct var configYamlV0_1 = ` version: 0.1 log: fields: environment: test loglevel: info storage: s3: region: us-east-1 bucket: my-bucket rootdirectory: /registry encrypt: true secure: false accesskey: SAMPLEACCESSKEY secretkey: SUPERSECRET host: ~ port: 42 auth: silly: realm: silly service: silly notifications: endpoints: - name: endpoint-1 url: http://example.com headers: Authorization: [Bearer ] reporting: bugsnag: apikey: BugsnagApiKey http: clientcas: - /path/to/ca.pem headers: X-Content-Type-Options: [nosniff] ` // inmemoryConfigYamlV0_1 is a Version 0.1 yaml document specifying an inmemory // storage driver with no parameters var inmemoryConfigYamlV0_1 = ` version: 0.1 loglevel: info storage: inmemory auth: silly: realm: silly service: silly notifications: endpoints: - name: endpoint-1 url: http://example.com headers: Authorization: [Bearer ] http: headers: X-Content-Type-Options: [nosniff] ` type ConfigSuite struct { expectedConfig *Configuration } var _ = Suite(new(ConfigSuite)) func (suite *ConfigSuite) SetUpTest(c *C) { os.Clearenv() suite.expectedConfig = copyConfig(configStruct) } // TestMarshalRoundtrip validates that configStruct can be marshaled and // unmarshaled without changing any parameters func (suite *ConfigSuite) TestMarshalRoundtrip(c *C) { configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) config, err := Parse(bytes.NewReader(configBytes)) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseSimple validates that configYamlV0_1 can be parsed into a struct // matching configStruct func (suite *ConfigSuite) 
TestParseSimple(c *C) { config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseInmemory validates that configuration yaml with storage provided as // a string can be parsed into a Configuration struct with no storage parameters func (suite *ConfigSuite) TestParseInmemory(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} suite.expectedConfig.Reporting = Reporting{} suite.expectedConfig.Log.Fields = nil config, err := Parse(bytes.NewReader([]byte(inmemoryConfigYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseIncomplete validates that an incomplete yaml configuration cannot // be parsed without providing environment variables to fill in the missing // components. func (suite *ConfigSuite) TestParseIncomplete(c *C) { incompleteConfigYaml := "version: 0.1" _, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) c.Assert(err, NotNil) suite.expectedConfig.Log.Fields = nil suite.expectedConfig.Storage = Storage{"filesystem": Parameters{"rootdirectory": "/tmp/testroot"}} suite.expectedConfig.Auth = Auth{"silly": Parameters{"realm": "silly"}} suite.expectedConfig.Reporting = Reporting{} suite.expectedConfig.Notifications = Notifications{} suite.expectedConfig.HTTP.Headers = nil // Note: this also tests that REGISTRY_STORAGE and // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY can be used together os.Setenv("REGISTRY_STORAGE", "filesystem") os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") os.Setenv("REGISTRY_AUTH", "silly") os.Setenv("REGISTRY_AUTH_SILLY_REALM", "silly") config, err := Parse(bytes.NewReader([]byte(incompleteConfigYaml))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseWithSameEnvStorage validates that providing environment variables // that match the given storage type will only include environment-defined // parameters and remove yaml-defined parameters func (suite *ConfigSuite) TestParseWithSameEnvStorage(c *C) { suite.expectedConfig.Storage = Storage{"s3": Parameters{"region": "us-east-1"}} os.Setenv("REGISTRY_STORAGE", "s3") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-east-1") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseWithDifferentEnvStorageParams validates that providing environment variables that change // and add to the given storage parameters will change and add parameters to the parsed // Configuration struct func (suite *ConfigSuite) TestParseWithDifferentEnvStorageParams(c *C) { suite.expectedConfig.Storage.setParameter("region", "us-west-1") suite.expectedConfig.Storage.setParameter("secure", true) suite.expectedConfig.Storage.setParameter("newparam", "some Value") os.Setenv("REGISTRY_STORAGE_S3_REGION", "us-west-1") os.Setenv("REGISTRY_STORAGE_S3_SECURE", "true") os.Setenv("REGISTRY_STORAGE_S3_NEWPARAM", "some Value") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseWithDifferentEnvStorageType validates that providing an environment variable that // changes the storage type will be reflected in the parsed Configuration struct func (suite *ConfigSuite) TestParseWithDifferentEnvStorageType(c *C) { suite.expectedConfig.Storage = Storage{"inmemory": Parameters{}} os.Setenv("REGISTRY_STORAGE", "inmemory") config, err := 
Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseWithDifferentEnvStorageTypeAndParams validates that providing an environment variable // that changes the storage type will be reflected in the parsed Configuration struct and that // environment storage parameters will also be included func (suite *ConfigSuite) TestParseWithDifferentEnvStorageTypeAndParams(c *C) { suite.expectedConfig.Storage = Storage{"filesystem": Parameters{}} suite.expectedConfig.Storage.setParameter("rootdirectory", "/tmp/testroot") os.Setenv("REGISTRY_STORAGE", "filesystem") os.Setenv("REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY", "/tmp/testroot") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseWithSameEnvLoglevel validates that providing an environment variable defining the log // level to the same as the one provided in the yaml will not change the parsed Configuration struct func (suite *ConfigSuite) TestParseWithSameEnvLoglevel(c *C) { os.Setenv("REGISTRY_LOGLEVEL", "info") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseWithDifferentEnvLoglevel validates that providing an environment variable defining the // log level will override the value provided in the yaml document func (suite *ConfigSuite) TestParseWithDifferentEnvLoglevel(c *C) { suite.expectedConfig.Loglevel = "error" os.Setenv("REGISTRY_LOGLEVEL", "error") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseInvalidLoglevel validates that the parser will fail to parse a // configuration if the loglevel is malformed func (suite *ConfigSuite) TestParseInvalidLoglevel(c *C) { invalidConfigYaml := "version: 0.1\nloglevel: derp\nstorage: inmemory" _, err := Parse(bytes.NewReader([]byte(invalidConfigYaml))) c.Assert(err, NotNil) os.Setenv("REGISTRY_LOGLEVEL", "derp") _, err = Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, NotNil) } // TestParseWithDifferentEnvReporting validates that environment variables // properly override reporting parameters func (suite *ConfigSuite) TestParseWithDifferentEnvReporting(c *C) { suite.expectedConfig.Reporting.Bugsnag.APIKey = "anotherBugsnagApiKey" suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" suite.expectedConfig.Reporting.NewRelic.LicenseKey = "NewRelicLicenseKey" suite.expectedConfig.Reporting.NewRelic.Name = "some NewRelic NAME" os.Setenv("REGISTRY_REPORTING_BUGSNAG_APIKEY", "anotherBugsnagApiKey") os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") os.Setenv("REGISTRY_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") os.Setenv("REGISTRY_REPORTING_NEWRELIC_NAME", "some NewRelic NAME") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseInvalidVersion validates that the parser will fail to parse a newer configuration // version than the CurrentVersion func (suite *ConfigSuite) TestParseInvalidVersion(c *C) { suite.expectedConfig.Version = MajorMinorVersion(CurrentVersion.Major(), CurrentVersion.Minor()+1) configBytes, err := yaml.Marshal(suite.expectedConfig) c.Assert(err, IsNil) _, err = Parse(bytes.NewReader(configBytes)) c.Assert(err, NotNil) } // TestParseExtraneousVars validates that environment 
variables referring to // nonexistent variables don't cause side effects. func (suite *ConfigSuite) TestParseExtraneousVars(c *C) { suite.expectedConfig.Reporting.Bugsnag.Endpoint = "localhost:8080" // A valid environment variable os.Setenv("REGISTRY_REPORTING_BUGSNAG_ENDPOINT", "localhost:8080") // Environment variables which shouldn't set config items os.Setenv("registry_REPORTING_NEWRELIC_LICENSEKEY", "NewRelicLicenseKey") os.Setenv("REPORTING_NEWRELIC_NAME", "some NewRelic NAME") os.Setenv("REGISTRY_DUCKS", "quack") os.Setenv("REGISTRY_REPORTING_ASDF", "ghjk") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseEnvVarImplicitMaps validates that environment variables can set // values in maps that don't already exist. func (suite *ConfigSuite) TestParseEnvVarImplicitMaps(c *C) { readonly := make(map[string]interface{}) readonly["enabled"] = true maintenance := make(map[string]interface{}) maintenance["readonly"] = readonly suite.expectedConfig.Storage["maintenance"] = maintenance os.Setenv("REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED", "true") config, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) c.Assert(config, DeepEquals, suite.expectedConfig) } // TestParseEnvWrongTypeMap validates that incorrectly attempting to unmarshal a // string over existing map fails. func (suite *ConfigSuite) TestParseEnvWrongTypeMap(c *C) { os.Setenv("REGISTRY_STORAGE_S3", "somestring") _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, NotNil) } // TestParseEnvWrongTypeStruct validates that incorrectly attempting to // unmarshal a string into a struct fails. func (suite *ConfigSuite) TestParseEnvWrongTypeStruct(c *C) { os.Setenv("REGISTRY_STORAGE_LOG", "somestring") _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, NotNil) } // TestParseEnvWrongTypeSlice validates that incorrectly attempting to // unmarshal a string into a slice fails. func (suite *ConfigSuite) TestParseEnvWrongTypeSlice(c *C) { os.Setenv("REGISTRY_LOG_HOOKS", "somestring") _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, NotNil) } // TestParseEnvMany tests several environment variable overrides. // The result is not checked - the goal of this test is to detect panics // from misuse of reflection. func (suite *ConfigSuite) TestParseEnvMany(c *C) { os.Setenv("REGISTRY_VERSION", "0.1") os.Setenv("REGISTRY_LOG_LEVEL", "debug") os.Setenv("REGISTRY_LOG_FORMATTER", "json") os.Setenv("REGISTRY_LOG_HOOKS", "json") os.Setenv("REGISTRY_LOG_FIELDS", "abc: xyz") os.Setenv("REGISTRY_LOG_HOOKS", "- type: asdf") os.Setenv("REGISTRY_LOGLEVEL", "debug") os.Setenv("REGISTRY_STORAGE", "s3") os.Setenv("REGISTRY_AUTH_PARAMS", "param1: value1") os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") os.Setenv("REGISTRY_AUTH_PARAMS_VALUE2", "value2") _, err := Parse(bytes.NewReader([]byte(configYamlV0_1))) c.Assert(err, IsNil) } func checkStructs(c *C, t reflect.Type, structsChecked map[string]struct{}) { for t.Kind() == reflect.Ptr || t.Kind() == reflect.Map || t.Kind() == reflect.Slice { t = t.Elem() } if t.Kind() != reflect.Struct { return } if _, present := structsChecked[t.String()]; present { // Already checked this type return } structsChecked[t.String()] = struct{}{} byUpperCase := make(map[string]int) for i := 0; i < t.NumField(); i++ { sf := t.Field(i) // Check that the yaml tag does not contain an _. 
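// (The parser uses "_" as the path separator when mapping REGISTRY_*
// environment variables onto nested fields, so a hypothetical tag such as
// "dial_timeout" could not be addressed unambiguously; see the note on the
// Configuration struct in configuration.go.)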
yamlTag := sf.Tag.Get("yaml") if strings.Contains(yamlTag, "_") { c.Fatalf("yaml field name includes _ character: %s", yamlTag) } upper := strings.ToUpper(sf.Name) if _, present := byUpperCase[upper]; present { c.Fatalf("field name collision in configuration object: %s", sf.Name) } byUpperCase[upper] = i checkStructs(c, sf.Type, structsChecked) } } // TestValidateConfigStruct makes sure that the config struct has no members // with yaml tags that would be ambiguous to the environment variable parser. func (suite *ConfigSuite) TestValidateConfigStruct(c *C) { structsChecked := make(map[string]struct{}) checkStructs(c, reflect.TypeOf(Configuration{}), structsChecked) } func copyConfig(config Configuration) *Configuration { configCopy := new(Configuration) configCopy.Version = MajorMinorVersion(config.Version.Major(), config.Version.Minor()) configCopy.Loglevel = config.Loglevel configCopy.Log = config.Log configCopy.Log.Fields = make(map[string]interface{}, len(config.Log.Fields)) for k, v := range config.Log.Fields { configCopy.Log.Fields[k] = v } configCopy.Storage = Storage{config.Storage.Type(): Parameters{}} for k, v := range config.Storage.Parameters() { configCopy.Storage.setParameter(k, v) } configCopy.Reporting = Reporting{ Bugsnag: BugsnagReporting{config.Reporting.Bugsnag.APIKey, config.Reporting.Bugsnag.ReleaseStage, config.Reporting.Bugsnag.Endpoint}, NewRelic: NewRelicReporting{config.Reporting.NewRelic.LicenseKey, config.Reporting.NewRelic.Name, config.Reporting.NewRelic.Verbose}, } configCopy.Auth = Auth{config.Auth.Type(): Parameters{}} for k, v := range config.Auth.Parameters() { configCopy.Auth.setParameter(k, v) } configCopy.Notifications = Notifications{Endpoints: []Endpoint{}} for _, v := range config.Notifications.Endpoints { configCopy.Notifications.Endpoints = append(configCopy.Notifications.Endpoints, v) } configCopy.HTTP.Headers = make(http.Header) for k, v := range config.HTTP.Headers { configCopy.HTTP.Headers[k] = v } return configCopy } distribution-2.3.0/configuration/parser.go000066400000000000000000000177031265472114500207440ustar00rootroot00000000000000package configuration import ( "fmt" "os" "reflect" "sort" "strconv" "strings" "github.com/Sirupsen/logrus" "gopkg.in/yaml.v2" ) // Version is a major/minor version pair of the form Major.Minor // Major version upgrades indicate structure or type changes // Minor version upgrades should be strictly additive type Version string // MajorMinorVersion constructs a Version from its Major and Minor components func MajorMinorVersion(major, minor uint) Version { return Version(fmt.Sprintf("%d.%d", major, minor)) } func (version Version) major() (uint, error) { majorPart := strings.Split(string(version), ".")[0] major, err := strconv.ParseUint(majorPart, 10, 0) return uint(major), err } // Major returns the major version portion of a Version func (version Version) Major() uint { major, _ := version.major() return major } func (version Version) minor() (uint, error) { minorPart := strings.Split(string(version), ".")[1] minor, err := strconv.ParseUint(minorPart, 10, 0) return uint(minor), err } // Minor returns the minor version portion of a Version func (version Version) Minor() uint { minor, _ := version.minor() return minor } // VersionedParseInfo defines how a specific version of a configuration should // be parsed into the current version type VersionedParseInfo struct { // Version is the version which this parsing information relates to Version Version // ParseAs defines the type which a configuration file of this 
version // should be parsed into ParseAs reflect.Type // ConversionFunc defines a method for converting the parsed configuration // (of type ParseAs) into the current configuration version // Note: this method signature is very unclear with the absence of generics ConversionFunc func(interface{}) (interface{}, error) } type envVar struct { name string value string } type envVars []envVar func (a envVars) Len() int { return len(a) } func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name } // Parser can be used to parse a configuration file and environment of a defined // version into a unified output structure type Parser struct { prefix string mapping map[Version]VersionedParseInfo env envVars } // NewParser returns a *Parser with the given environment prefix which handles // versioned configurations which match the given parseInfos func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser { p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)} for _, parseInfo := range parseInfos { p.mapping[parseInfo.Version] = parseInfo } for _, env := range os.Environ() { envParts := strings.SplitN(env, "=", 2) p.env = append(p.env, envVar{envParts[0], envParts[1]}) } // We must sort the environment variables lexically by name so that // more specific variables are applied before less specific ones // (i.e. REGISTRY_STORAGE before // REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a // lot simpler and easier to get right than unmarshalling map entries // into temporaries and merging with the existing entry. sort.Sort(p.env) return &p } // Parse reads in the given []byte and environment and writes the resulting // configuration into the input v // // Environment variables may be used to override configuration parameters other // than version, following the scheme below: // v.Abc may be replaced by the value of PREFIX_ABC, // v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth func (p *Parser) Parse(in []byte, v interface{}) error { var versionedStruct struct { Version Version } if err := yaml.Unmarshal(in, &versionedStruct); err != nil { return err } parseInfo, ok := p.mapping[versionedStruct.Version] if !ok { return fmt.Errorf("Unsupported version: %q", versionedStruct.Version) } parseAs := reflect.New(parseInfo.ParseAs) err := yaml.Unmarshal(in, parseAs.Interface()) if err != nil { return err } for _, envVar := range p.env { pathStr := envVar.name if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") { path := strings.Split(pathStr, "_") err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value) if err != nil { return err } } } c, err := parseInfo.ConversionFunc(parseAs.Interface()) if err != nil { return err } reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c))) return nil } // overwriteFields replaces configuration values with alternate values specified // through the environment. Precondition: an empty path slice must never be // passed in. 
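//
// A sketch of the traversal for a single variable (the same variable is
// exercised by TestParseIncomplete in configuration_test.go above):
//
//	REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/tmp/testroot
//
// is split on "_" into the path [STORAGE, FILESYSTEM, ROOTDIRECTORY]; the
// parser matches STORAGE against the Configuration struct field
// case-insensitively, descends into the Storage map at the "filesystem" key,
// and finally yaml-unmarshals the payload "/tmp/testroot" into the
// "rootdirectory" parameter.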
func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error { for v.Kind() == reflect.Ptr { if v.IsNil() { panic("encountered nil pointer while handling environment variable " + fullpath) } v = reflect.Indirect(v) } switch v.Kind() { case reflect.Struct: return p.overwriteStruct(v, fullpath, path, payload) case reflect.Map: return p.overwriteMap(v, fullpath, path, payload) case reflect.Interface: if v.NumMethod() == 0 { if !v.IsNil() { return p.overwriteFields(v.Elem(), fullpath, path, payload) } // Interface was empty; create an implicit map var template map[string]interface{} wrappedV := reflect.MakeMap(reflect.TypeOf(template)) v.Set(wrappedV) return p.overwriteMap(wrappedV, fullpath, path, payload) } } return nil } func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error { // Generate case-insensitive map of struct fields byUpperCase := make(map[string]int) for i := 0; i < v.NumField(); i++ { sf := v.Type().Field(i) upper := strings.ToUpper(sf.Name) if _, present := byUpperCase[upper]; present { panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name)) } byUpperCase[upper] = i } fieldIndex, present := byUpperCase[path[0]] if !present { logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath) return nil } field := v.Field(fieldIndex) sf := v.Type().Field(fieldIndex) if len(path) == 1 { // Env var specifies this field directly fieldVal := reflect.New(sf.Type) err := yaml.Unmarshal([]byte(payload), fieldVal.Interface()) if err != nil { return err } field.Set(reflect.Indirect(fieldVal)) return nil } // If the field is nil, must create an object switch sf.Type.Kind() { case reflect.Map: if field.IsNil() { field.Set(reflect.MakeMap(sf.Type)) } case reflect.Ptr: if field.IsNil() { field.Set(reflect.New(sf.Type)) } } err := p.overwriteFields(field, fullpath, path[1:], payload) if err != nil { return err } return nil } func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error { if m.Type().Key().Kind() != reflect.String { // non-string keys unsupported logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath) return nil } if len(path) > 1 { // If a matching key exists, get its value and continue the // overwriting process. for _, k := range m.MapKeys() { if strings.ToUpper(k.String()) == path[0] { mapValue := m.MapIndex(k) // If the existing value is nil, we want to // recreate it instead of using this value. 
if (mapValue.Kind() == reflect.Ptr || mapValue.Kind() == reflect.Interface || mapValue.Kind() == reflect.Map) && mapValue.IsNil() { break } return p.overwriteFields(mapValue, fullpath, path[1:], payload) } } } // (Re)create this key var mapValue reflect.Value if m.Type().Elem().Kind() == reflect.Map { mapValue = reflect.MakeMap(m.Type().Elem()) } else { mapValue = reflect.New(m.Type().Elem()) } if len(path) > 1 { err := p.overwriteFields(mapValue, fullpath, path[1:], payload) if err != nil { return err } } else { err := yaml.Unmarshal([]byte(payload), mapValue.Interface()) if err != nil { return err } } m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue)) return nil } distribution-2.3.0/context/000077500000000000000000000000001265472114500157265ustar00rootroot00000000000000distribution-2.3.0/context/context.go000066400000000000000000000043521265472114500177450ustar00rootroot00000000000000package context import ( "sync" "github.com/docker/distribution/uuid" "golang.org/x/net/context" ) // Context is a copy of Context from the golang.org/x/net/context package. type Context interface { context.Context } // instanceContext is a context that provides only an instance id. It is // provided as the main background context. type instanceContext struct { Context id string // id of context, logged as "instance.id" once sync.Once // once protect generation of the id } func (ic *instanceContext) Value(key interface{}) interface{} { if key == "instance.id" { ic.once.Do(func() { // We want to lazy initialize the UUID such that we don't // call a random generator from the package initialization // code. For various reasons random could not be available // https://github.com/docker/distribution/issues/782 ic.id = uuid.Generate().String() }) return ic.id } return ic.Context.Value(key) } var background = &instanceContext{ Context: context.Background(), } // Background returns a non-nil, empty Context. The background context // provides a single key, "instance.id" that is globally unique to the // process. func Background() Context { return background } // WithValue returns a copy of parent in which the value associated with key is // val. Use context Values only for request-scoped data that transits processes // and APIs, not for passing optional parameters to functions. func WithValue(parent Context, key, val interface{}) Context { return context.WithValue(parent, key, val) } // stringMapContext is a simple context implementation that checks a map for a // key, falling back to a parent if not present. type stringMapContext struct { context.Context m map[string]interface{} } // WithValues returns a context that proxies lookups through a map. Only // supports string keys. func WithValues(ctx context.Context, m map[string]interface{}) context.Context { mo := make(map[string]interface{}, len(m)) // make our own copy. for k, v := range m { mo[k] = v } return stringMapContext{ Context: ctx, m: mo, } } func (smc stringMapContext) Value(key interface{}) interface{} { if ks, ok := key.(string); ok { if v, ok := smc.m[ks]; ok { return v } } return smc.Context.Value(key) } distribution-2.3.0/context/doc.go000066400000000000000000000075611265472114500170330ustar00rootroot00000000000000// Package context provides several utilities for working with // golang.org/x/net/context in http requests. Primarily, the focus is on // logging relevent request information but this package is not limited to // that purpose. 
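//
// (The Background context returned by this package also carries a lazily
// generated, process-unique "instance.id" value; see context.go above.)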
// // The easiest way to get started is to get the background context: // // ctx := context.Background() // // The returned context should be passed around your application and be the // root of all other context instances. If the application has a version, this // line should be called before anything else: // // ctx := context.WithVersion(context.Background(), version) // // The above will store the version in the context and will be available to // the logger. // // Logging // // The most useful aspect of this package is GetLogger. This function takes // any context.Context interface and returns the current logger from the // context. Canonical usage looks like this: // // GetLogger(ctx).Infof("something interesting happened") // // GetLogger also takes optional key arguments. The keys will be looked up in // the context and reported with the logger. The following example would // return a logger that prints the version with each log message: // // ctx := context.Context(context.Background(), "version", version) // GetLogger(ctx, "version").Infof("this log message has a version field") // // The above would print out a log message like this: // // INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m // // When used with WithLogger, we gain the ability to decorate the context with // loggers that have information from disparate parts of the call stack. // Following from the version example, we can build a new context with the // configured logger such that we always print the version field: // // ctx = WithLogger(ctx, GetLogger(ctx, "version")) // // Since the logger has been pushed to the context, we can now get the version // field for free with our log messages. Future calls to GetLogger on the new // context will have the version field: // // GetLogger(ctx).Infof("this log message has a version field") // // This becomes more powerful when we start stacking loggers. Let's say we // have the version logger from above but also want a request id. Using the // context above, in our request scoped function, we place another logger in // the context: // // ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context // ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id")) // // When GetLogger is called on the new context, "http.request.id" will be // included as a logger field, along with the original "version" field: // // INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m // // Note that this only affects the new context, the previous context, with the // version field, can be used independently. Put another way, the new logger, // added to the request context, is unique to that context and can have // request scoped varaibles. // // HTTP Requests // // This package also contains several methods for working with http requests. // The concepts are very similar to those described above. We simply place the // request in the context using WithRequest. This makes the request variables // available. GetRequestLogger can then be called to get request specific // variables in a log line: // // ctx = WithRequest(ctx, req) // GetRequestLogger(ctx).Infof("request variables") // // Like above, if we want to include the request data in all log messages in // the context, we push the logger to a new context and use that one: // // ctx = WithLogger(ctx, GetRequestLogger(ctx)) // // The concept is fairly powerful and ensures that calls throughout the stack // can be traced in log messages. 
Using the fields like "http.request.id", one // can analyze call flow for a particular request with a simple grep of the // logs. package context distribution-2.3.0/context/http.go000066400000000000000000000216651265472114500172460ustar00rootroot00000000000000package context import ( "errors" "net" "net/http" "strings" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" "github.com/gorilla/mux" ) // Common errors used with this package. var ( ErrNoRequestContext = errors.New("no http request in context") ErrNoResponseWriterContext = errors.New("no http response in context") ) func parseIP(ipStr string) net.IP { ip := net.ParseIP(ipStr) if ip == nil { log.Warnf("invalid remote IP address: %q", ipStr) } return ip } // RemoteAddr extracts the remote address of the request, taking into // account proxy headers. func RemoteAddr(r *http.Request) string { if prior := r.Header.Get("X-Forwarded-For"); prior != "" { proxies := strings.Split(prior, ",") if len(proxies) > 0 { remoteAddr := strings.Trim(proxies[0], " ") if parseIP(remoteAddr) != nil { return remoteAddr } } } // X-Real-Ip is less supported, but worth checking in the // absence of X-Forwarded-For if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { if parseIP(realIP) != nil { return realIP } } return r.RemoteAddr } // RemoteIP extracts the remote IP of the request, taking into // account proxy headers. func RemoteIP(r *http.Request) string { addr := RemoteAddr(r) // Try parsing it as "IP:port" if ip, _, err := net.SplitHostPort(addr); err == nil { return ip } return addr } // WithRequest places the request on the context. The context of the request // is assigned a unique id, available at "http.request.id". The request itself // is available at "http.request". Other common attributes are available under // the prefix "http.request.". If a request is already present on the context, // this method will panic. func WithRequest(ctx Context, r *http.Request) Context { if ctx.Value("http.request") != nil { // NOTE(stevvooe): This needs to be considered a programming error. It // is unlikely that we'd want to have more than one request in // context. panic("only one request per context") } return &httpRequestContext{ Context: ctx, startedAt: time.Now(), id: uuid.Generate().String(), r: r, } } // GetRequest returns the http request in the given context. Returns // ErrNoRequestContext if the context does not have an http request associated // with it. func GetRequest(ctx Context) (*http.Request, error) { if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { return r, nil } return nil, ErrNoRequestContext } // GetRequestID attempts to resolve the current request id, if possible. An // error is return if it is not available on the context. func GetRequestID(ctx Context) string { return GetStringValue(ctx, "http.request.id") } // WithResponseWriter returns a new context and response writer that makes // interesting response statistics available within the context. func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { irw := instrumentedResponseWriter{ ResponseWriter: w, Context: ctx, } if closeNotifier, ok := w.(http.CloseNotifier); ok { irwCN := &instrumentedResponseWriterCN{ instrumentedResponseWriter: irw, CloseNotifier: closeNotifier, } return irwCN, irwCN } return &irw, &irw } // GetResponseWriter returns the http.ResponseWriter from the provided // context. If not present, ErrNoResponseWriterContext is returned. 
The // returned instance provides instrumentation in the context. func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { v := ctx.Value("http.response") rw, ok := v.(http.ResponseWriter) if !ok || rw == nil { return nil, ErrNoResponseWriterContext } return rw, nil } // getVarsFromRequest let's us change request vars implementation for testing // and maybe future changes. var getVarsFromRequest = mux.Vars // WithVars extracts gorilla/mux vars and makes them available on the returned // context. Variables are available at keys with the prefix "vars.". For // example, if looking for the variable "name", it can be accessed as // "vars.name". Implementations that are accessing values need not know that // the underlying context is implemented with gorilla/mux vars. func WithVars(ctx Context, r *http.Request) Context { return &muxVarsContext{ Context: ctx, vars: getVarsFromRequest(r), } } // GetRequestLogger returns a logger that contains fields from the request in // the current context. If the request is not available in the context, no // fields will display. Request loggers can safely be pushed onto the context. func GetRequestLogger(ctx Context) Logger { return GetLogger(ctx, "http.request.id", "http.request.method", "http.request.host", "http.request.uri", "http.request.referer", "http.request.useragent", "http.request.remoteaddr", "http.request.contenttype") } // GetResponseLogger reads the current response stats and builds a logger. // Because the values are read at call time, pushing a logger returned from // this function on the context will lead to missing or invalid data. Only // call this at the end of a request, after the response has been written. func GetResponseLogger(ctx Context) Logger { l := getLogrusLogger(ctx, "http.response.written", "http.response.status", "http.response.contenttype") duration := Since(ctx, "http.request.startedat") if duration > 0 { l = l.WithField("http.response.duration", duration.String()) } return l } // httpRequestContext makes information about a request available to context. type httpRequestContext struct { Context startedAt time.Time id string r *http.Request } // Value returns a keyed element of the request for use in the context. To get // the request itself, query "request". For other components, access them as // "request.". 
For example, r.RequestURI func (ctx *httpRequestContext) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "http.request" { return ctx.r } if !strings.HasPrefix(keyStr, "http.request.") { goto fallback } parts := strings.Split(keyStr, ".") if len(parts) != 3 { goto fallback } switch parts[2] { case "uri": return ctx.r.RequestURI case "remoteaddr": return RemoteAddr(ctx.r) case "method": return ctx.r.Method case "host": return ctx.r.Host case "referer": referer := ctx.r.Referer() if referer != "" { return referer } case "useragent": return ctx.r.UserAgent() case "id": return ctx.id case "startedat": return ctx.startedAt case "contenttype": ct := ctx.r.Header.Get("Content-Type") if ct != "" { return ct } } } fallback: return ctx.Context.Value(key) } type muxVarsContext struct { Context vars map[string]string } func (ctx *muxVarsContext) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "vars" { return ctx.vars } if strings.HasPrefix(keyStr, "vars.") { keyStr = strings.TrimPrefix(keyStr, "vars.") } if v, ok := ctx.vars[keyStr]; ok { return v } } return ctx.Context.Value(key) } // instrumentedResponseWriterCN provides response writer information in a // context. It implements http.CloseNotifier so that users can detect // early disconnects. type instrumentedResponseWriterCN struct { instrumentedResponseWriter http.CloseNotifier } // instrumentedResponseWriter provides response writer information in a // context. This variant is only used in the case where CloseNotifier is not // implemented by the parent ResponseWriter. type instrumentedResponseWriter struct { http.ResponseWriter Context mu sync.Mutex status int written int64 } func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { n, err = irw.ResponseWriter.Write(p) irw.mu.Lock() irw.written += int64(n) // Guess the likely status if not set. 
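// As in net/http itself, a Write that happens before any explicit call to // WriteHeader implies a 200 OK, so record that status rather than leaving it // at its zero value.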
if irw.status == 0 { irw.status = http.StatusOK } irw.mu.Unlock() return } func (irw *instrumentedResponseWriter) WriteHeader(status int) { irw.ResponseWriter.WriteHeader(status) irw.mu.Lock() irw.status = status irw.mu.Unlock() } func (irw *instrumentedResponseWriter) Flush() { if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { flusher.Flush() } } func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "http.response" { return irw } if !strings.HasPrefix(keyStr, "http.response.") { goto fallback } parts := strings.Split(keyStr, ".") if len(parts) != 3 { goto fallback } irw.mu.Lock() defer irw.mu.Unlock() switch parts[2] { case "written": return irw.written case "status": return irw.status case "contenttype": contentType := irw.Header().Get("Content-Type") if contentType != "" { return contentType } } } fallback: return irw.Context.Value(key) } func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { if keyStr, ok := key.(string); ok { if keyStr == "http.response" { return irw } } return irw.instrumentedResponseWriter.Value(key) } distribution-2.3.0/context/http_test.go000066400000000000000000000144741265472114500203050ustar00rootroot00000000000000package context import ( "net/http" "net/http/httptest" "net/http/httputil" "net/url" "reflect" "testing" "time" ) func TestWithRequest(t *testing.T) { var req http.Request start := time.Now() req.Method = "GET" req.Host = "example.com" req.RequestURI = "/test-test" req.Header = make(http.Header) req.Header.Set("Referer", "foo.com/referer") req.Header.Set("User-Agent", "test/0.1") ctx := WithRequest(Background(), &req) for _, testcase := range []struct { key string expected interface{} }{ { key: "http.request", expected: &req, }, { key: "http.request.id", }, { key: "http.request.method", expected: req.Method, }, { key: "http.request.host", expected: req.Host, }, { key: "http.request.uri", expected: req.RequestURI, }, { key: "http.request.referer", expected: req.Referer(), }, { key: "http.request.useragent", expected: req.UserAgent(), }, { key: "http.request.remoteaddr", expected: req.RemoteAddr, }, { key: "http.request.startedat", }, } { v := ctx.Value(testcase.key) if v == nil { t.Fatalf("value not found for %q", testcase.key) } if testcase.expected != nil && v != testcase.expected { t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) } // Key specific checks! 
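// The request id and start time are generated inside WithRequest, so their // exact values cannot be asserted here; the checks below only verify their // types and rough bounds.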
switch testcase.key { case "http.request.id": if _, ok := v.(string); !ok { t.Fatalf("request id not a string: %v", v) } case "http.request.startedat": vt, ok := v.(time.Time) if !ok { t.Fatalf("value not a time: %v", v) } now := time.Now() if vt.After(now) { t.Fatalf("time generated too late: %v > %v", vt, now) } if vt.Before(start) { t.Fatalf("time generated too early: %v < %v", vt, start) } } } } type testResponseWriter struct { flushed bool status int written int64 header http.Header } func (trw *testResponseWriter) Header() http.Header { if trw.header == nil { trw.header = make(http.Header) } return trw.header } func (trw *testResponseWriter) Write(p []byte) (n int, err error) { if trw.status == 0 { trw.status = http.StatusOK } n = len(p) trw.written += int64(n) return } func (trw *testResponseWriter) WriteHeader(status int) { trw.status = status } func (trw *testResponseWriter) Flush() { trw.flushed = true } func TestWithResponseWriter(t *testing.T) { trw := testResponseWriter{} ctx, rw := WithResponseWriter(Background(), &trw) if ctx.Value("http.response") != rw { t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) } grw, err := GetResponseWriter(ctx) if err != nil { t.Fatalf("error getting response writer: %v", err) } if grw != rw { t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) } if ctx.Value("http.response.status") != 0 { t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) } if n, err := rw.Write(make([]byte, 1024)); err != nil { t.Fatalf("unexpected error writing: %v", err) } else if n != 1024 { t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) } if ctx.Value("http.response.status") != http.StatusOK { t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusOK) } if ctx.Value("http.response.written") != int64(1024) { t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) } // Make sure flush propagates rw.(http.Flusher).Flush() if !trw.flushed { t.Fatalf("response writer not flushed") } // Write another status and make sure context is correct. This normally // wouldn't work except for in this contrived testcase. rw.WriteHeader(http.StatusBadRequest) if ctx.Value("http.response.status") != http.StatusBadRequest { t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) } } func TestWithVars(t *testing.T) { var req http.Request vars := map[string]string{ "foo": "asdf", "bar": "qwer", } getVarsFromRequest = func(r *http.Request) map[string]string { if r != &req { t.Fatalf("unexpected request: %v != %v", r, req) } return vars } ctx := WithVars(Background(), &req) for _, testcase := range []struct { key string expected interface{} }{ { key: "vars", expected: vars, }, { key: "vars.foo", expected: "asdf", }, { key: "vars.bar", expected: "qwer", }, } { v := ctx.Value(testcase.key) if !reflect.DeepEqual(v, testcase.expected) { t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) } } } // SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test // RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten // at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header // just contains the IP address, it is different enough for testing. 
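// Consequently, the backend handler below asserts on the address derived from // the X-Forwarded-For (or X-Real-Ip) header via RemoteAddr rather than on // r.RemoteAddr itself.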
func TestRemoteAddr(t *testing.T) { var expectedRemote string backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() if r.RemoteAddr == expectedRemote { t.Errorf("Unexpected matching remote addresses") } actualRemote := RemoteAddr(r) if expectedRemote != actualRemote { t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) } w.WriteHeader(200) })) defer backend.Close() backendURL, err := url.Parse(backend.URL) if err != nil { t.Fatal(err) } proxy := httputil.NewSingleHostReverseProxy(backendURL) frontend := httptest.NewServer(proxy) defer frontend.Close() // X-Forwarded-For set by proxy expectedRemote = "127.0.0.1" proxyReq, err := http.NewRequest("GET", frontend.URL, nil) if err != nil { t.Fatal(err) } _, err = http.DefaultClient.Do(proxyReq) if err != nil { t.Fatal(err) } // RemoteAddr in X-Real-Ip getReq, err := http.NewRequest("GET", backend.URL, nil) if err != nil { t.Fatal(err) } expectedRemote = "1.2.3.4" getReq.Header["X-Real-ip"] = []string{expectedRemote} _, err = http.DefaultClient.Do(getReq) if err != nil { t.Fatal(err) } // Valid X-Real-Ip and invalid X-Forwarded-For getReq.Header["X-forwarded-for"] = []string{"1.2.3"} _, err = http.DefaultClient.Do(getReq) if err != nil { t.Fatal(err) } } distribution-2.3.0/context/logger.go000066400000000000000000000067351265472114500175470ustar00rootroot00000000000000package context import ( "fmt" "github.com/Sirupsen/logrus" "runtime" ) // Logger provides a leveled-logging interface. type Logger interface { // standard logger methods Print(args ...interface{}) Printf(format string, args ...interface{}) Println(args ...interface{}) Fatal(args ...interface{}) Fatalf(format string, args ...interface{}) Fatalln(args ...interface{}) Panic(args ...interface{}) Panicf(format string, args ...interface{}) Panicln(args ...interface{}) // Leveled methods, from logrus Debug(args ...interface{}) Debugf(format string, args ...interface{}) Debugln(args ...interface{}) Error(args ...interface{}) Errorf(format string, args ...interface{}) Errorln(args ...interface{}) Info(args ...interface{}) Infof(format string, args ...interface{}) Infoln(args ...interface{}) Warn(args ...interface{}) Warnf(format string, args ...interface{}) Warnln(args ...interface{}) } // WithLogger creates a new context with provided logger. func WithLogger(ctx Context, logger Logger) Context { return WithValue(ctx, "logger", logger) } // GetLoggerWithField returns a logger instance with the specified field key // and value without affecting the context. Extra specified keys will be // resolved from the context. func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) } // GetLoggerWithFields returns a logger instance with the specified fields // without affecting the context. Extra specified keys will be resolved from // the context. func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { // must convert from interface{} -> interface{} to string -> interface{} for logrus. lfields := make(logrus.Fields, len(fields)) for key, value := range fields { lfields[fmt.Sprint(key)] = value } return getLogrusLogger(ctx, keys...).WithFields(lfields) } // GetLogger returns the logger from the current context, if present. If one // or more keys are provided, they will be resolved on the context and // included in the logger. 
While context.Value takes an interface, any key // argument passed to GetLogger will be passed to fmt.Sprint when expanded as // a logging key field. If context keys are integer constants, for example, // it's recommended that a String method is implemented. func GetLogger(ctx Context, keys ...interface{}) Logger { return getLogrusLogger(ctx, keys...) } // getLogrusLogger returns the logrus logger for the context. If one or more // keys are provided, they will be resolved on the context and included in the // logger. Only use this function if specific logrus functionality is // required. func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { var logger *logrus.Entry // Get a logger, if it is present. loggerInterface := ctx.Value("logger") if loggerInterface != nil { if lgr, ok := loggerInterface.(*logrus.Entry); ok { logger = lgr } } if logger == nil { fields := logrus.Fields{} // Fill in the instance id, if we have it. instanceID := ctx.Value("instance.id") if instanceID != nil { fields["instance.id"] = instanceID } fields["go.version"] = runtime.Version() // If no logger is found, just return the standard logger. logger = logrus.StandardLogger().WithFields(fields) } fields := logrus.Fields{} for _, key := range keys { v := ctx.Value(key) if v != nil { fields[fmt.Sprint(key)] = v } } return logger.WithFields(fields) } distribution-2.3.0/context/trace.go000066400000000000000000000054521265472114500173610ustar00rootroot00000000000000package context import ( "runtime" "time" "github.com/docker/distribution/uuid" ) // WithTrace allocates a traced timing span in a new context. This allows a // caller to track the time between calling WithTrace and the returned done // function. When the done function is called, a log message is emitted with a // "trace.duration" field, corresponding to the elapsed time and a // "trace.func" field, corresponding to the function that called WithTrace. // // The logging keys "trace.id" and "trace.parent.id" are provided to implement // dapper-like tracing. This function should be complemented with a WithSpan // method that could be used for tracing distributed RPC calls. // // The main benefit of this function is to post-process log messages or // intercept them in a hook to provide timing data. Trace ids and parent ids // can also be linked to provide call tracing, if so required. // // Here is an example of the usage: // // func timedOperation(ctx Context) { // ctx, done := WithTrace(ctx) // defer done("this will be the log message") // // ... function body ... // } // // If the function ran for roughly 1s, such a usage would emit a log message // as follows: // // INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... // // Notice that the function name is automatically resolved, along with the // package, and a trace id is emitted that can be linked with parent ids. func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { if ctx == nil { ctx = Background() } pc, file, line, _ := runtime.Caller(1) f := runtime.FuncForPC(pc) ctx = &traced{ Context: ctx, id: uuid.Generate().String(), start: time.Now(), parent: GetStringValue(ctx, "trace.id"), fnname: f.Name(), file: file, line: line, } return ctx, func(format string, a ...interface{}) { GetLogger(ctx, "trace.duration", "trace.id", "trace.parent.id", "trace.func", "trace.file", "trace.line"). Debugf(format, a...) } } // traced represents a context that is traced for function call timing.
It // also provides fast lookup for the various attributes that are available on // the trace. type traced struct { Context id string parent string start time.Time fnname string file string line int } func (ts *traced) Value(key interface{}) interface{} { switch key { case "trace.start": return ts.start case "trace.duration": return time.Since(ts.start) case "trace.id": return ts.id case "trace.parent.id": if ts.parent == "" { return nil // must return nil to signal no parent. } return ts.parent case "trace.func": return ts.fnname case "trace.file": return ts.file case "trace.line": return ts.line } return ts.Context.Value(key) } distribution-2.3.0/context/trace_test.go000066400000000000000000000035161265472114500204170ustar00rootroot00000000000000package context import ( "runtime" "testing" "time" ) // TestWithTrace ensures that tracing has the expected values in the context. func TestWithTrace(t *testing.T) { pc, file, _, _ := runtime.Caller(0) // get current caller. f := runtime.FuncForPC(pc) base := []valueTestCase{ { key: "trace.id", notnilorempty: true, }, { key: "trace.file", expected: file, notnilorempty: true, }, { key: "trace.line", notnilorempty: true, }, { key: "trace.start", notnilorempty: true, }, } ctx, done := WithTrace(Background()) defer done("this will be emitted at end of test") checkContextForValues(t, ctx, append(base, valueTestCase{ key: "trace.func", expected: f.Name(), })) traced := func() { parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. pc, _, _, _ := runtime.Caller(0) // get current caller. f := runtime.FuncForPC(pc) ctx, done := WithTrace(ctx) defer done("this should be subordinate to the other trace") time.Sleep(time.Second) checkContextForValues(t, ctx, append(base, valueTestCase{ key: "trace.func", expected: f.Name(), }, valueTestCase{ key: "trace.parent.id", expected: parentID, })) } traced() time.Sleep(time.Second) } type valueTestCase struct { key string expected interface{} notnilorempty bool // just check not empty/not nil } func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { for _, testcase := range values { v := ctx.Value(testcase.key) if testcase.notnilorempty { if v == nil || v == "" { t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) } continue } if v != testcase.expected { t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) } } } distribution-2.3.0/context/util.go000066400000000000000000000014321265472114500172320ustar00rootroot00000000000000package context import ( "time" ) // Since looks up key, which should be a time.Time, and returns the duration // since that time. If the key is not found, the value returned will be zero. // This is helpful when inferring metrics related to context execution times. func Since(ctx Context, key interface{}) time.Duration { startedAtI := ctx.Value(key) if startedAtI != nil { if startedAt, ok := startedAtI.(time.Time); ok { return time.Since(startedAt) } } return 0 } // GetStringValue returns a string value from the context. The empty string // will be returned if not found. func GetStringValue(ctx Context, key interface{}) (value string) { stringi := ctx.Value(key) if stringi != nil { if valuev, ok := stringi.(string); ok { value = valuev } } return value } distribution-2.3.0/context/version.go000066400000000000000000000011121265472114500177350ustar00rootroot00000000000000package context // WithVersion stores the application version in the context. 
The new context // gets a logger to ensure log messages are marked with the application // version. func WithVersion(ctx Context, version string) Context { ctx = WithValue(ctx, "version", version) // push a new logger onto the stack return WithLogger(ctx, GetLogger(ctx, "version")) } // GetVersion returns the application version from the context. An empty // string may be returned if the version was not set on the context. func GetVersion(ctx Context) string { return GetStringValue(ctx, "version") } distribution-2.3.0/context/version_test.go000066400000000000000000000005561265472114500210070ustar00rootroot00000000000000package context import "testing" func TestVersionContext(t *testing.T) { ctx := Background() if GetVersion(ctx) != "" { t.Fatalf("context should not yet have a version") } expected := "2.1-whatever" ctx = WithVersion(ctx, expected) version := GetVersion(ctx) if version != expected { t.Fatalf("version was not set: %q != %q", version, expected) } } distribution-2.3.0/contrib/000077500000000000000000000000001265472114500157025ustar00rootroot00000000000000distribution-2.3.0/contrib/apache/000077500000000000000000000000001265472114500171235ustar00rootroot00000000000000distribution-2.3.0/contrib/apache/README.MD000066400000000000000000000024571265472114500203100ustar00rootroot00000000000000# Apache HTTPd sample for Registry v1, v2 and mirror 3 containers involved * Docker Registry v1 (registry 0.9.1) * Docker Registry v2 (registry 2.0.0) * Docker Registry v1 in mirror mode HTTP for mirror and HTTPS for v1 & v2 * http://registry.example.com proxies Docker Registry 1.0 in Mirror mode * https://registry.example.com proxies Docker Registry 1.0 or 2.0 in Hosting mode ## 3 Docker containers should be started * Docker Registry 1.0 in Mirror mode : port 5001 * Docker Registry 1.0 in Hosting mode : port 5000 * Docker Registry 2.0 in Hosting mode : port 5002 ### Registry v1 docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/docker-registry/storage/hosting-v1:/tmp -p 5000:5000 registry:0.9.1 ### Mirror docker run -d -e SETTINGS_FLAVOR=dev -e STANDALONE=false -e MIRROR_SOURCE=https://registry-1.docker.io -e MIRROR_SOURCE_INDEX=https://index.docker.io \ -e MIRROR_TAGS_CACHE_TTL=172800 -v /var/lib/docker-registry/storage/mirror:/tmp -p 5001:5000 registry:0.9.1 ### Registry v2 docker run -d -e SETTINGS_FLAVOR=dev -v /var/lib/axway/docker-registry/storage/hosting2-v2:/tmp -p 5002:5000 registry:2 # For Hosting mode access * users should have an account (valid-user) to be able to fetch images * only users using account docker-deployer will be allowed to push images distribution-2.3.0/contrib/apache/apache.conf000066400000000000000000000055311265472114500212170ustar00rootroot00000000000000# # Sample Apache 2.x configuration where : # ServerName registry.example.com ServerAlias www.registry.example.com ProxyRequests off ProxyPreserveHost on # no proxy for /error/ (Apache HTTPd error messages) ProxyPass /error/ !
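# Proxy the Docker v1 ping endpoint and API to the mirror registry listening on port 5001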
ProxyPass /_ping http://localhost:5001/_ping ProxyPassReverse /_ping http://localhost:5001/_ping ProxyPass /v1 http://localhost:5001/v1 ProxyPassReverse /v1 http://localhost:5001/v1 # Logs ErrorLog ${APACHE_LOG_DIR}/mirror_error_log CustomLog ${APACHE_LOG_DIR}/mirror_access_log combined env=!dontlog ServerName registry.example.com ServerAlias www.registry.example.com SSLEngine on SSLCertificateFile /etc/apache2/ssl/registry.example.com.crt SSLCertificateKeyFile /etc/apache2/ssl/registry.example.com.key # Higher Strength SSL Ciphers SSLProtocol all -SSLv2 -SSLv3 -TLSv1 SSLCipherSuite RC4-SHA:HIGH SSLHonorCipherOrder on # Logs ErrorLog ${APACHE_LOG_DIR}/registry_error_ssl_log CustomLog ${APACHE_LOG_DIR}/registry_access_ssl_log combined env=!dontlog Header always set "Docker-Distribution-Api-Version" "registry/2.0" Header onsuccess set "Docker-Distribution-Api-Version" "registry/2.0" RequestHeader set X-Forwarded-Proto "https" ProxyRequests off ProxyPreserveHost on # no proxy for /error/ (Apache HTTPd error messages) ProxyPass /error/ ! # # Registry v1 # ProxyPass /v1 http://localhost:5000/v1 ProxyPassReverse /v1 http://localhost:5000/v1 ProxyPass /_ping http://localhost:5000/_ping ProxyPassReverse /_ping http://localhost:5000/_ping # Authentication required for push Order deny,allow Allow from all AuthName "Registry Authentication" AuthType basic AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" # Read access to authenticated users Require valid-user # Write access to docker-deployer account only Require user docker-deployer # Allow ping to run unauthenticated. Satisfy any Allow from all # Allow ping to run unauthenticated. Satisfy any Allow from all # # Registry v2 # ProxyPass /v2 http://localhost:5002/v2 ProxyPassReverse /v2 http://localhost:5002/v2 Order deny,allow Allow from all AuthName "Registry Authentication" AuthType basic AuthUserFile "/etc/apache2/htpasswd/registry-htpasswd" # Read access to authenticated users Require valid-user # Write access to docker-deployer only Require user docker-deployer distribution-2.3.0/contrib/ceph/000077500000000000000000000000001265472114500166215ustar00rootroot00000000000000distribution-2.3.0/contrib/ceph/ci-setup.sh000077500000000000000000000050531265472114500207140ustar00rootroot00000000000000#! /bin/bash # # Ceph cluster setup in Circle CI # set -x set -e set -u NODE=$(hostname) CEPHDIR=/tmp/ceph mkdir cluster pushd cluster # Install retries=0 until [ $retries -ge 5 ]; do pip install ceph-deploy && break retries=$[$retries+1] sleep 30 done retries=0 until [ $retries -ge 5 ]; do ceph-deploy install --release hammer $NODE && break retries=$[$retries+1] sleep 30 done retries=0 until [ $retries -ge 5 ]; do ceph-deploy pkg --install librados-dev $NODE && break retries=$[$retries+1] sleep 30 done echo $(ip route get 1 | awk '{print $NF;exit}') $(hostname) >> /etc/hosts ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N "" cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys ssh-keyscan $NODE >> ~/.ssh/known_hosts ceph-deploy new $NODE cat >> ceph.conf < 74acc70fa106 Removing intermediate container edb84c2b40cb Successfully built 74acc70fa106 The command outputs its progress until it completes. 4. Start your configuration with compose. $ docker-compose up Recreating compose_registryv1_1... Recreating compose_registryv2_1... Recreating compose_nginx_1... Attaching to compose_registryv1_1, compose_registryv2_1, compose_nginx_1 ... 5. In another terminal, display the running configuration.
$ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a81ad2557702 compose_nginx:latest "nginx -g 'daemon of 8 minutes ago Up 8 minutes 80/tcp, 443/tcp, 0.0.0.0:5000->5000/tcp compose_nginx_1 0618437450dd compose_registryv2:latest "registry cmd/regist 8 minutes ago Up 8 minutes 0.0.0.0:32777->5000/tcp compose_registryv2_1 aa82b1ed8e61 registry:latest "docker-registry" 8 minutes ago Up 8 minutes 0.0.0.0:32776->5000/tcp compose_registryv1_1 ### Explore a bit 1. Check for TLS on your `nginx` server. $ curl -v https://localhost:5000 * Rebuilt URL to: https://localhost:5000/ * Hostname was NOT found in DNS cache * Trying 127.0.0.1... * Connected to localhost (127.0.0.1) port 5000 (#0) * successfully set certificate verify locations: * CAfile: none CApath: /etc/ssl/certs * SSLv3, TLS handshake, Client hello (1): * SSLv3, TLS handshake, Server hello (2): * SSLv3, TLS handshake, CERT (11): * SSLv3, TLS alert, Server hello (2): * SSL certificate problem: self signed certificate * Closing connection 0 curl: (60) SSL certificate problem: self signed certificate More details here: http://curl.haxx.se/docs/sslcerts.html 2. Tag the `v1` registry image. $ docker tag registry:latest localhost:5000/registry_one:latest 3. Push it to localhost. $ docker push localhost:5000/registry_one:latest If you are using the 1.6 Docker client, this pushes the image to the `v2` registry. 4. Use `curl` to list the image in the registry. $ curl -v -X GET http://localhost:32777/v2/registry1/tags/list * Hostname was NOT found in DNS cache * Trying 127.0.0.1... * Connected to localhost (127.0.0.1) port 32777 (#0) > GET /v2/registry1/tags/list HTTP/1.1 > User-Agent: curl/7.36.0 > Host: localhost:32777 > Accept: */* > < HTTP/1.1 200 OK < Content-Type: application/json; charset=utf-8 < Docker-Distribution-Api-Version: registry/2.0 < Date: Tue, 14 Apr 2015 22:34:13 GMT < Content-Length: 39 < {"name":"registry1","tags":["latest"]} * Connection #0 to host localhost left intact This example refers to the specific port assigned to the 2.0 registry. You saw this port earlier, when you used `docker ps` to show your running containers.
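If you would rather not read the port out of the `docker ps` output by hand, Docker can report the mapping for you; assuming the Compose project is named `compose` as in the output above, either of the following should print the host port bound to the v2 registry's port 5000: $ docker-compose port registryv2 5000 $ docker port compose_registryv2_1 5000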
distribution-2.3.0/contrib/compose/docker-compose.yml000066400000000000000000000003341265472114500230040ustar00rootroot00000000000000nginx: build: "nginx" ports: - "5000:5000" links: - registryv1:registryv1 - registryv2:registryv2 registryv1: image: registry ports: - "5000" registryv2: build: "../../" ports: - "5000" distribution-2.3.0/contrib/compose/nginx/000077500000000000000000000000001265472114500204725ustar00rootroot00000000000000distribution-2.3.0/contrib/compose/nginx/Dockerfile000066400000000000000000000003431265472114500224640ustar00rootroot00000000000000FROM nginx:1.7 COPY nginx.conf /etc/nginx/nginx.conf COPY registry.conf /etc/nginx/conf.d/registry.conf COPY docker-registry.conf /etc/nginx/docker-registry.conf COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf distribution-2.3.0/contrib/compose/nginx/docker-registry-v2.conf000066400000000000000000000005701265472114500250050ustar00rootroot00000000000000proxy_pass http://docker-registry-v2; proxy_set_header Host $http_host; # required for docker client's sake proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_read_timeout 900; distribution-2.3.0/contrib/compose/nginx/docker-registry.conf000066400000000000000000000007441265472114500244630ustar00rootroot00000000000000proxy_pass http://docker-registry; proxy_set_header Host $http_host; # required for docker client's sake proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Authorization ""; # For basic auth through nginx in v1 to work, please comment this line proxy_read_timeout 900; distribution-2.3.0/contrib/compose/nginx/nginx.conf000066400000000000000000000011271265472114500224650ustar00rootroot00000000000000user nginx; worker_processes 1; error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; keepalive_timeout 65; include /etc/nginx/conf.d/*.conf; } distribution-2.3.0/contrib/compose/nginx/registry.conf000066400000000000000000000021041265472114500232060ustar00rootroot00000000000000# Docker registry proxy for api versions 1 and 2 upstream docker-registry { server registryv1:5000; } upstream docker-registry-v2 { server registryv2:5000; } # No client auth or TLS server { listen 5000; server_name localhost; # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) chunked_transfer_encoding on; location /v2/ { # Do not allow connections from docker 1.5 and earlier # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { return 404; } # To add basic authentication to v2 use auth_basic setting plus add_header # auth_basic "registry.localhost"; # auth_basic_user_file test.password; # add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; include docker-registry-v2.conf; } location / { include docker-registry.conf; } } 
distribution-2.3.0/contrib/docker-integration/000077500000000000000000000000001265472114500214725ustar00rootroot00000000000000distribution-2.3.0/contrib/docker-integration/Dockerfile000066400000000000000000000025501265472114500234660ustar00rootroot00000000000000FROM debian:jessie MAINTAINER Docker Distribution Team # compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ # For DIND ca-certificates \ curl \ iptables \ procps \ e2fsprogs \ xz-utils \ # For build build-essential \ file \ git \ net-tools \ && apt-get clean && rm -rf /var/lib/apt/lists/* # Install Docker ENV VERSION 1.7.1 RUN curl -L -o /usr/local/bin/docker https://test.docker.com/builds/Linux/x86_64/docker-${VERSION} \ && chmod +x /usr/local/bin/docker # Install DIND RUN curl -L -o /dind https://raw.githubusercontent.com/docker/docker/v1.8.1/hack/dind \ && chmod +x /dind # Install bats RUN cd /usr/local/src/ \ && git clone https://github.com/sstephenson/bats.git \ && cd bats \ && ./install.sh /usr/local # Install docker-compose RUN curl -L https://github.com/docker/compose/releases/download/1.3.3/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose \ && chmod +x /usr/local/bin/docker-compose RUN mkdir -p /go/src/github.com/docker/distribution WORKDIR /go/src/github.com/docker/distribution/contrib/docker-integration VOLUME /var/lib/docker ENTRYPOINT ["/dind"] distribution-2.3.0/contrib/docker-integration/README.md000066400000000000000000000073731265472114500227630ustar00rootroot00000000000000# Docker Registry Integration Testing These integration tests cover interactions between the Docker daemon and the registry server. All tests are run using the docker cli. The compose configuration is intended to setup a testing environment for Docker using multiple registry configurations. These configurations include different combinations of a v1 and v2 registry as well as TLS configurations. ## Running inside of Docker ### Get integration container The container image to run the integation tests will need to be pulled or built locally. *Building locally* ``` $ docker build -t distribution/docker-integration . ``` ### Run script Invoke the tests within Docker through the `run.sh` script. ``` $ ./run.sh ``` Run with aufs driver and tmp volume **NOTE: Using a volume will prevent multiple runs from needing to re-pull images** ``` $ DOCKER_GRAPHDRIVER=aufs DOCKER_VOLUME=/tmp/volume ./run.sh ``` ### Example developer flow These tests are useful for developing both as a registry and docker core developer. The following setup may be used to do integration testing between development versions Insert into your `.zshrc` or `.bashrc` ``` # /usr/lib/docker for Docker-in-Docker # Set this directory to make each invocation run much faster, without # the need to repull images. export DOCKER_VOLUME=$HOME/.docker-test-volume # Use overlay for all Docker testing, try aufs if overlay not supported export DOCKER_GRAPHDRIVER=overlay # Name this according to personal preference function rdtest() { if [ "$1" != "" ]; then DOCKER_BINARY=$GOPATH/src/github.com/docker/docker/bundles/$1/binary/docker if [ ! 
-f $DOCKER_BINARY ]; then current_version=`cat $GOPATH/src/github.com/docker/docker/VERSION` echo "$DOCKER_BINARY does not exist" echo "Current checked out docker version: $current_version" echo "Checkout desired version and run 'make binary' from $GOPATH/src/github.com/docker/docker" return 1 fi fi $GOPATH/src/github.com/docker/distribution/contrib/docker-integration/run.sh } ``` Run with Docker release version ``` $ rdtest ``` Run using local development version of docker ``` $ cd $GOPATH/src/github.com/docker/docker $ make binary $ rdtest `cat VERSION` ``` ## Running manually outside of Docker ### Install Docker Compose [Docker Compose Installation Guide](https://docs.docker.com/compose/install/) ### Start compose setup ``` docker-compose up ``` ### Install Certificates The certificates must be installed in /etc/docker/cert.d in order to use TLS client auth and use the CA certificate. ``` sudo sh ./install_certs.sh ``` ### Test with Docker Tag an image as with any other private registry. Attempt to push the image. ``` docker pull hello-world docker tag hello-world localhost:5440/hello-world docker push localhost:5440/hello-world docker tag hello-world localhost:5441/hello-world docker push localhost:5441/hello-world # Perform login using user `testuser` and password `passpassword` ``` ### Set /etc/hosts entry Find the non-localhost ip address of local machine ### Run bats Run the bats tests after updating /etc/hosts, installing the certificates, and running the `docker-compose` script. ``` bats -p . ``` ## Configurations Port | V2 | V1 | TLS | Authentication --- | --- | --- | --- | --- 5000 | yes | yes | no | none 5001 | no | yes | no | none 5002 | yes | no | no | none 5011 | no | yes | yes | none 5440 | yes | yes | yes | none 5441 | yes | yes | yes | basic (testuser/passpassword) 5442 | yes | yes | yes | TLS client 5443 | yes | yes | yes | TLS client (no CA) 5444 | yes | yes | yes | TLS client + basic (testuser/passpassword) 5445 | yes | yes | yes (no CA) | none 5446 | yes | yes | yes (no CA) | basic (testuser/passpassword) 5447 | yes | yes | yes (no CA) | TLS client 5448 | yes | yes | yes (SSLv3) | none distribution-2.3.0/contrib/docker-integration/docker-compose.yml000066400000000000000000000006721265472114500251340ustar00rootroot00000000000000nginx: build: "nginx" ports: - "5000:5000" - "5001:5001" - "5002:5002" - "5011:5011" - "5440:5440" - "5441:5441" - "5442:5442" - "5443:5443" - "5444:5444" - "5445:5445" - "5446:5446" - "5447:5447" - "5448:5448" links: - registryv1:registryv1 - registryv2:registryv2 registryv1: image: registry:0.9.1 ports: - "5000" registryv2: build: "../../" ports: - "5000" distribution-2.3.0/contrib/docker-integration/helpers.bash000066400000000000000000000010131265472114500237660ustar00rootroot00000000000000# Start docker daemon function start_daemon() { # Drivers to use for Docker engines the tests are going to create. STORAGE_DRIVER=${STORAGE_DRIVER:-overlay} EXEC_DRIVER=${EXEC_DRIVER:-native} docker --daemon --log-level=panic \ --storage-driver="$STORAGE_DRIVER" --exec-driver="$EXEC_DRIVER" & DOCKER_PID=$! # Wait for it to become reachable. 
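# Poll `docker version` once per second, giving up after ten attempts if the daemon never becomes responsive.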
tries=10 until docker version &> /dev/null; do (( tries-- )) if [ $tries -le 0 ]; then echo >&2 "error: daemon failed to start" exit 1 fi sleep 1 done } distribution-2.3.0/contrib/docker-integration/install_certs.sh000066400000000000000000000032351265472114500246770ustar00rootroot00000000000000#!/bin/sh set -e hostname=$1 if [ "$hostname" = "" ]; then hostname="localhost" fi mkdir -p /etc/docker/certs.d/$hostname:5011 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5011/ca.crt mkdir -p /etc/docker/certs.d/$hostname:5440 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5440/ca.crt mkdir -p /etc/docker/certs.d/$hostname:5441 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5441/ca.crt mkdir -p /etc/docker/certs.d/$hostname:5442 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5442/ca.crt cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5442/client.cert cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5442/client.key mkdir -p /etc/docker/certs.d/$hostname:5443 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5443/ca.crt cp ./nginx/ssl/registry-noca+client-cert.pem /etc/docker/certs.d/$hostname:5443/client.cert cp ./nginx/ssl/registry-noca+client-key.pem /etc/docker/certs.d/$hostname:5443/client.key mkdir -p /etc/docker/certs.d/$hostname:5444 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5444/ca.crt cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5444/client.cert cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5444/client.key mkdir -p /etc/docker/certs.d/$hostname:5447 cp ./nginx/ssl/registry-ca+client-cert.pem /etc/docker/certs.d/$hostname:5447/client.cert cp ./nginx/ssl/registry-ca+client-key.pem /etc/docker/certs.d/$hostname:5447/client.key mkdir -p /etc/docker/certs.d/$hostname:5448 cp ./nginx/ssl/registry-ca+ca.pem /etc/docker/certs.d/$hostname:5448/ca.crt distribution-2.3.0/contrib/docker-integration/nginx/000077500000000000000000000000001265472114500226155ustar00rootroot00000000000000distribution-2.3.0/contrib/docker-integration/nginx/Dockerfile000066400000000000000000000006251265472114500246120ustar00rootroot00000000000000FROM nginx:1.9 COPY nginx.conf /etc/nginx/nginx.conf COPY registry.conf /etc/nginx/conf.d/registry.conf COPY docker-registry.conf /etc/nginx/docker-registry.conf COPY docker-registry-v2.conf /etc/nginx/docker-registry-v2.conf COPY registry-noauth.conf /etc/nginx/registry-noauth.conf COPY registry-basic.conf /etc/nginx/registry-basic.conf COPY test.passwd /etc/nginx/test.passwd COPY ssl /etc/nginx/ssl distribution-2.3.0/contrib/docker-integration/nginx/docker-registry-v2.conf000066400000000000000000000005701265472114500271300ustar00rootroot00000000000000proxy_pass http://docker-registry-v2; proxy_set_header Host $http_host; # required for docker client's sake proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; proxy_read_timeout 900; distribution-2.3.0/contrib/docker-integration/nginx/docker-registry.conf000066400000000000000000000007301265472114500266010ustar00rootroot00000000000000proxy_pass http://docker-registry; proxy_set_header Host $http_host; # required for docker client's sake proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; 
proxy_set_header Authorization ""; # see https://github.com/docker/docker-registry/issues/170 proxy_read_timeout 900; distribution-2.3.0/contrib/docker-integration/nginx/nginx.conf000066400000000000000000000011271265472114500246100ustar00rootroot00000000000000user nginx; worker_processes 1; error_log /var/log/nginx/error.log warn; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /var/log/nginx/access.log main; sendfile on; keepalive_timeout 65; include /etc/nginx/conf.d/*.conf; } distribution-2.3.0/contrib/docker-integration/nginx/registry-basic.conf000066400000000000000000000006121265472114500264120ustar00rootroot00000000000000client_max_body_size 0; chunked_transfer_encoding on; location /v2/ { auth_basic "registry.localhost"; auth_basic_user_file test.passwd; add_header 'Docker-Distribution-Api-Version' 'registry/2.0' always; include docker-registry-v2.conf; } location / { auth_basic "registry.localhost"; auth_basic_user_file test.passwd; include docker-registry.conf; } distribution-2.3.0/contrib/docker-integration/nginx/registry-noauth.conf000066400000000000000000000002661265472114500266340ustar00rootroot00000000000000client_max_body_size 0; chunked_transfer_encoding on; location /v2/ { include docker-registry-v2.conf; } location / { include docker-registry.conf; } distribution-2.3.0/contrib/docker-integration/nginx/registry.conf000066400000000000000000000165541265472114500253470ustar00rootroot00000000000000# Docker registry proxy for api versions 1 and 2 upstream docker-registry { server registryv1:5000; } upstream docker-registry-v2 { server registryv2:5000; } # No client auth or TLS server { listen 5000; server_name localhost; # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) chunked_transfer_encoding on; location /v2/ { # Do not allow connections from docker 1.5 and earlier # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { return 404; } include docker-registry-v2.conf; } location / { include docker-registry.conf; } } # No client auth or TLS (V1 Only) server { listen 5001; server_name localhost; # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) chunked_transfer_encoding on; location / { include docker-registry.conf; } } # No client auth or TLS (V2 Only) server { listen 5002; server_name localhost; # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) chunked_transfer_encoding on; location / { include docker-registry-v2.conf; } } # TLS localhost (V1 Only) server { listen 5011; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; client_max_body_size 0; chunked_transfer_encoding on; location / { include docker-registry.conf; } } # TLS localregistry (V1 Only) server { listen 5011; server_name localregistry; ssl on; ssl_certificate 
/etc/nginx/ssl/registry-ca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; client_max_body_size 0; chunked_transfer_encoding on; location / { include docker-registry.conf; } } # TLS Configuration chart # Username/Password: testuser/passpassword # | ca | client | basic | notes # 5440 | yes | no | no | Tests CA certificate # 5441 | yes | no | yes | Tests basic auth over TLS # 5442 | yes | yes | no | Tests client auth with client CA # 5443 | yes | yes | no | Tests client auth without client CA # 5444 | yes | yes | yes | Tests using basic auth + tls auth # 5445 | no | no | no | Tests insecure using TLS # 5446 | no | no | yes | Tests sending credentials to server with insecure TLS # 5447 | no | yes | no | Tests client auth to insecure # 5448 | yes | no | no | Bad SSL version server { listen 5440; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; include registry-noauth.conf; } server { listen 5441; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; include registry-basic.conf; } server { listen 5442; listen 5443; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; ssl_verify_client on; include registry-noauth.conf; } server { listen 5444; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; ssl_verify_client on; include registry-basic.conf; } server { listen 5445; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; include registry-noauth.conf; } server { listen 5446; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; include registry-basic.conf; } server { listen 5447; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-noca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-noca+localhost-key.pem; ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; ssl_verify_client on; include registry-noauth.conf; } server { listen 5448; server_name localhost; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localhost-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localhost-key.pem; ssl_protocols SSLv3; include registry-noauth.conf; } # Add configuration for localregistry server_name # Requires configuring /etc/hosts to use # Set /etc/hosts entry to external IP, not 127.0.0.1 for testing # Docker secure/insecure registry features server { listen 5440; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; include registry-noauth.conf; } server { listen 5441; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; include registry-basic.conf; } server { listen 5442; listen 5443; server_name localregistry; ssl on; 
ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; ssl_verify_client on; include registry-noauth.conf; } server { listen 5444; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; ssl_verify_client on; include registry-basic.conf; } server { listen 5445; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; include registry-noauth.conf; } server { listen 5446; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; include registry-basic.conf; } server { listen 5447; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-noca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-noca+localregistry-key.pem; ssl_client_certificate /etc/nginx/ssl/registry-ca+ca.pem; ssl_verify_client on; include registry-noauth.conf; } server { listen 5448; server_name localregistry; ssl on; ssl_certificate /etc/nginx/ssl/registry-ca+localregistry-cert.pem; ssl_certificate_key /etc/nginx/ssl/registry-ca+localregistry-key.pem; ssl_protocols SSLv3; include registry-noauth.conf; } distribution-2.3.0/contrib/docker-integration/nginx/ssl/000077500000000000000000000000001265472114500234165ustar00rootroot00000000000000distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+ca.pem000066400000000000000000000033651265472114500267400ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE9TCCAt+gAwIBAgIQMsdPWoLAso/tIOvLk8R/sDALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw NTQwMVoXDTE4MDUxMDIwNTQwMVowJjERMA8GA1UEChMIUXVpY2tUTFMxETAPBgNV BAMTCFF1aWNrVExTMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1YeX GTvXPKlWA2lMbCvIGB9JYld/otf8aqs6euVJK1f09ngj5b6VoVlI8o1ScVcHKlKx BGfPMThnM7fiEmsfDSPuCIlGmTqR0t4t9dHRnLBGbZmR8JdAs7LKpP+PFYu0JTIT wFcjXIs+45cIF2HpsYY6zkj0bmNsyYmT1U1BTW+qqmhvc0Jkr+ikElOQ93Pn7zIO cXtxdERdzdzXY5cfL3CCaoJDgXOsKPQfYrCi5Zl6sLZVBkIc6Q2fErSIjTp45+NY AjiOxfUT0MOFtA0/HzYvVp3gTNPGEWM3dF1hwzCqJ32odbw/3TiFCEeC1B82p1sR sgoFZ6Vbfy9fMhB5S7BBtbqF09Yq/PMM3drOvWIxMF4aOY55ilrtKVwmnckiB0mE CPOColUUyiWIwwvp82InYsX5ekfS4x1mX1iz8zQEuTF5QHdKiUfd4A33ZMf0Ve6p y9SaMmos99uVQMzWlwj7nVACXjb9Ee6MY/ePRl7Z2gBxEYV41SGFRg8LNkQ//fYk o2vJ4Bp4aOh/O3ZQNv1eqEDmf/Su5lYCzURyQ2srcRRdwpteDPX+NHYn2d07knHN NQvOJn6EkcsDbgp0vSr6mFDv2GZWkTOAd8jZyrcErrLHAxRNm0Va+CEIKLhswf1G Y2kFkPL1otI8OSDvdJSjZ2GjRSwXhM2Mf3PzfAkCAwEAAaMjMCEwDgYDVR0PAQH/ BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4ICAQDBxOHKnF9z PZWPNKDRmBPtmnU2IHh6JJ9HzqGALJJbBU0MUSD/aLBBkYeS0YSHgYZ1hXLsfuRU lm/czV41hU1FTDqS2fFpcAAGH+6/rwyfrz+GYr2K4b/ijCwOMbMrDWO54zqZT3KU GFBpkrh4fNyKdgUNJsy0Q0it3gOGSUmLvEQUzqxPFVz7h/pF/Cecr0/kpjbpsxna XQkhtDyKDIQfPCq8Ci1vox5WvBbBkdzDtyCm+KSb6VC3pCX6LV5NkS7YM7mtscTi QdYfLbKX05kUVG2R9SShJn5BSXzGk9M5FR5koGY0lMHwmJqaOqazXjqa1jR7UNDK UyExHIXSqJ+nCf4bChEsaC1uwu3Gr7PfP41Zb2U3Raf8UmFnbz6Hx0sS4zBvyJ5w Ntemve4M1mB7++oLZ4PkuwK82SkQ8YK0z+lGJQRjg/HP3fVETV8TlIPJAvg7bRnH sMrLb/V+K6iY+08kQ2rpU02itRjKnU/DLoha4KVjafY8eIcIR2lpwrYjx+KYpkcF AMEC7MnuzhyUfDL++GO6XGwRnx2E54MnKtkrECObMSzwuLysPmjhrEUH6YR7zGib 
KmN6vQkA4s5053R+Tu0k1JGaw90SfvcW4bxGcFjU4Kg0KqlY1y8tnt+ZiHmK0naA KauB3KY1NiL+Ng5DCzNdkwDkWH78ZguI2w== -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+client-cert.pem000066400000000000000000000033651265472114500305660ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE9TCCAt+gAwIBAgIRAKbgxG1zgQI81ISaHxqLfpcwCwYJKoZIhvcNAQELMCYx ETAPBgNVBAoTCFF1aWNrVExTMREwDwYDVQQDEwhRdWlja1RMUzAeFw0xNTA1MjYy MDU0MjJaFw0xODA1MTAyMDU0MjJaMBMxETAPBgNVBAoTCFF1aWNrVExTMIICIjAN BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/ oARHbx59G+GOeGkrwG6ZWSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8M WpLxp5U9LyYkv0AiSPfT2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/ MgJbdTylEq1UcZSLMuky+RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7II hGlhziLVTKV9W1RP8Aop8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4 nFwmuhOo8gvw/HhzYcxyMHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCai gwUNzfe4/dHeCk/r3pteWOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru 5QqKMrbSlOcd6yHT6NM1ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/ Vlp5N+WRjDpsBscR8kt2Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoO nhRqhl2PSphcWdimk8Bwf5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3j NLQ8EmHWaZlJSeW4BiDYsXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeB twZJXIXR6Jc8hgsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoG CCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQCl0cTLbLIn XFuxreei+y6TlG2Z5XcxJ84mr8VLAaQMlJOLZV0O/suFBu9KqBuvPaHhGRnKE2uw Vxdj9qaDdvmvuzi4jYyUA/sQuqq1+wHwGTadOi9r0IsL8OxzsG16OlhuXzhoQVdw C9z1jad4HC7uihQ5yhl2ltAA+h5G0Sr1b9El2mx4p6BV+okmTvrqrmjshQb1GZwx jG6SJ/uvjGf7rn09ZyYafF9ZDTMNodNXjW8orqGlFdXZLPFJ9agUFfwWfqD2lrtm Fu+Ei0ZvKOtyzmh06eO2aGAHJCBTfcDM4tBKBKp0MOMoZkcQQDNpSyI12j6s1wtx /1dC8QDyfFpZFXTbKn3q+6MpR+u5zqVquYjwP5DqGTvX0e1sLSthv7LRiOi0qHv1 bZ8JoWhRMNumui9mzwar5t20ExcWxGxizZY+t+OIj4kaAeRoKK6r6FrYBnTjM+iR +xtML5UHPOSmYfNcai0Wn4T7hwpgnCJ+K7qGYjFUCarsINppQEwkxHAvuX+asc38 nA0wd7ByulkMJph0gP6j6LuJf28JODi6EQ7FcQItMeTuPrc+mpqJ4jP7vTTSJG7Q wvqXLMgFQFR+2PG0s10hbY/Y/nwZAROfAs7ADED+EcDPTl/+XjVyo/aYIeOb/07W SpS/cacZYUsSLgB4cWbxElcc/p7CW1PbOA== -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+client-key.pem000066400000000000000000000062531265472114500304200ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKQIBAAKCAgEAq0Pc8DQ9AyvokFzm9v4a+29TCA3/oARHbx59G+GOeGkrwG6Z WSZa/oNEJf3NJcU00V04k+fQuVoYBCgBXec9TEBvXa8MWpLxp5U9LyYkv0AiSPfT 2fJEE8mC+isMl+DbmgBcShwRXpeZQyIbEJhedS8mIjW/MgJbdTylEq1UcZSLMuky +RWv10dw02fLuN1302OgfJRZooPug9rPYHHGbTB0o7IIhGlhziLVTKV9W1RP8Aop 8TamSD85OV6shDaCvmMFr1YNDjcJJ5MGMaSmq0Krq9v4nFwmuhOo8gvw/HhzYcxy MHnqMt6EgvbVWwXOoW7xiI3BEDFV33xgTp61bFpcdCaigwUNzfe4/dHeCk/r3pte WOxH1bvcxUlmUB65wjRAwKuIX8Z0hC4ZlM30o+z11Aru5QqKMrbSlOcd6yHT6NM1 ZRyD+nbFORqB8W51g344eYl0zqQjxTQ0TNjJWDR2RWB/Vlp5N+WRjDpsBscR8kt2 Q1My17gWzvHfijGETZpbvmo2f+Keqc9fcfzkIe/VZFoOnhRqhl2PSphcWdimk8Bw f5jC2uDAXWCdvVWvRSP4Xg8zpDwLhlsfLaWVH9n+WG3jNLQ8EmHWaZlJSeW4BiDY sXmpTAkeLmwoS+pk2WL0TSQ7+S3DyrmTeVANHipNQZeBtwZJXIXR6Jc8hgsCAwEA AQKCAgBJcL1iR5ROMtr0ZNIp4gciALfjQVV3gb48GR/e/9b/LWI0j3i0sOzeLN3h SLda1fjzOn1Td1ma0dZwmdMUOF+hvhPDYZfzkwWLLkThXgLt/At3rMYstGWa8pN2 wVUSH7sri7IHmYedP3baQdrHP/9pUsGQc+m8ASTE3i+PFcKbPe5+818HTtRrhVgN X3oNmPKUNCmSom7ZcKer5P1+Ruum0NuDgomCdkoZgfhjeKeLrVjl/wXDSQL/AhWA 02c4/sML7xx19nl8uf7z+Gj0ir1pvRouhRJTwnRc4KdWu+Yn7WLU8j2ZKf5St/as zjnpYVEdCp0KSHccgXtobUZDEG2NCHmM6gR2j3qgoUAYjHyqPYlph2r5C47q+p4c dDWkpwZwGiuYq9qpZj24X6BfppxExcX6AwOgFLZLp80IynwrMVxFsDd2J+KpKRQ1 +ZtYPcULwInF9MNi/dv84pxGOmmOaIUyjN8Sw4eqANU4T5uvTjUj7Ou6KYyfmxgG 
y++vjpRN7tN1t1Hwde8SVWobvmhU+5SJVHV8INoJD7uciaevPo9pt833SQTtDXeY PVBhOKO7thAxdUiqlU/1nGTXnf1VO6wAjaVYoTnP4tJ97WuTptwd2F5znVWHFGVh lzJAzmFOuyCnRnInsf4n5EmWJnT7XF2CofQqAJ8NIddrU8GnQQKCAQEAyqWAiPMK I/dMzlS7oJGlhbKZ5R4buc+EoZqtW7/8/S+0L6IaQvpEUilD+aDQyaxXjoKiQQL+ 0UeeSmF/zU5BsOTpB8AuJUfYoUe0N+x7hO5eIcoCB/QWYX+iC3tCN4j1Iwt6VliV PBYEiLUYPngSIHob/nK8UtgxrWQ3Fik9XJtWhePHrvMvDBalgCKdnyhuucGxKUjc TtPcyMFdi0z4Kt/FAm+5u/v4ZkO909Ish0FrAqQ9t5ETfvTTTYKBmzny6/LSPTK9 0XIsHltuC1xG4vGQsES/Ph++Yj3Vn011FqvFZeBUHbfcQuB4h5wcb+90d4GU1kux eabsHPIZKrlN4QKCAQEA2Fs8NAN5K9i7qbxZCJPi6DJV6XMznk6JVGb+qkkChCyq IOXb95+c9CIpe6w2d3res3zvML3zbdz2Lyp9G0ve6tSlOaSnHeyIxZ5SRB+yQrcF GXtsx370bOGjCi1/NH85kwKlMuROFJKleJQv8rKpIEo5aPSPV9Cc/VsUqBpvR+O0 U1HMv57P4yJA/ddw6imHJBl3jTmWBpK4B+LBsCbdypxdVoO8t32Lb2BqDTaPJfYU RJUpjn/efLLoP6CWxYtqpUlY5tc7NJGAokl8Fo1mPn02klydvs09uiXE80Li2Hoc /meMH07Lbt2VTw6iGNRX6VpIHEUZGZeS6rbAvO4ZawKCAQEAjOtGVPXdyWEB0kHu MBzYY/7tMf0b/rymWNL9Vt5NiauQu8cYSBdNR21WzdLdHkFwqbOCLX9twA7zrnna q+SNnfuxaShlbptls9HvKyySQMCaSRj3DJzaq3ZcM2vFgmUFQxeKPV1geeY9xOta LqbExDzmFq2m9F1PPmqAPDL1bt6+7mCVzb1irB9be52WysUNKrPdBP6b5V1DHYAK EwK1WOs/TxBusqDn/gWBjjmLqYr+ZVndaTfDvPd3sWDdzBoiKZ40QUZ15Z5lu76M 6e2DhfHCUjGcZBEjDaI+WYc9s0REAzJajEf9Lax3ZKZUyCpWbXx5CgSdKCHB8+cP RTyTQQKCAQEAsxx8r5a8hocLfQ43Kvm7HH0nUHeVoRXlbOFDLNf6ZE/RnCCOxOX3 esiZTRAZmzo2CaOBJPnr/+SwTgW/woxCBGh8TEc6LnS2GdviwRD4c3CuoRTjzhgU 49q8Ld3SdDRrBoBnIMWOuktY/4S2WRZ9GwU3l+L2lD1Y6gmwBSa1P2+Lxnpupagk 9CVUZpEnokM05LbMmTa2M8Tc43Je5KSYcnaWctvmrIUbnN3VjhC/2y5oQwq1d4n2 N4eo65vXlbzAUgtxtNEz62YVdsSdHNJ8dXkVZ3+S+/VPh75i2PxjbdFSFW7Futlx YtvAEs3LdgC8squSDQ1LJTutXfBjiUUX9wKCAQBiCMre86tLyJu6Qb6X1cRAwO7m 4kyGzIUtijXko6mWxb4X/usVvzhSaNVYbHbMZXjX+J5vhBOul+RmQ3EY6nw0H2z8 9D4z/rnQVqeb0uvIeUhBPni+s4fS4bA92M6Ie5bhiOSF2JjjJr38BFnTZARE7C+7 ZII7z2c0eQz/wAAt9fWWroAB2mIm6wxq0LNij2NoE0iq6k2xJE1/k8qhXpsN0zAv bjG72Q7WryBeK/eIDK9e5wGlfLVDOx2Evlcaj70oJxuoRh57e8fCYy8huJQT+Wlx Qw4zhxiyzAMq8SEqFsm8dVO4Bu2FwzmmehA80ieSb+si7JZU92xGDT394Im2 -----END RSA PRIVATE KEY----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+localhost-cert.pem000066400000000000000000000034151265472114500312740ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFCTCCAvOgAwIBAgIQdcXDOHrLsd2ENSfj5h8ZmjALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw NTQwM1oXDTE4MDUxMDIwNTQwM1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2K saEVcHq0eldu5kABbWtZsf9keK7lz8beVIowzOqp5IHpGlggtH7xDVeigA/sLdds WTgKEOq3zsJzdgfEti5TNAjjmPqjMKkolqv3LXDJG0dZ2GZ8W/eBB6X1wB0LKr3i ye3/5jb/wCZYVGGMQXj0VQxY8Qq+OHEp0effeheJqA0OYOj+RaZwi20OR/KmJRgY wXU33bZyapuyT4krhFlFbtzXeKsKQPrT2ePWxPAceqUGUTIqyJySYIw6vb72YxjX FNRw6Jg7B7RqVJaVCfBrVxtAv+rCLOhUOVYmWhgWEIODPXiqOGwB0VUApAVAYqfi TYnJIZ7QYLlQx5VPNlzZuSJTUzKmHQLtLcTqdO5HmLxfxc0WuS/ftK916wy/jpSc m2DiHjIy6aAEaHKGQrNgT+no68kp30xkYAVsIs0BFpl6Q2iNr5e0uKta82A0xU1Q we7swSHOHCevuDZfFA/CqnBptOjvNUuVytcroCeCrV/ftp75w/Fd9zOcb6LGLxM2 2UzhkSXl3II250xj74Q3q8T9TDxCLty7oiawhaYKI+8SDYc510EQ7MH46WMO+3Uq JkpmmELd9POgnnZ1JrCFmf0flUKTi2CqU3wrBPpPMwFBxoFipp5iL87npACHc3DY 6uaoF4Pf9Et1Fd7HRon8RMsKkrSF92NFiBx5UvhZAgMBAAGjNjA0MA4GA1UdDwEB /wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq hkiG9w0BAQsDggIBAC0F4ci1nqZ9KUhEEAmWmy8g89DovNNIGSC51r2WJ/COmYUX X70TONscsBL/kx5MK4xoAmb+EN6Yy8i+z9NkNJd0B+2MjXPMFBpgGb0UiPv2wEmZ 5PAKyjwTxNIm6L/nFhkmVqfsQHfjHukXES4C0ff6fj6fuDpBfl5nTlVmc9LpP+hT 5RAwW10qumucGxAWGNBWW+K66cf8O7n/0nQykxJxYjBx16ZB80H2uvqFDKDVFqze co5M4euXQq9KiXPRlcC9rab2a7FGLHd0TyPkq6TvfsqpxcryyKS4rIAz3sQh/tl/ 
/qm1tBcZW2bce3UlF2Wb2dW9HqvIu1O84f6ptLqwgKcIdTbwgQZ0kbFoWE2kWJSV w+eAFb7tz1LDTpF3NRlz+1K27pBQWRQgcqoIRoQXpC0LfQY9Mp70QIfUQdUh6tnO 8hmq5y623tfxiDwCxb/EOpwCmwK1Cp9cloZTDefVE1r6NkEJWeeHG79VljUGF1KT NKzXWrrsFtge/hU9Pj+frcZO9qExxPCcsrdZcoK7Ll8s+pjulRvbnCnJkNpeOI3P iz6+sdGmzKSKg2daRM67Zmy5tmlBEX/eV7kFqt+b3HsdUiLo3Ng2lyPLNNDfwUtB EukgYGjVJoyqLjLXgsCxLJlk7X/ogVwf8SlAnQ7p6KuxGWm02vlUpEmJp+Hq -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+localhost-key.pem000066400000000000000000000062531265472114500311320ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJJwIBAAKCAgEArYqxoRVwerR6V27mQAFta1mx/2R4ruXPxt5UijDM6qnkgeka WCC0fvENV6KAD+wt12xZOAoQ6rfOwnN2B8S2LlM0COOY+qMwqSiWq/ctcMkbR1nY Znxb94EHpfXAHQsqveLJ7f/mNv/AJlhUYYxBePRVDFjxCr44cSnR5996F4moDQ5g 6P5FpnCLbQ5H8qYlGBjBdTfdtnJqm7JPiSuEWUVu3Nd4qwpA+tPZ49bE8Bx6pQZR MirInJJgjDq9vvZjGNcU1HDomDsHtGpUlpUJ8GtXG0C/6sIs6FQ5ViZaGBYQg4M9 eKo4bAHRVQCkBUBip+JNickhntBguVDHlU82XNm5IlNTMqYdAu0txOp07keYvF/F zRa5L9+0r3XrDL+OlJybYOIeMjLpoARocoZCs2BP6ejrySnfTGRgBWwizQEWmXpD aI2vl7S4q1rzYDTFTVDB7uzBIc4cJ6+4Nl8UD8KqcGm06O81S5XK1yugJ4KtX9+2 nvnD8V33M5xvosYvEzbZTOGRJeXcgjbnTGPvhDerxP1MPEIu3LuiJrCFpgoj7xIN hznXQRDswfjpYw77dSomSmaYQt3086CednUmsIWZ/R+VQpOLYKpTfCsE+k8zAUHG gWKmnmIvzuekAIdzcNjq5qgXg9/0S3UV3sdGifxEywqStIX3Y0WIHHlS+FkCAwEA AQKCAgAtZw3V8P/+el1PpqoCsNzpqwvQn36bc3CKvPwtM1tJQa2Q92V3DQdr9rDg 7pjGkankpGorKScH4ZLseLy2h5aKRCZm9PS/DhbbCs1wrDhtO5AxeKYPGhYNiOpx VvwuHQ/Pohfmdn7KgNrKrW1WIBW5CWN+2X4mq2Gk6aYLHgKZSeB3mf1st6mNRACW RZg5OZKW3VMv0a/l3cVaeqooXwQ/PtUkXhMp3ILnnKly3Gulzi2gIyj3EQ5vODSe O3gND/UZOJwwgGG6Aief4fnDc7an+c1OSgBr8OVC21Ys3dfQWWV0os9gVFhymX8k 2AgRf6jP93sFw2NSY34KvcGZpKG59oMDxWF1vPo8sOt17Ey0+qp3eUtB3FfE7Wtf BaLaD/x4U91izIqOEMzQ6QiZAyvmUoBkUSo125CYuIkt8C8Q1lA1KjihETWF37QR mr8LUk0A0x3SErtm4wVfeDEqVSfI9gKpk6i6rlUzuCjv58Rc0yyqoghXwBWM4CKj 5ZHYpBKAxj4bM6IrKnodAOcsyVk2c2zVTaMxPhoUj0fF7IE5Hy6YAQ/yBheZEM1v fhsdBFyS6OqSCnN6UinhH268QPam82lfKTFjW5lOgsSDQZ9rhiWoyamhonJTq65I nb08f4mzT6OGMwV13zq8dXio6WnUIQAhXdEYWrMBmxp5b6CxAQKCAQEA4kmwV3Nb n3ZIzVAp2l+yGZwdg4YWzN2kcfdNkL8I+Pn8pWrOwv/uGQYmM0786ys9kB5lu4FR TMcoEo3AaK/z8N49ro2Kl6HcTmxZgTMr+cl6iwetzqYdkRK7klxyCv5uVloDQDtc AulDH6RkW9BfRERpi6XtlgiFdJj5jMvXMpwGHX69JVsXb83ZSQESjI2JfO9Y8+4M a7hNKWW/W0ZBrGCcQQPbgpysfJ+PFKUF/yF1h8SSCdetW2Kv2ix16wL5uHKINYmZ Y/Om+/AFnUOQlANycgThtgBI5mvg9Khq6W2i/RNcIL7bvwAzq1p+o6cGnImXo4bY hC4fs2/aeX17UQKCAQEAxFQHSLBYDLal5CQYbHbNZ2sLjwRUraEd/+BA8XoERVVQ JPihgEvTPEaHnWrFTw0qaGKgMZ5SZCZSWUIfXjYvQIUcEMhNUOHweXhJJhifO5sd sTuvU7bWg76F69bRKfp8KM266m7qMYv+tNlQ6Kbz/1ImsW00xb86vCK2hPfhldtN d/iBb4HVDu1uoATHUNuqsSGj/UvttKudQdg7MapzM4N+D4m6rPZUjQmtoMWOXt7R LYrqEOHWfkxXKlVHw1cL9uzUpArvnR0VcYvGfXiYJFbXWsEB07VxIoLMPEtPbpH9 YLY37KugrthEVnsbySmZIWCRDEqQuuAaa5o8S1naiQKCAQAiU/dybMebe0A0FVMk E5xbEjnP+AmBbqZBu7iCmthrnNDc70UKg/TEyxAEfJkVu+uM72+TcFy6/wNvPR3R Q9AH3E8TKdm6gw1+wCUb2n1zWUND0Bhn3v9hQKw/2dJbJJnsc59GoTqmHmjWZgPr gcLSAmbYjoVqW0STmZlR6KJuxQiQdOeQwS7fASVTU9xSgi43S7/80UIFHWJnQ04y NIhF9CoAGuuz9ryb80CraxVrzNGdlQ5qe9OKp3/x4wjIbB0iBA3xwTwJ066jTZgs cVF/gr5b2a28BHMKsZbgxqPhYYZ2SfeR6CJB6W/tML9BaFcybBUa85vpAW5BtFg6 UfThAoIBAAp1/71byBVFVimF0tdUrTUpewAv1uM5hoOvy0YSnk+jcBXIObLAV40K pQc6PTEtHmlZd/es2+8CK7kd0NYQRQxHC2vJgHUi1NFkG2GwRivC5B4hdAId5+g1 KqWaWKLH+f2imKcNKeVh9Dxmp+z9mFquYelqTDmNKvADWX5URuzZNpOB5kOuw098 TzyvhH9GdR3jEP3aIdxSmJp9jwnibyj7hKgHSq8UoQSy01GRtThQ3wxyLm6f2fH4 11wmFyDNbpHFpL7o5kOU3SOjsvvUhSbKiccIKbTCIjkYhxFfYegeV0Xj767opjMq ytlgzeY2FTa2EoR5JKUQc9fv6+6H5yECggEAVVfnywPm8QXn+ByFDdUndZg3uEje DGyvt1M3mIz5geyRZO8ECzgsZVzKgZC8jDB4lPKz3AGgNlUl/vyGHk6CtW6V6ePA 
EXcmOkkMKJQMdopY2/cE6YlSpBGMCcnfothgL0HXxYoop4xVjb74k7tFcNrIDoRx zp9dSalgxx9aMeaURRbMWf8AhWLZUAjJ/359M1SmcNW619SL3p8Q95Nptvdiltww lWOCkBdgkjW0mel+Mi2+gY8UPmgNBMPrJ1z9b7b7529YCv5Oci8ABn/N202nhjCp LupADooNknOMLDyqwRorEv4g6wRjuPIYTIhI9fO5ranu089x+mmGU2tCBw== -----END RSA PRIVATE KEY----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-cert.pem000066400000000000000000000034321265472114500321660ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFETCCAvugAwIBAgIQJ+iLgsp9gA0DmROqW+tHFzALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw NTQxNloXDTE4MDUxMDIwNTQxNlowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQDHR/A6uiQ9X/Xh5ivmdjRr5XVr1D7+fU9Qu6ohArqtBuJsLr6t2RBTS9w6PIAf xjQSMSFlrm/CY+hbfBMSgm9NeH23o3kYCgoEPhP/634A45W5xwUFno388U8/NHK7 qwzSP1ezKXfXNvzuo1mZhT08aVdGMOrZUcZZZl8R3RPcIRw9XDSfXKVkMluH6egk 8iLdOxdIdRS58DeSI09FskWe3cIZ5kJmMqnKoIbYSJCVVeYPO0RFlIBi+zpdVyI/ r9LG0r0plRdz/HJevbOitU2y93S1s9NWMNEkOFU1PFJmsF3ZzNqJFCySj00y/Hcs jPULYwIxYdqcv16cTNmd3P6FegvuzLJLjNuGaLJGc1antv+p62P7ZdE3DyprFuxs MJgDL9+NjDaIzoamFf0Uv7K3F7hxrrAHfvm1CMUOyQLg9J6Wl4mLsOy2ZhCbdNFs T6dobAUGvz4Muj9V8V5pR+nFehjmsPENSsTcs5j0e8zTWtvMFISdS+NZAkpiz0s4 PV8DLgk5Rp1ZG2V5OnRPLMOTgK0nngc5GVaxf7OYCrFHbBJ8tL93MXNQptNFeBpV FhjUGqVFcz+6nbFX2NsFLZnghQRs9lej4TTG33NSAYusKqhVwpYFf8CsXCcvYuU6 RlkCYjr3PB+nX1UDa0eUGm0zOabf9O3D1VzHQBpDuzSHQwIDAQABozowODAOBgNV HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz dHJ5MAsGCSqGSIb3DQEBCwOCAgEAaPfAs6saij4FZIPbzAb5M6ZVvfXBg+AfH52t p3tFsnWUJCiOh9ywsc2NcmJdleKDc4/spElFMUarHqcE1ua6EH15O5GEnHWKj8EY PVQFrPvf30UkRGNPl8eC7afZtCNk9MLllIATAzBr5Z1i+psV7MmgBKpbZ4B0TnhR GXNT60QaCJ9RfUuc2z7RHJNo9XTn3Q44X7TFj+P3jHOWzTf8y6Mz6saTy2bugIUy AfRgRgq/bB8hRjrazg55FIlrMv7dr3J0cIuqmaHfsw7Q2ECMCXW8oQXMBzfuIT0n sG4u0oVxdNx4OdHsAubGjjwNDhxJvN5j8+YFqZMu03i8LbyamTwsrZg2C3QrRUq8 SujQEEB+AmO0lpuJ24FsOOYVSYCpLy2ugrKOr2NUqbiBKZs8uBh6RGACfunMZlEw 4BntohiO7oZ5gjvhGZNUEqzMChw7knvVjZ+DkhFk9yE4qIL7VsJSUNI2ZJym/Xeq jr/oT8CpP8/mFZspa6DFciPfhGLQqKcaZZohL7461pOYWY5C2vsJNR2ucBZzTFvD BiN/rMnIGFrxUscCCje6RLmrsZ3Lb7bfhB3W6kwzLRfr/XEygAzx6S2mlOM34kqF HFpKrg9TtLIpYLAKAIfuNbrLaNP1UKh7iLarhDz/qDcvRka/qJTzLD3eLeGXefAP KjJ1S7s= -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-ca+localregistry-key.pem000066400000000000000000000062531265472114500320250ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAx0fwOrokPV/14eYr5nY0a+V1a9Q+/n1PULuqIQK6rQbibC6+ rdkQU0vcOjyAH8Y0EjEhZa5vwmPoW3wTEoJvTXh9t6N5GAoKBD4T/+t+AOOVuccF BZ6N/PFPPzRyu6sM0j9Xsyl31zb87qNZmYU9PGlXRjDq2VHGWWZfEd0T3CEcPVw0 n1ylZDJbh+noJPIi3TsXSHUUufA3kiNPRbJFnt3CGeZCZjKpyqCG2EiQlVXmDztE RZSAYvs6XVciP6/SxtK9KZUXc/xyXr2zorVNsvd0tbPTVjDRJDhVNTxSZrBd2cza iRQsko9NMvx3LIz1C2MCMWHanL9enEzZndz+hXoL7syyS4zbhmiyRnNWp7b/qetj +2XRNw8qaxbsbDCYAy/fjYw2iM6GphX9FL+ytxe4ca6wB375tQjFDskC4PSelpeJ i7DstmYQm3TRbE+naGwFBr8+DLo/VfFeaUfpxXoY5rDxDUrE3LOY9HvM01rbzBSE nUvjWQJKYs9LOD1fAy4JOUadWRtleTp0TyzDk4CtJ54HORlWsX+zmAqxR2wSfLS/ dzFzUKbTRXgaVRYY1BqlRXM/up2xV9jbBS2Z4IUEbPZXo+E0xt9zUgGLrCqoVcKW BX/ArFwnL2LlOkZZAmI69zwfp19VA2tHlBptMzmm3/Ttw9Vcx0AaQ7s0h0MCAwEA AQKCAgBd61qd4vKHdn1kzNztzdHg9BDGFA7oU9iYvQlua2HdgDwgLluxhXa7Oyp8 y9y6nOgXls4dpPuJCxsMWsqGU7DvOxVNAh9lI/4ah8NXPv5wntIG73Q/dL2Ic5Yc vLRCHFh7klzb1HRlmsXUFmp4/yGgIil+rDlS2MZ5hdTSj3X3ricoCBfI75oHQfB/ es7s8q1ZxKqxfHSbOUqHdlq7B0zmla8QE8RBdCkvlT5YGsMBjq1RimYfwOBNRgf4 y8MZbt0Q1WtPeLPH9zdTzWYnDfmjmhqINEsq+PDoeCA4aciQGxjwOCrapgZnwF/q 
4q+r8HbgufXjnjGw5ERLt7BsRSYynoJiTWQ3p/wZ2VLpjFtxYxoJ5/qpQvbZMgGS Yu3FZNC6cnbOs+JWbdm7Kg93N24cBrGdk/KdEE6lz6uQq07FTSqLtPEQWePzBiuA 1wfP78b2AH6vyJKq36EfMCJK2i7rpwtNz7d9NI5kiLRDB7gesqC94WJ+psEu+ErO w9DbTV3xdOPs4FGGrR41Hbo8emrk6smhb8+VK2odggi8i2CLAkYupMsuobBlX3CL hyJPfWDv1aREJ1w7zWVQlJkvp5zR0oXZXpfFxjpj7Ypbp7BKxmh5+WYj8msFDfaD 8VQ+pqgPpdl6zElEq9m5koHjsHH57fMeJQ59HiWpWFur+kQx4QKCAQEA0Jnvbm7R WypbPDInkIoPDIhyP9Pqv+wMzNfYEnVEG0GhEU/H5aE20a+Dm6u0bsmPm5lCSQsu EvylTSL3yumQZMincNIUXcPYb2Qye/ZzJnMIibCqwMKQqi4HxCXprWhiEoGPum8A fN0bTGgMYfM6JZ/Dh1eGsEvemeW+5tn5xZF4Lfp/vkT8v4FuHDydUF/lIx7F5MMi VteS0hHnR1DuvxHqtysf0wy2l61LFr7mQCMYTNEyFB3ZfXqpxJmFmCqPbr4PQsIm 2rqIDw+13eeoyDpJJkdi+yzHkAYDOdAsur0vOQvK/Zj1QKz9qmC1O6L4BN5yp265 vjSE4Orvo7btEQKCAQEA9I/afLw6lHUJ4FVL0p7dH15JSFjt7nmGHocE7Wf6Yp3G vMp+PdGyoJ2KEQB2unnQZK1gZqUuRQLannjNl7fsIiIhHgHxMBCIiylwSUVnP868 u9/fpJV/cSGze2zF0WAttIgXKNtXG7xMntcY2k+SAe0qjqX494KT0NGnznySt2nU A1YlkXm6u3KCOJrBKfbtiHXFoH39sA+ihuPiV7xcETS2ZrFdAX9M422p4yDHqe/0 dTe18wIxJNiEX4xp/HRE//cuQ5dw/Z/QmNrzgWxHbOmXVR5C90vIJRuYY9xz0tDP LMnifSKfnG16l2gqg7zb8xsxYqSGndXWKPAeiq3/EwKCAQEAhCWQbWgcjmFFzNuE /ubG48yoe9DW/OAft8Dg68iH7bBkxd/BpbG8VZeXiw16T1i29f5f5IAFnxeX7EbD rTLLO1113V3ocwH3YZGa/bbBedETzo4xjc1z8asZVmQiJa1ju4+CKrvZFkDH415i wcZgxqbwKhQDijl1+g52Ii5iMYuXE6GGPVXcu8DVrWOk0N7+/IGpIeOQJG2KYDPh TOdzZ22FQKY8EeoS3gF0+SLUIDtbUIaR7/Z86iXD2HzdCemkVaZnaoYuMRBL0ybD sqDn5nguEObWSII0pgN5Fa3QODhS6xOSc5brfx5X0BBVn0L9VbBJ99GIL3t71jRe vVrL0QKCAQB+jUYZT+ncUqgWruy6g7yW89pmFqagxb/SYjn5g9m8WDq0DPDAmped p4f/fkbx/gEJZ/I/i3BjA7QPVyHERcdqblDGz2h4X8XYhUv2jnR8P0XIznNTHo1B BJh04PeIfgWIqveZC8+KqajYdSQGLDC40Ho6MMahha9p2mPEZRAi2x97zoNIQT6Q qxOZqPMV/RIzkAYBI9E33w9ST/AbSHw35xgQEe23zaEC+wdzYc4QMPxF/9smcdbu YyA0tVtO6PefoNAO5/nvNFjkEED7kwVu5X2K7Urn3w4lrZ7w5e4FhEoAukN6T4Va lAhg+uUtIHiM12B50/tZB4N30bFsP9eDAoIBAHc7ppfpo1aDK3bDr6zTSOU4Mn1l XrfhBJHDy2Wt9WkvWtcCtXr3sDpthaChueV+mGoKvfgWyzUoauO6HDDsRYriqaQB cXclVjyy+3atY32Opz9rnWefQkbgTOQ+oQgOzEFhxNS+11Omc6ZZ9s31N6TZi/Yz rgXzhGrr73DkV6uwiiwkvP8vJxg8AMWKorDIm1myr9wwlK5ogDKSku1DM/y1gvlt 4EA39fqURyqxN9o5Yq+8K1+a/smjGx95M+P8Nke4bMs1+lb7bBXbMaVpC6DLqj8B eleOZ7adY2mS0CBuf0PNkJRNDwF1B5VDmGBJLubUtGLuUUoEyUbv66WfnUw= -----END RSA PRIVATE KEY----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-noca+client-cert.pem000066400000000000000000000033611265472114500311170ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE9DCCAt6gAwIBAgIQb58oJ+9SvWUCcYWA+L1oiTALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw NTUwMFoXDTE4MDUxMDIwNTUwMFowEzERMA8GA1UEChMIUXVpY2tUTFMwggIiMA0G CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDDmOL3EhBm4So3agPMmF0z1+/nPlrE xoG7x0HYPk5CP3PF3TNVk3ArBPkMzge0/895a4ZEb9j+LUQEjOZa/ZwuLmSjfJSt 9xTXI1ldp8KasyzQZjC33/bUj7FGxGzgbHyJrGGBoH2W5HdswH4WzhCnGTslyiDo VN4hklJ7gr+Geq3TPf8Eji+1L71MOrUyoNp7BaQBQT/gKxK0nV+ZuSk6eaiu+om7 slp3x4bc21o7eIMmNXggJP6p9fMDctnioKhAPcm+5ADiFYSjivLeUQ85VkMTpmdU yvq6ziK3Ls6erD+S3xLvcHYAaeu84qLd7qdPwkHMTQsDpO4vPMIwL8piMzZV+kwL Bq+5xk5//FwnQH0pSo2Nr4vRn+DITZc3GKyGUJQoOUgAdfGNskTt8GXa4IsHn5iw zr12vGaxb//GDm0RLHnh7NVbD8xxDHIJq+fJNFb7MdXa8v31PYebkWuaPhYt6HQC I/D81zwcJIOGfzNITS2ifM5tvMaUXireo4pLC2v2aSY6RrPq1owlB6jGFwGwZSAF O6rxSqWO1gLfhJLzqcw/NjWnO7nCZEs/iKgAa22K2CtTt3dDMTvSBYKdkRe/FYQC MCa7MFJSaH85pYRzoDN4IuVpvROrtuQmlI47oZzb64uCPoA4A8AN+k8iysqITsgK 1m8ePPXhbu4YlwIDAQABozUwMzAOBgNVHQ8BAf8EBAMCAKAwEwYDVR0lBAwwCgYI KwYBBQUHAwIwDAYDVR0TAQH/BAIwADALBgkqhkiG9w0BAQsDggIBALSgrCdEQd3I vb/FNkNZkAwdjfBD6j7ZtPBwvjEiiyNTx9hOLBGvbey7kr0HtW0KkLWsdRmCc+3z ev9I5VjDOtpiqrvuAA1wRBaL3UzGyj/eFjPJpvkfJi8zjkIZ2y18QG3yJ6Eqy6dD 
0aIQAHl9hkXMOVrf364gf0p7EoOGtSlfQ56yIGDPTFKKiy+Al0S42p17lhI4coz9 zGXE1/SiNeZgdsk4zHDqhzzBp8foZuSL1sGcIXHkG8RtqZ1WvCyIPYRyIjIKZcXd JCEM//EbgDzQ7VE/jm+hIlYfPjM7fmUzsfii+bIrp/0HGEU3HN++LsA6eQOwWPa/ PrxKPP36EVXb72QK8C3lmz6y+CHhuuAm0C1b1qmYVEs4eRE21S8eB2l0KUlfOecf xZ1LWp1agKt6fGqRgcsR3/qO27l8W7hlbFNPeOTgr6NQQkEMRW5OxbnZ58ULXqr3 gWh8Na3D4+3j53035UBBQUMmeeFfWCvtr5n0+6BTAi62Cwwu9QQQBM/2f9/9K+B7 cW0xPYtczm+VwJL6/rDtNN9xPWitxab1dkZp2XcHG3VWtYvE2R2EtEoKvvCLPggx zcafsZfcD1wlvtQF7YjykGJnMa0SB0GBl9SQtvGc8PkP39yXHqXZhIoo3fp4qm9v RfbdpOr8p/Ks34ZqQPukFwpM1s/6aicF -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-noca+client-key.pem000066400000000000000000000062531265472114500307550ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKQIBAAKCAgEAw5ji9xIQZuEqN2oDzJhdM9fv5z5axMaBu8dB2D5OQj9zxd0z VZNwKwT5DM4HtP/PeWuGRG/Y/i1EBIzmWv2cLi5ko3yUrfcU1yNZXafCmrMs0GYw t9/21I+xRsRs4Gx8iaxhgaB9luR3bMB+Fs4Qpxk7Jcog6FTeIZJSe4K/hnqt0z3/ BI4vtS+9TDq1MqDaewWkAUE/4CsStJ1fmbkpOnmorvqJu7Jad8eG3NtaO3iDJjV4 ICT+qfXzA3LZ4qCoQD3JvuQA4hWEo4ry3lEPOVZDE6ZnVMr6us4ity7Onqw/kt8S 73B2AGnrvOKi3e6nT8JBzE0LA6TuLzzCMC/KYjM2VfpMCwavucZOf/xcJ0B9KUqN ja+L0Z/gyE2XNxishlCUKDlIAHXxjbJE7fBl2uCLB5+YsM69drxmsW//xg5tESx5 4ezVWw/McQxyCavnyTRW+zHV2vL99T2Hm5Frmj4WLeh0AiPw/Nc8HCSDhn8zSE0t onzObbzGlF4q3qOKSwtr9mkmOkaz6taMJQeoxhcBsGUgBTuq8UqljtYC34SS86nM PzY1pzu5wmRLP4ioAGttitgrU7d3QzE70gWCnZEXvxWEAjAmuzBSUmh/OaWEc6Az eCLlab0Tq7bkJpSOO6Gc2+uLgj6AOAPADfpPIsrKiE7ICtZvHjz14W7uGJcCAwEA AQKCAgBmIvmxpp8l+cH/ub5OIenZXpMJn4fqZPXtxjjd4HshIN0ln0JlF15lOG2M gDGKFGKUts8gAX/ACocQETtgnDnn65XlwPIqfXFGflD2FNoLyjBGinY6LhtIF9is aXmpHz1Q7tDjzZiHKLor8cBlzCjp+MToEMpqR5bO1Qd5M2cro/gM7Lyz9kN3S3x/ x9BCpbgwsVtYxGfEePmFkwAO159tx4WMCYvOlW2kSm5j+a7+iwmA9D7MGkVZHvNN A7Y/H0F8ekdVBN5pMG9Yrv/vk0ht2lugcS5YGr4eufFq0mhWdv+jhBTxLzqPMMBG m9oMJcj8XyXYtwpfVsqBpCqK2wnEnv4Kf0rZzBU706nI2mjPXx3dL+5qo8uQJKNp mxoS7vmHV5RIJgtdvyzGFHjdfu1leowhV+Jy9jWzMw4wlnmlxsfDECf5RoSf2XGt SMGJb0dbJKae+W4MfNUFsgAWMZk3h3KF8AHHe44OpDbQeoh3JLnkWSG0oS3CR0ch 68TzCy0SZZEZ9IS+I6o5WVpwWfReCQ5NjaKipWcpiJvxg+Dc3GG3QcVXVz2gGrJh g9v0v6eyeOJ32QGvvP7THFBjpWeeHlXT8Yz6hFcPrvErEZ029TEmhg8aLWBGfsR5 F1bazdbqvOSEB9vBAAaddNnEDG9Rl8EmC4WdsnVgYUw1J7gfQQKCAQEA9DKjD9eN CrUl/2YfSm2WaFhYci74XcHDVeAXN2SbOyKbMIqk3aOFQNRAsLRnwPkdiLtuqeDK BafrfLTCORHfFdYKnUzmuekESNLckN9VyLztgqOqNAv3LD6GmSHBaJEnUyniLxOL k0wMEBIsEQw7Fb4blM2REYJ3ZzMFmgpRGnIX8KcxhW9XgSrnqMLO0w6mVxjo7xzd 813nCcNrGhySM/EzKYtTNHy2JZmMH5QFHaIj67KklO7VeEZX5U+TKveBEt4rmHqs Ndqf/djSs8vu1xse82pVRxMXX2mhDLmwjUjPgWYxUL92jTiyJhE7GxpVB/yHgF1J Ecb47MDahoNKkQKCAQEAzQzvCOA77IQpGO117GcMqcjzwEUhTytojFBT+s5mHfzk dYr5TyN86LQ7/GktNoJ5oRvD9UGRSul1OGneivqtWj6mv6/Zvfzacx8NXY4MYFs1 nEr3Gr7orVFIzD2x7nMPG2G6+J6hZ1rhpnZ9Hprf5G41sHIJxHJ9wTYSUAmFh8bv FiJqF90bSq/E5hgjphtX6wZWeZYspzc/5+IrJ/I0nqoxV3rjUy234zlzKJAV10sV 5oVgxLLQsUujkHp/Da+ij2aTv1Za8y3PTJ7MAHYgdpa5l/4U9MnPUEB2REBCI1NN TqxnViwD0xgsvxfb79UzruLJIYOCKvfOumlutXM0pwKCAQBUIMXQhWAP2kyW6mXJ TGvO0vDVlZz3H/Pdt/AHo19fRhLU7E7UFKupo/YNanl8H9au7nO3jrvKqwkT02o+ IwwKB81sV7v9PGu/cvWN64MwPvZMVXojqCOlWH0icGCjV66Glh1YPpGNU1ushbYs wVvxp6b04sUhlSLxqMA7S2aZh8j7nX4QDEXHODLLDyIV0Cw6QViuV/GXEDiyQmK5 gjJUNrp7i4ZExNozpeyCTIpepSde4hKVRJrCbumFFJ8M5GvRRj0asNh3TTRlTbd5 Pb6w2KUXEwECFW+t7UQQkEBkzDrAx6YhvXRoPqoRN0p3keDNeZBtBrZPq47CccZX JRAhAoIBAQCJ/DgnGu54XP9i/PksGrSU1Nvi+SJPKoDyW2QIFTj22SXMS7c1oEYA OrlbRFPeqLK8zfhyZKsnZC8zxVqy37okTqDbwbSfezZt3emamWqOtRJAmNnsr6fY aii4+JNySQ9Td9LgV69549iRso7EN6iPCfMrR7J29izWBlMQdTfchOyDUqleYbZp 7hpsVLY4o5HoYJ10uLBX3oAsxTARc5YhZ5pIqjOr18o1KIXsN/napXaZaAwUkdiK 
VsI9CZHSXezg30Bxs+UEXEFx6DKT5Oo3o3pFZAAqMlxGPvrXNv7K0tXlKXNos7nn Jg+GkMG6hRiAibCb0umXjKcbHrQXeu1lAoIBAQDcRBsy6cSQXMSu6+PyroH+2DvR 4fuiMfSrUNjv+9K8gtjYLetrZUvRuFT3A/KzDrALKyTFTGJk3YlpTaC5iNKd+QK8 6RBJRYeYV16fpX/2ak/8MgfB2gdW//pE0eFjw+qakcUXmo957m7dUXbOrw1VNAET LVBeVnml+2FUj0sTXGwHKcINPR78PWZ8i1ka9DptnKLBNeA+x+OMkCA88RJJegSk /rgDDV52z4fJHQJh9TZ7zLAXxGgDFYLGPTrdeT+D/owuPXF+SCP4pMtVnwbQgH9G dfQ9bb7G14vAeu/kEkFdGFEreS09BOTRbTfzFjFdDvSV4JyOXe9i/sUDxf9R -----END RSA PRIVATE KEY----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-noca+localhost-cert.pem000066400000000000000000000034151265472114500316310ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFCTCCAvOgAwIBAgIQPjclBRGzhznCybQzYRQTyjALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw NTQ1NloXDTE4MDUxMDIwNTQ1NlowJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV BAMTCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALBe C9O6es+mStDowUd1kiM59VkinzzdHgE24LvKmGxQ6fDnnT8S9L7iyzoxcJWlvSHu pfyZWvij0ZIyRZ288XemTEFYq25RK0IBGGdvYz9OqT2R3lblBQrXDjSi9WG16sGx 60MGhM2egGMqFQ5DBfT16IKw00+RjFgCVzJ8T64Lzw82E0e7d6hl39SPybY+uvrt SID60hYGmXoOdaiC9qquivks67BZprGNfORrvyJNrCFI6oKUFWHrQ1PpGd2tOwJN 1P3gkkS8pVlAif6ZQkAf+zuKu+l4j5tKxGlJAkJsafVJDLOxBKutUj5msha0g6uJ gFXUe0+G8hkNcEjd8XqUUCwIOY3pdv4WsydKBk3uH9zMnYolw53k1q0ObvoY1NXf beMxHQAtDi7nfQGlae9cuuOSymy95WuvzfhZFKdPWUe8lKN9QXFIWVoCFnOm8T3P +FNCUE+p8DIWkal6Ul9THi/Kz4p7twyrUp1LwT5EtSaJ3iGAmB9I+8/1vmZT3lPi nX8P+iVGM5yOUnptrsFm0bUcJWRD6iaTK1KxpH+Is4h2kiUiSz1tC/9bKaJYN2o9 oy7q7+ZVfHSmIxLo8ZFYsaZBcXi96cKuuPMR3X4ISPwKDqP5irxU/QbI+YQBMshg G4b0BNoMZ50g30r3Hcsifw4pzPQF0RDMOBeCiOi3AgMBAAGjNjA0MA4GA1UdDwEB /wQEAwIAoDAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDALBgkq hkiG9w0BAQsDggIBAFuS/VrMNUwEMyUIktDyna5ExYh/FDOE+YEYf8tsX7dSMhRK wE560/AcVZcbKKAZOnZ/262a++8tparsQt+bXBJ2so6YUqsFDNdOLCI2aShjWDRe TNhqmLIO3FNsLRKp96WHVz+jFoiECsoYfKn0jgqTqxx+7nWFqgBaNSlF5cbCgLCH jQV1uQhzsw/Mh/32hXAidkv/nLeLf7FbKq08hgthtoP+XstlzZ5BxkPodjb8XWXG DSS49SWX971GHa1apwMKfxVGSppxn18ZwEmW1BUfQBNxtMytqA9DK3+xuoUdXkB0 iJbm3Jc10JSRju8iyL121Xt6f8O33paVz/ndDJIWztUOjnItc89rxHsINPt5+cUt jix8ohwmHGDrK7ZooXBvotvmGT/xhPr2eHUAG8JuSJ/Cr09UUOwUEigz4CfgJOHm XukdzjOkb4r7lhNmVeGqrjRol1W0Wsc1NGH++J6xdkIeQ+i23kHwFHfQWV/J69tm rOn2N+qijtmbIy9YfVcrFDtUtEAzXylZ2StCVQNofd0M7tXNdrUL8yAFwlrhWGJV wsSP++1xH2Ie6Diupy8z6rbP383HmnmVPU/UecgLrlX2lEpt/UZkkX1Xm+6PhrrT HDeeULvqtUP3PD8wS0C873Pl9GXOKISqf0HKEIDUAVZhQOsGFqiZH0388M4L -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-noca+localhost-key.pem000066400000000000000000000062571265472114500314730ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKgIBAAKCAgEAsF4L07p6z6ZK0OjBR3WSIzn1WSKfPN0eATbgu8qYbFDp8Oed PxL0vuLLOjFwlaW9Ie6l/Jla+KPRkjJFnbzxd6ZMQVirblErQgEYZ29jP06pPZHe VuUFCtcONKL1YbXqwbHrQwaEzZ6AYyoVDkMF9PXogrDTT5GMWAJXMnxPrgvPDzYT R7t3qGXf1I/Jtj66+u1IgPrSFgaZeg51qIL2qq6K+SzrsFmmsY185Gu/Ik2sIUjq gpQVYetDU+kZ3a07Ak3U/eCSRLylWUCJ/plCQB/7O4q76XiPm0rEaUkCQmxp9UkM s7EEq61SPmayFrSDq4mAVdR7T4byGQ1wSN3xepRQLAg5jel2/hazJ0oGTe4f3Myd iiXDneTWrQ5u+hjU1d9t4zEdAC0OLud9AaVp71y645LKbL3la6/N+FkUp09ZR7yU o31BcUhZWgIWc6bxPc/4U0JQT6nwMhaRqXpSX1MeL8rPinu3DKtSnUvBPkS1Jone IYCYH0j7z/W+ZlPeU+Kdfw/6JUYznI5Sem2uwWbRtRwlZEPqJpMrUrGkf4iziHaS JSJLPW0L/1spolg3aj2jLurv5lV8dKYjEujxkVixpkFxeL3pwq648xHdfghI/AoO o/mKvFT9Bsj5hAEyyGAbhvQE2gxnnSDfSvcdyyJ/DinM9AXREMw4F4KI6LcCAwEA AQKCAgEAnrHg/oD7ZMEC7PuifoRCHMRYCf5nPkLQbtNMYG2pvT0JY6VlDo4l/2Te 7NvzrBPYHSI55RKwkq4FMwFdNtP+imTulJYOm1MaE2gc52WI7jv/eNE6OQIWCWz8 
8Uv4dBVWyTcos8S31rTaXWBOVejlAUgMERy+5wfWOpLQlzLYF4m0pMFJk/AReUtB nmhLXlsPsB22cag/RWZmzzcXk6tT/LzVe+R5ptLkdTsUuAxjjaBKVCDiMuDAZL1m dah3h8oKIMab8l0SABumxKqYAKkyvbSJQUhSUYAT5+3c0cfJ6q7WoMk8TqvnwfpQ 2Klbcaa4G6+79H8e/a41RWmcMVTTpLKmwzx/iMLPswLnTFbWYCsLSsml3OpmXPhG CKdbIWMvNMBfahZmnCP2pNcZBVY1/k/lEw25ehtnWqA7HplawT6V3gk/Bzz+3e3R XEpioZF70ipdW5Pb3OG/tKSNDvRRjqLPk9UWlQzmedzu7XN28V/blw/CBVcMAcc0 njwAledTuqv/wQ67dtbXdcxSPZbV/Rq7y3OmpgK6RWLIFzzpOPW5gULqUZfrnxtv StxVnlZXhFoymodFobTi7AYibsLaXLkunZWXEwFwdtLfFHznfHq/rHfBmna1lcKW MgWRqsbaoCsqHC1nc0E4llFkn3zqGYgMQNBeqNfX6cIPI/eQzPECggEBAOk0TP8N edIFENOrzUtpH1fB3k15heeA84SeBhj8t/xrphR3o+IVO/GtMtq9hVLeYFVPwWCi Mmy4KhwNUOtFeCSX4MbpiXvoPEjL3QF+Sv95HsEWsT1iBQIN4aoV0ZSv48YsRczs tLjr96hADLTMfpCwyRq9r8XVF/hnx7vqOoOC/J1kteRhjOWRnutFpdAMfkFgzUa9 1unmDHsDifcT+vpxief9Q9zK9xMYvYmwFkBUjOlhC7WchZC20nrwvM+A2mMBpeLB WSRWsYeOqW8zcQNGdWuXXMKxsYHwv9tXbANVWxs1gz4x7BxcFoN5poIFrnT+eImY EwhGrKR6jZsKF00CggEBAMGbdZU0+yvxL2tAul5RGAqv9xhdUV4eg8warTQ8/RWt 8Vef2wllBYnP48rXNDovb7ZNOjMBdjIWZ2zq2McMtHqpzP+zWQWaNT8/7Zi24JTL y4G75kZdGgTPG2Y71seZoZGxfOu4gf7cLKOqxiHYrNDHEDl5Pi13tJD/8qf6hYm6 K3yALSv+QlM3mk+5oueKQ7Lj9rV81YomYSV5+K+WhszhvLmuxv0necOLKapeBWvL GQ5038yAq3PFdu0HXzyA6L8YdusP1d3sqwQvLbi8KAMXJCeT6WZXGYgX2Rjfbuih ZHUaE7Ac0EsJfMuOowSkS7oXuT81k64ngCoq5KZC5hMCggEBAKYkt9JiZG8HYuSb GsjmHQllup5RvN+hVF0gRFHbAq2YeBtO3Xg+DpXxAjErIuhWPCWri6bwB6LDVmTj 68milaTke6TbTzLy0rg+Xbcppf766LlCFIYZ5l1/TE3j+4vGAC347sW/wkWY/7lj 4GmS43zsJmqhx6/XUJuOPJOZnZSCZr0vuhL6mOoZZDJUTXy62dx0PetvZsT/O9cM P2fDWWTCLTEVlBqik4KMdsS4qjGsyzOeCzyZReNDDRO/nZTsRSqSSwARJhQom5Rr RDVQXeyqbw93KAQhmshroBSB5Rc+4YiyCE3wPTo7NWL38XPi3lbF0VSd/rk/uNH5 6hcSCmUCggEAIPHjQFCTrRaNiyKolAQYozjuQyceAXYP11tyvcDjEB1ZRB/flemq 15iYmpukN4J67/qUPLmy8zL8xnvwB28SBw195MUQEPP8u5aVR7dW3/sN1jWzKaYO F2Nmti7YjX6HD9Oz/iiXdlbhAbi9nmTQg3ZcPGt1OSd1gncLQ6pNrvIPFFB7X1EU 2DRN/eMI5X2Rp49DG/7yF2AQh+AJgVeL+LEw/CfRlKJzBeNYY7U8Fuuoh907eAEt K7YeVpc6jYEiGeJ/2eAH9IuhTkT48saRyHTXoiR5QwDvR0lHmAPtS4irH4Igd4dv qlUi90B+XPvYJwKCc08aojf2hzZlUiVwIQKCAQEAraCoWea8hLFchxmAiBt7joIg nNK7a3LOHYxT1gB9H+PoVqTmzGVTeZpD8Jnis/UHmDhRYuUGqvFIefjAWbz0jJAN t6RMAozENCG1PoeXHf1gt2wspv14kza+8jSdpzNrzZgPZdb7Wh1UEqUkiRYwn87f C7DHknqCj9S2qq0DFXYz15JNPVrbvD+ZLBFJhTAjppS9TuYQVLf8JPYHpLRio/9A dMsyOz1VA2RRYN0u/u4ccxiN45K3PbVMCeDPbWXNm8G75YKQ7LnIuehMB1qkZy6N MOnNGp3l/ZkFK0JsW/pZqTQ2FqSkb0+ttTFApFI3qB04sc4s0uKPI9fa0OQtUw== -----END RSA PRIVATE KEY----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-cert.pem000066400000000000000000000034321265472114500325230ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFETCCAvugAwIBAgIQCnqSQalw9ytL5bHLgHZe+jALBgkqhkiG9w0BAQswJjER MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDUyNjIw NTQ1OFoXDTE4MDUxMDIwNTQ1OFowKzERMA8GA1UEChMIUXVpY2tUTFMxFjAUBgNV BAMTDWxvY2FscmVnaXN0cnkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQC9gvT3cwz0Ih9+7Ilv5lc15HsEiSmEMh4nOMZrSaamKgf/ydCiGo3DQapr/XDK FHMLKq68AxwfOlzmEFQ4d9umpPMQ2+4GBr0VG23ppGtQApIPHgD06S0/CeHmDIXN FXcKybPX/9KbgNkXBWbbJkJy0EcsdP8VJD50Q2WH89nvgEYJNFuKEELD3iGY6bBF jeDTle5jYA7CgBKvD2avn31g24Qhxn8n8/BdYO/U0kw0qmoy1veLOjCAW0os0jkM NlKrFpyHEWNj5B3X6UgSn8EGQaVbDq17PrQwlHJYU4nih0TnD1OwvBnFnd27pXjr 68eGA6Zc2BbUnhNGhppWHZ46LpPxpIbafSOH3ES3N/MZAfcUKIUntLlWE2xCQgFV TW95WeVtP/r1aWgIHu0E2Jb2eHCE+qXYqJxSU7S4DcknmmcTS69hzyHs+92Ec+7Q m0aQFZ0dyPoYPwXMgZpTAIuXEGg/FKC1fiS/deTW37DyvB2jppehKW3RJY3uso7R o9vs6DJx1OdU5XEq9R3n7op61N7PK8Wxmn7TVYHEZHkITVvtucZZd1FNTOrOJaNJ UnE+FuPK1Mrff+jz666Ru4zQL0CondOamX3QR5tuNK6MTqFs87wKY25qsqz7cS27 kHW+r7UNWbJY3/UQhaPZM78zCZa2IL1nBFUjsFvEA4rtYwIDAQABozowODAOBgNV 
HQ8BAf8EBAMCAKAwDAYDVR0TAQH/BAIwADAYBgNVHREEETAPgg1sb2NhbHJlZ2lz dHJ5MAsGCSqGSIb3DQEBCwOCAgEAHVGMyoyX4lRzWCDkUjrXkrDZzuv03M2ojW2Q UL61ejMkTWQW8R4gKrcPHAOJAPKVfGEVOrQH3ZMyxV2HnWrJ7egrn65zOzmLbWSh O7gdpL6YYjBr218fqJn/8HadXZa4k70JyympYOLojeWSLy3KP03U+y7AMcdE1uG6 6HJI54ZjBoW/nEyWmMh/mfMz8EN+Mgek48Z9AVaOswbtHtDIXN7XO0jbB3DbY5Yh prVqVLYAz4sCchGTadj+aEChF5sJkKREDvAew/njC0WGS2TmMJ+V1uVhXV6354mr edk79YvdwzwDgeYArkprahMtn9eu1aSTfUXsmM5OP5tR4gyFV1kUmTPY1yUd/yO+ 638wV0mWtGbbf6j8dUKeUBCyt2qGg8J80OUeFdvdHMswtaUq951NApX44BinPkbK moBVQByZ5OEcmMidFC9SqYSUwTQ7uNyWeguhCXav+l3x900YlKnUQgRUZntPwXjs yc7MXv0j0E86Gme6G1O02zamwkRgr3qOTHu2oQOow/a24fM4HASayLR0Kegt0sh3 rzk0HRF1mGonf1Ecyyj/3LpHVsgYSckwtJoZLOqtDMn+CKtOCEByssQfD+E9Qe07 qMyvcwpXUpfqe3ZERbJ10m98Z88VeK/XGt9ptq7HY47n1KL6lx3oyXwZIw8pq928 89dcqL0= -----END CERTIFICATE----- distribution-2.3.0/contrib/docker-integration/nginx/ssl/registry-noca+localregistry-key.pem000066400000000000000000000062531265472114500323620ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAvYL093MM9CIffuyJb+ZXNeR7BIkphDIeJzjGa0mmpioH/8nQ ohqNw0Gqa/1wyhRzCyquvAMcHzpc5hBUOHfbpqTzENvuBga9FRtt6aRrUAKSDx4A 9OktPwnh5gyFzRV3Csmz1//Sm4DZFwVm2yZCctBHLHT/FSQ+dENlh/PZ74BGCTRb ihBCw94hmOmwRY3g05XuY2AOwoASrw9mr599YNuEIcZ/J/PwXWDv1NJMNKpqMtb3 izowgFtKLNI5DDZSqxachxFjY+Qd1+lIEp/BBkGlWw6tez60MJRyWFOJ4odE5w9T sLwZxZ3du6V46+vHhgOmXNgW1J4TRoaaVh2eOi6T8aSG2n0jh9xEtzfzGQH3FCiF J7S5VhNsQkIBVU1veVnlbT/69WloCB7tBNiW9nhwhPql2KicUlO0uA3JJ5pnE0uv Yc8h7PvdhHPu0JtGkBWdHcj6GD8FzIGaUwCLlxBoPxSgtX4kv3Xk1t+w8rwdo6aX oSlt0SWN7rKO0aPb7OgycdTnVOVxKvUd5+6KetTezyvFsZp+01WBxGR5CE1b7bnG WXdRTUzqziWjSVJxPhbjytTK33/o8+uukbuM0C9AqJ3Tmpl90EebbjSujE6hbPO8 CmNuarKs+3Etu5B1vq+1DVmyWN/1EIWj2TO/MwmWtiC9ZwRVI7BbxAOK7WMCAwEA AQKCAgEArwqno2uEGnbuKnjmVRInmWKpcb4TN8Rm74lUVEKaB76o1s0cxK3MJP6h H8/e/vg2bqkE7indLsbkiaepcuLaYijXTcomJzDQMw+7zOOOLz/Aku/+qDg8D47c NXV5nLzn0HIPiEIF0JYJbmcR4veKxqu0Ic8K0QdCHHcn75P/x2Tuy4+twW9Vi76/ v5KRuxzZ/fTtVKKj32kWWNXb3fltgCoh+GR0jH2XlVh1DVkVBEwnfT/rM5ESvWwU riOah7ohT1+6QlOAPwKzwfr6FCG000eNKPb8q+p12q0ylHzMzgxtSxJwFb0X/Nzc snaboyWLjDAQ2I7LP6WmXizznvkKbE9PjW6UGYQ+2XApqp+Hn8tSC5I/gIDlBOOa psJ4fkRjr8n5+CbHbGmQG736hZcZY/z10TtOQbxeeeuri6oDQ62D4Z07GpWCG2EG sUakaytZnJkIN79PpfthPZwtStlG0KVs0i5wggH/iP2h0yAmvJ64ZRIqdvuE/aBn sdfRRlYUqmFOJsVQgtUWGKGS4WIxrGaclzT1TNxCKdiAk0glXe3sDtvBni6qDW07 iJzEXxrsLw6MiCDhHfDeae5JYeJXK0HlCfYHXgRmEnDFTGw8rBzwz3eXvPqZ5YNt j+31uHSwQjgOgEgSrXeTmRfLZsytKqndhBB/yBFmzZNrswXGackCggEBAMN5RSdW t+WWl8ghDGz/CN1oRjnk298/6L7ijluKGRgG+igwBEy+5m1EGPJT+Y5LEH4TiQJe Oc2XjQuM7zABX7JWWk1cL8Zlv3kcmR0lg4BWs7wDkoU1HYRkMP57vubtxFzFOsNa momivEniZ/eonHm3yv0VHeenH9j3mhJ3mVDIpkH+7uhn3++c0zYh96NkjfQi1/jF P35eSAt7FgHDOt37fWXwtGeYFRN4P19ZUNiIvZwT6Q1gmegRO8BYoW6cSbLWe5Cp abaULds46+mjM4zJhCZRFkdWHbzP4bZHocSmwGsqcpABJ6SASTVim02GGhBIt1nj fkqa10X1c5Sqis0CggEBAPgxFKSHccfIJ6yht2HJjysRLN/IHlO9hDcpCWUrISN/ hxu1uxfNGmUkd0H8zDO/O+QAJXLE8PPPB77pJniIJ8kK4swwsfufN6bNV9XJldjA o4vXnYt9Mpuky9cugD8LocUgWQzzKY5Y875TC4s3ldzyKQVm0NO+Wz1U3gfjogEC d7PhTk7Ba/ZjVGtL7HuZxlL+/TgZklMks2ulSTW2y8aqVJxaZXv0H0NX/+fpDHYw iljr+iqbiqZvjrzySryb0XWMtzP9oyDEXTXrWnG+kOIZW3BZ9FLxT+Te7zZ2PUbK vTkObsKxc8WVHIYgkt/OwWSwbYLre5nvFPvgEFbQuO8CggEAeZTlUXmbul63m5AK xYS/w88G1x2lMK/0mT4bY4562zoDwJlVI1MdydqwVZGryDiiUnjeIC3xcBISdZu8 bjR8jFUvp6xuPs2ska0bA0kBCQNkmc3zBY2rBVy4KKFZdRNwrm8yhK3HL1KcIKyF FEK4yPBrfozy49JMecxP9aqUHu4eky/4828gl04JBUONXwC9VpuRj7dILdaAozt0 zbXb2JSDQ7O60jCC83A4oprQMU6j+P9dVqe+Mtz9OD8ocb8eC/FiO/FTwm9aMl+u RMzw1GHHI3oODGLg7j6y2oilcsZxKnblePJu8N+mKWFizY5aicRg3rUkKU00Ftx7 fn2xBQKCAQB7w7Xgie5SStyF+KrC58kuF8WB3oBJEAOjoiIeQhCnbAvK5KfkqZHV 
CAc0b8TAtUc/XldOUSk6222oZQmbJ4J3fac1Xb8TlAUjd9iqMnk3+nBT5vSYP5mC Bf7kUjr/tWQ5MfVWQNfjNTZvHWhvRwvDfzq3h9rxDEbhYbXKx1fdGwboO51aJpgY 6NWLH/RQepFsh91sIUxXi8CxGF5Wm84oRn4k7esXkdgZNAPX+N4O/guvZhV9M81D S/QpAsYEIcuky8P7+Cplx6YXokKa4AXNyglQEHuG9PD7V7SAOxw5dhZAIpNXIThz OfVcaVf0pVzJQjWKCLW9QHz9UXG0aScfAoIBACdr3exVMUaMOtrAnf2NXj3hecgg WsWRBOOaSW5wXGt1JNlfYS4zwViafIwy31DNuMg22rj5Mq0TYMtuNYto5RoLSXeB uupUrENEBnt7JFrwI/NyWG0uYMM3G2MtGHGYooaT9+++wT96QxJZr5fwFYF1ddf6 5tFeKtNt5VM0wWBHO1voUhQ0TCaooatJjMuAB0+WbvwniKxmdbqQDzY+6myBBUVo gBJ0JxhxakLm1XGFHDtPCsAAHX/uZ4CvH2uyWqAlx6iwGXd0wwEGrbIRB/BundxR oaJWswU4FIPAgOpy2LEJKnvzhcmVFtZWD5sFXA1/83QvpceLTFTD5uioBPU= -----END RSA PRIVATE KEY----- distribution-2.3.0/contrib/docker-integration/nginx/test.passwd000066400000000000000000000000571265472114500250210ustar00rootroot00000000000000testuser:$apr1$YmLhHjm6$AjP4z8J1WgcUNxU8J4ue5. distribution-2.3.0/contrib/docker-integration/run.sh000077500000000000000000000034751265472114500226460ustar00rootroot00000000000000#!/usr/bin/env bash set -e set -x cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" source helpers.bash # Root directory of Distribution DISTRIBUTION_ROOT=$(cd ../..; pwd -P) volumeMount="" if [ "$DOCKER_VOLUME" != "" ]; then volumeMount="-v ${DOCKER_VOLUME}:/var/lib/docker" fi dockerMount="" if [ "$DOCKER_BINARY" != "" ]; then dockerMount="-v ${DOCKER_BINARY}:/usr/local/bin/docker" else DOCKER_BINARY=docker fi # Image containing the integration tests environment. INTEGRATION_IMAGE=${INTEGRATION_IMAGE:-distribution/docker-integration} if [ "$1" == "-d" ]; then start_daemon shift fi TESTS=${@:-.} # Make sure we upgrade the integration environment. docker pull $INTEGRATION_IMAGE # Start a Docker engine inside a docker container ID=$(docker run -d -it --privileged $volumeMount $dockerMount \ -v ${DISTRIBUTION_ROOT}:/go/src/github.com/docker/distribution \ -e "DOCKER_GRAPHDRIVER=$DOCKER_GRAPHDRIVER" \ ${INTEGRATION_IMAGE} \ ./run_engine.sh) # Stop container on exit trap "docker rm -f -v $ID" EXIT # Wait for it to become reachable. tries=10 until docker exec "$ID" docker version &> /dev/null; do (( tries-- )) if [ $tries -le 0 ]; then echo >&2 "error: daemon failed to start" exit 1 fi sleep 1 done # If no volume is specified, transfer images into the container from # the outer docker instance if [ "$DOCKER_VOLUME" == "" ]; then # Make sure we have images outside the container, to transfer to the container. # Not much will happen here if the images are already present. docker-compose pull docker-compose build # Transfer images to the inner container. for image in "$INTEGRATION_IMAGE" registry:0.9.1 dockerintegration_nginx dockerintegration_registryv2; do docker save "$image" | docker exec -i "$ID" docker load done fi # Run the tests. docker exec -it "$ID" sh -c "./test_runner.sh $TESTS" distribution-2.3.0/contrib/docker-integration/run_engine.sh000077500000000000000000000012011265472114500241540ustar00rootroot00000000000000#!/bin/sh set -e set -x DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-overlay} EXEC_DRIVER=${EXEC_DRIVER:-native} # Set IP address in /etc/hosts for localregistry IP=$(ifconfig eth0|grep "inet addr:"| cut -d: -f2 | awk '{ print $1}') echo "$IP localregistry" >> /etc/hosts sh install_certs.sh localregistry DOCKER_VERSION=$(docker --version | cut -d ' ' -f3 | cut -d ',' -f1) major=$(echo "$DOCKER_VERSION"| cut -d '.' -f1) minor=$(echo "$DOCKER_VERSION"| cut -d '.' 
-f2) daemonOpts="daemon" if [ $major -le 1 ] && [ $minor -lt 9 ]; then daemonOpts="--daemon" fi docker $daemonOpts --log-level=debug --storage-driver="$DOCKER_GRAPHDRIVER" distribution-2.3.0/contrib/docker-integration/run_multiversion.sh000077500000000000000000000023771265472114500254660ustar00rootroot00000000000000#!/usr/bin/env bash # Run the integration tests with multiple versions of the Docker engine set -e set -x source helpers.bash if [ `uname` = "Linux" ]; then tmpdir_template="$TMPDIR/docker-versions.XXXXX" else # /tmp isn't available for mounting in boot2docker tmpdir_template="`pwd`/../../../docker-versions.XXXXX" fi tmpdir=`mktemp -d "$tmpdir_template"` trap "rm -rf $tmpdir" EXIT if [ "$1" == "-d" ]; then start_daemon fi # Released versions versions="1.6.1 1.7.1 1.8.3 1.9.1" for v in $versions; do echo "Extracting Docker $v from dind image" binpath="$tmpdir/docker-$v/docker" ID=$(docker create dockerswarm/dind:$v) docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-$v" echo "Running tests with Docker $v" DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" ./run.sh # Cleanup. docker rm -f "$ID" done # Latest experimental version echo "Extracting Docker master from dind image" binpath="$tmpdir/docker-master/docker" docker pull dockerswarm/dind-master ID=$(docker create dockerswarm/dind-master) docker cp "$ID:/usr/local/bin/docker" "$tmpdir/docker-master" echo "Running tests with Docker master" DOCKER_BINARY="$binpath" DOCKER_VOLUME="$DOCKER_VOLUME" ./run.sh # Cleanup. docker rm -f "$ID" distribution-2.3.0/contrib/docker-integration/test_runner.sh000077500000000000000000000003771265472114500244100ustar00rootroot00000000000000#!/usr/bin/env bash set -e cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" TESTS=${@:-.} function execute() { >&2 echo "++ $@" eval "$@" } execute time docker-compose build execute docker-compose up -d # Run the tests. execute time bats -p $TESTS distribution-2.3.0/contrib/docker-integration/tls.bats000066400000000000000000000053061265472114500231530ustar00rootroot00000000000000# Registry host name, should be set to non-localhost address and match # DNS name in nginx/ssl certificates and what is installed in /etc/docker/cert.d hostname="localregistry" image="hello-world:latest" # Login information, should match values in nginx/test.passwd user="testuser" password="passpassword" email="distribution@docker.com" function setup() { docker pull $image } # skip basic auth tests with Docker 1.6, where they don't pass due to # certificate issues function basic_auth_version_check() { run sh -c 'docker version | fgrep -q "Client version: 1.6."' if [ "$status" -eq 0 ]; then skip "Basic auth tests don't support 1.6.x" fi } # has_digest enforces the last output line is "Digest: sha256:..." 
# the input is the name of the array containing the output lines function has_digest() { filtered=$(echo "$1" |sed -rn '/[dD]igest\: sha(256|384|512)/ p') [ "$filtered" != "" ] } function login() { run docker login -u $user -p $password -e $email $1 [ "$status" -eq 0 ] # First line is WARNING about credential save [ "${lines[1]}" = "Login Succeeded" ] } @test "Test valid certificates" { docker tag -f $image $hostname:5440/$image run docker push $hostname:5440/$image [ "$status" -eq 0 ] has_digest "$output" } @test "Test basic auth" { basic_auth_version_check login $hostname:5441 docker tag -f $image $hostname:5441/$image run docker push $hostname:5441/$image [ "$status" -eq 0 ] has_digest "$output" } @test "Test TLS client auth" { docker tag -f $image $hostname:5442/$image run docker push $hostname:5442/$image [ "$status" -eq 0 ] has_digest "$output" } @test "Test TLS client with invalid certificate authority fails" { docker tag -f $image $hostname:5443/$image run docker push $hostname:5443/$image [ "$status" -ne 0 ] } @test "Test basic auth with TLS client auth" { basic_auth_version_check login $hostname:5444 docker tag -f $image $hostname:5444/$image run docker push $hostname:5444/$image [ "$status" -eq 0 ] has_digest "$output" } @test "Test unknown certificate authority fails" { docker tag -f $image $hostname:5445/$image run docker push $hostname:5445/$image [ "$status" -ne 0 ] } @test "Test basic auth with unknown certificate authority fails" { run login $hostname:5446 [ "$status" -ne 0 ] docker tag -f $image $hostname:5446/$image run docker push $hostname:5446/$image [ "$status" -ne 0 ] } @test "Test TLS client auth to server with unknown certificate authority fails" { docker tag -f $image $hostname:5447/$image run docker push $hostname:5447/$image [ "$status" -ne 0 ] } @test "Test failure to connect to server fails to fallback to SSLv3" { docker tag -f $image $hostname:5448/$image run docker push $hostname:5448/$image [ "$status" -ne 0 ] } distribution-2.3.0/coverpkg.sh000077500000000000000000000006241265472114500164230ustar00rootroot00000000000000#!/usr/bin/env bash # Given a subpackage and the containing package, figures out which packages # need to be passed to `go test -coverpkg`: this includes all of the # subpackage's dependencies within the containing package, as well as the # subpackage itself. DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})" echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' distribution-2.3.0/digest/000077500000000000000000000000001265472114500155215ustar00rootroot00000000000000distribution-2.3.0/digest/digest.go000066400000000000000000000074561265472114500173430ustar00rootroot00000000000000package digest import ( "fmt" "hash" "io" "regexp" "strings" ) const ( // DigestSha256EmptyTar is the canonical sha256 digest of empty data DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) // Digest allows simple protection of hex formatted digest strings, prefixed // by their algorithm. Strings of type Digest have some guarantee of being in // the correct format and it provides quick access to the components of a // digest string. // // The following is an example of the contents of Digest types: // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // This allows to abstract the digest behind this type and work only in those // terms. type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. 
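//
// A minimal usage sketch (illustrative; assumes crypto/sha256 is imported
// by the caller):
//
//	h := sha256.New()
//	h.Write([]byte("some content"))
//	dgst := NewDigest(SHA256, h) // "sha256:" followed by the hex-encoded sum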
func NewDigest(alg Algorithm, h hash.Hash) Digest { return NewDigestFromBytes(alg, h.Sum(nil)) } // NewDigestFromBytes returns a new digest from the byte contents of p. // Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) // functions. This is also useful for rebuilding digests from binary // serializations. func NewDigestFromBytes(alg Algorithm, p []byte) Digest { return Digest(fmt.Sprintf("%s:%x", alg, p)) } // NewDigestFromHex returns a Digest from alg and a the hex encoded digest. func NewDigestFromHex(alg, hex string) Digest { return Digest(fmt.Sprintf("%s:%s", alg, hex)) } // DigestRegexp matches valid digest types. var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) var ( // ErrDigestInvalidFormat returned when digest format invalid. ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") // ErrDigestInvalidLength returned when digest has invalid length. ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") // ErrDigestUnsupported returned when the digest algorithm is unsupported. ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") ) // ParseDigest parses s and returns the validated digest object. An error will // be returned if the format is invalid. func ParseDigest(s string) (Digest, error) { d := Digest(s) return d, d.Validate() } // FromReader returns the most valid digest for the underlying content using // the canonical digest algorithm. func FromReader(rd io.Reader) (Digest, error) { return Canonical.FromReader(rd) } // FromBytes digests the input and returns a Digest. func FromBytes(p []byte) Digest { return Canonical.FromBytes(p) } // Validate checks that the contents of d is a valid digest, returning an // error if not. func (d Digest) Validate() error { s := string(d) if !DigestRegexpAnchored.MatchString(s) { return ErrDigestInvalidFormat } i := strings.Index(s, ":") if i < 0 { return ErrDigestInvalidFormat } // case: "sha256:" with no hex. if i+1 == len(s) { return ErrDigestInvalidFormat } switch algorithm := Algorithm(s[:i]); algorithm { case SHA256, SHA384, SHA512: if algorithm.Size()*2 != len(s[i+1:]) { return ErrDigestInvalidLength } break default: return ErrDigestUnsupported } return nil } // Algorithm returns the algorithm portion of the digest. This will panic if // the underlying digest is not in a valid format. func (d Digest) Algorithm() Algorithm { return Algorithm(d[:d.sepIndex()]) } // Hex returns the hex digest portion of the digest. This will panic if the // underlying digest is not in a valid format. 
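//
// For example (illustrative), using the empty-tar digest constant defined
// at the top of this file:
//
//	d := Digest(DigestSha256EmptyTar)
//	d.Algorithm() // "sha256"
//	d.Hex()       // "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"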
func (d Digest) Hex() string { return string(d[d.sepIndex()+1:]) } func (d Digest) String() string { return string(d) } func (d Digest) sepIndex() int { i := strings.Index(string(d), ":") if i < 0 { panic("could not find ':' in digest: " + d) } return i } distribution-2.3.0/digest/digest_test.go000066400000000000000000000041151265472114500203670ustar00rootroot00000000000000package digest import ( "testing" ) func TestParseDigest(t *testing.T) { for _, testcase := range []struct { input string err error algorithm Algorithm hex string }{ { input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", algorithm: "sha256", hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", }, { input: "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", algorithm: "sha384", hex: "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d", }, { // empty hex input: "sha256:", err: ErrDigestInvalidFormat, }, { // just hex input: "d41d8cd98f00b204e9800998ecf8427e", err: ErrDigestInvalidFormat, }, { // not hex input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", err: ErrDigestInvalidFormat, }, { // too short input: "sha256:abcdef0123456789", err: ErrDigestInvalidLength, }, { // too short (from different algorithm) input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", err: ErrDigestInvalidLength, }, { input: "foo:d41d8cd98f00b204e9800998ecf8427e", err: ErrDigestUnsupported, }, } { digest, err := ParseDigest(testcase.input) if err != testcase.err { t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err) } if testcase.err != nil { continue } if digest.Algorithm() != testcase.algorithm { t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm) } if digest.Hex() != testcase.hex { t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex) } // Parse string return value and check equality newParsed, err := ParseDigest(digest.String()) if err != nil { t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err) } if newParsed != digest { t.Fatalf("expected equal: %q != %q", newParsed, digest) } } } distribution-2.3.0/digest/digester.go000066400000000000000000000104311265472114500176550ustar00rootroot00000000000000package digest import ( "crypto" "fmt" "hash" "io" ) // Algorithm identifies and implementation of a digester by an identifier. // Note the that this defines both the hash algorithm used and the string // encoding. type Algorithm string // supported digest types const ( SHA256 Algorithm = "sha256" // sha256 with hex encoding SHA384 Algorithm = "sha384" // sha384 with hex encoding SHA512 Algorithm = "sha512" // sha512 with hex encoding // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage // digest. Canonical = SHA256 ) var ( // TODO(stevvooe): Follow the pattern of the standard crypto package for // registration of digests. Effectively, we are a registerable set and // common symbol access. // algorithms maps values to hash.Hash implementations. Other algorithms // may be available but they cannot be calculated by the digest package. algorithms = map[Algorithm]crypto.Hash{ SHA256: crypto.SHA256, SHA384: crypto.SHA384, SHA512: crypto.SHA512, } ) // Available returns true if the digest type is available for use. 
If this // returns false, New and Hash will return nil. func (a Algorithm) Available() bool { h, ok := algorithms[a] if !ok { return false } // check availability of the hash, as well return h.Available() } func (a Algorithm) String() string { return string(a) } // Size returns number of bytes returned by the hash. func (a Algorithm) Size() int { h, ok := algorithms[a] if !ok { return 0 } return h.Size() } // Set implemented to allow use of Algorithm as a command line flag. func (a *Algorithm) Set(value string) error { if value == "" { *a = Canonical } else { // just do a type conversion, support is queried with Available. *a = Algorithm(value) } return nil } // New returns a new digester for the specified algorithm. If the algorithm // does not have a digester implementation, nil will be returned. This can be // checked by calling Available before calling New. func (a Algorithm) New() Digester { return &digester{ alg: a, hash: a.Hash(), } } // Hash returns a new hash as used by the algorithm. If not available, the // method will panic. Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { // NOTE(stevvooe): A missing hash is usually a programming error that // must be resolved at compile time. We don't import in the digest // package to allow users to choose their hash implementation (such as // when using stevvooe/resumable or a hardware accelerated package). // // Applications that may want to resolve the hash at runtime should // call Algorithm.Available before call Algorithm.Hash(). panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) } return algorithms[a].New() } // FromReader returns the digest of the reader using the algorithm. func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { digester := a.New() if _, err := io.Copy(digester.Hash(), rd); err != nil { return "", err } return digester.Digest(), nil } // FromBytes digests the input and returns a Digest. func (a Algorithm) FromBytes(p []byte) Digest { digester := a.New() if _, err := digester.Hash().Write(p); err != nil { // Writes to a Hash should never fail. None of the existing // hash implementations in the stdlib or hashes vendored // here can return errors from Write. Having a panic in this // condition instead of having FromBytes return an error value // avoids unnecessary error handling paths in all callers. panic("write to hash function returned error: " + err.Error()) } return digester.Digest() } // TODO(stevvooe): Allow resolution of verifiers using the digest type and // this registration system. // Digester calculates the digest of written data. Writes should go directly // to the return value of Hash, while calling Digest will return the current // value of the digest. type Digester interface { Hash() hash.Hash // provides direct access to underlying hash instance. Digest() Digest } // digester provides a simple digester definition that embeds a hasher. 
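//
// Instances are normally obtained through Algorithm.New; a minimal,
// illustrative use of the resulting Digester (p is assumed to be the
// caller's byte slice):
//
//	d := Canonical.New()
//	d.Hash().Write(p)
//	dgst := d.Digest()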
type digester struct { alg Algorithm hash hash.Hash } func (d *digester) Hash() hash.Hash { return d.hash } func (d *digester) Digest() Digest { return NewDigest(d.alg, d.hash) } distribution-2.3.0/digest/digester_resumable_test.go000066400000000000000000000007771265472114500227670ustar00rootroot00000000000000// +build !noresumabledigest package digest import ( "testing" "github.com/stevvooe/resumable" _ "github.com/stevvooe/resumable/sha256" ) // TestResumableDetection just ensures that the resumable capability of a hash // is exposed through the digester type, which is just a hash plus a Digest // method. func TestResumableDetection(t *testing.T) { d := Canonical.New() if _, ok := d.Hash().(resumable.Hash); !ok { t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash()) } } distribution-2.3.0/digest/doc.go000066400000000000000000000031401265472114500166130ustar00rootroot00000000000000// Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. // More importantly, it provides tools and wrappers to work with // hash.Hash-based digests with little effort. // // Basics // // The format of a digest is simply a string with two parts, dubbed the // "algorithm" and the "digest", separated by a colon: // // : // // An example of a sha256 digest representation follows: // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // In this case, the string "sha256" is the algorithm and the hex bytes are // the "digest". // // Because the Digest type is simply a string, once a valid Digest is // obtained, comparisons are cheap, quick and simple to express with the // standard equality operator. // // Verification // // The main benefit of using the Digest type is simple verification against a // given digest. The Verifier interface, modeled after the stdlib hash.Hash // interface, provides a common write sink for digest verification. After // writing is complete, calling the Verifier.Verified method will indicate // whether or not the stream of bytes matches the target digest. // // Missing Features // // In addition to the above, we intend to add the following features to this // package: // // 1. A Digester type that supports write sink digest calculation. // // 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. // package digest distribution-2.3.0/digest/set.go000066400000000000000000000147221265472114500166510ustar00rootroot00000000000000package digest import ( "errors" "sort" "strings" "sync" ) var ( // ErrDigestNotFound is used when a matching digest // could not be found in a set. ErrDigestNotFound = errors.New("digest not found") // ErrDigestAmbiguous is used when multiple digests // are found in a set. None of the matching digests // should be considered valid matches. ErrDigestAmbiguous = errors.New("ambiguous digest string") ) // Set is used to hold a unique set of digests which // may be easily referenced by easily referenced by a string // representation of the digest as well as short representation. // The uniqueness of the short representation is based on other // digests in the set. If digests are ommited from this set, // collisions in a larger set may not be detected, therefore it // is important to always do short representation lookups on // the complete set of digests. 
To mitigate collisions, an // appropriately long short code should be used. type Set struct { mutex sync.RWMutex entries digestEntries } // NewSet creates an empty set of digests // which may have digests added. func NewSet() *Set { return &Set{ entries: digestEntries{}, } } // checkShortMatch checks whether two digests match as either whole // values or short values. This function does not test equality, // rather whether the second value could match against the first // value. func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { if len(hex) == len(shortHex) { if hex != shortHex { return false } if len(shortAlg) > 0 && string(alg) != shortAlg { return false } } else if !strings.HasPrefix(hex, shortHex) { return false } else if len(shortAlg) > 0 && string(alg) != shortAlg { return false } return true } // Lookup looks for a digest matching the given string representation. // If no digests could be found ErrDigestNotFound will be returned // with an empty digest value. If multiple matches are found // ErrDigestAmbiguous will be returned with an empty digest value. func (dst *Set) Lookup(d string) (Digest, error) { dst.mutex.RLock() defer dst.mutex.RUnlock() if len(dst.entries) == 0 { return "", ErrDigestNotFound } var ( searchFunc func(int) bool alg Algorithm hex string ) dgst, err := ParseDigest(d) if err == ErrDigestInvalidFormat { hex = d searchFunc = func(i int) bool { return dst.entries[i].val >= d } } else { hex = dgst.Hex() alg = dgst.Algorithm() searchFunc = func(i int) bool { if dst.entries[i].val == hex { return dst.entries[i].alg >= alg } return dst.entries[i].val >= hex } } idx := sort.Search(len(dst.entries), searchFunc) if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { return "", ErrDigestNotFound } if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { return dst.entries[idx].digest, nil } if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { return "", ErrDigestAmbiguous } return dst.entries[idx].digest, nil } // Add adds the given digest to the set. An error will be returned // if the given digest is invalid. If the digest already exists in the // set, this operation will be a no-op. func (dst *Set) Add(d Digest) error { if err := d.Validate(); err != nil { return err } dst.mutex.Lock() defer dst.mutex.Unlock() entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} searchFunc := func(i int) bool { if dst.entries[i].val == entry.val { return dst.entries[i].alg >= entry.alg } return dst.entries[i].val >= entry.val } idx := sort.Search(len(dst.entries), searchFunc) if idx == len(dst.entries) { dst.entries = append(dst.entries, entry) return nil } else if dst.entries[idx].digest == d { return nil } entries := append(dst.entries, nil) copy(entries[idx+1:], entries[idx:len(entries)-1]) entries[idx] = entry dst.entries = entries return nil } // Remove removes the given digest from the set. An err will be // returned if the given digest is invalid. If the digest does // not exist in the set, this operation will be a no-op. 
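//
// Illustrative sketch (dgst is assumed to be a valid Digest):
//
//	s := NewSet()
//	_ = s.Add(dgst)
//	_ = s.Remove(dgst) // removing it a second time would be a no-op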
func (dst *Set) Remove(d Digest) error { if err := d.Validate(); err != nil { return err } dst.mutex.Lock() defer dst.mutex.Unlock() entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} searchFunc := func(i int) bool { if dst.entries[i].val == entry.val { return dst.entries[i].alg >= entry.alg } return dst.entries[i].val >= entry.val } idx := sort.Search(len(dst.entries), searchFunc) // Not found if idx is after or value at idx is not digest if idx == len(dst.entries) || dst.entries[idx].digest != d { return nil } entries := dst.entries copy(entries[idx:], entries[idx+1:]) entries = entries[:len(entries)-1] dst.entries = entries return nil } // All returns all the digests in the set func (dst *Set) All() []Digest { dst.mutex.RLock() defer dst.mutex.RUnlock() retValues := make([]Digest, len(dst.entries)) for i := range dst.entries { retValues[i] = dst.entries[i].digest } return retValues } // ShortCodeTable returns a map of Digest to unique short codes. The // length represents the minimum value, the maximum length may be the // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. func ShortCodeTable(dst *Set, length int) map[Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() m := make(map[Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { var short string extended := true for extended { extended = false if len(dst.entries[i].val) <= l { short = dst.entries[i].digest.String() } else { short = dst.entries[i].val[:l] for j := i + 1; j < len(dst.entries); j++ { if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { if j > resetIdx { resetIdx = j } extended = true } else { break } } if extended { l++ } } } m[dst.entries[i].digest] = short if i >= resetIdx { l = length } } return m } type digestEntry struct { alg Algorithm val string digest Digest } type digestEntries []*digestEntry func (d digestEntries) Len() int { return len(d) } func (d digestEntries) Less(i, j int) bool { if d[i].val != d[j].val { return d[i].val < d[j].val } return d[i].alg < d[j].alg } func (d digestEntries) Swap(i, j int) { d[i], d[j] = d[j], d[i] } distribution-2.3.0/digest/set_test.go000066400000000000000000000220611265472114500177030ustar00rootroot00000000000000package digest import ( "crypto/sha256" "encoding/binary" "math/rand" "testing" ) func assertEqualDigests(t *testing.T, d1, d2 Digest) { if d1 != d2 { t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) } } func TestLookup(t *testing.T) { digests := []Digest{ "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", "sha256:5432111111111111111111111111111111111111111111111111111111111111", "sha256:6543111111111111111111111111111111111111111111111111111111111111", "sha256:6432111111111111111111111111111111111111111111111111111111111111", "sha256:6542111111111111111111111111111111111111111111111111111111111111", "sha256:6532111111111111111111111111111111111111111111111111111111111111", } dset := NewSet() for i := range digests { if err := dset.Add(digests[i]); err != nil { t.Fatal(err) } } dgst, err := dset.Lookup("54") if err != nil { t.Fatal(err) } assertEqualDigests(t, dgst, digests[3]) dgst, err = dset.Lookup("1234") if err == nil { t.Fatal("Expected ambiguous error looking 
up: 1234") } if err != ErrDigestAmbiguous { t.Fatal(err) } dgst, err = dset.Lookup("9876") if err == nil { t.Fatal("Expected ambiguous error looking up: 9876") } if err != ErrDigestNotFound { t.Fatal(err) } dgst, err = dset.Lookup("sha256:1234") if err == nil { t.Fatal("Expected ambiguous error looking up: sha256:1234") } if err != ErrDigestAmbiguous { t.Fatal(err) } dgst, err = dset.Lookup("sha256:12345") if err != nil { t.Fatal(err) } assertEqualDigests(t, dgst, digests[0]) dgst, err = dset.Lookup("sha256:12346") if err != nil { t.Fatal(err) } assertEqualDigests(t, dgst, digests[2]) dgst, err = dset.Lookup("12346") if err != nil { t.Fatal(err) } assertEqualDigests(t, dgst, digests[2]) dgst, err = dset.Lookup("12345") if err != nil { t.Fatal(err) } assertEqualDigests(t, dgst, digests[0]) } func TestAddDuplication(t *testing.T) { digests := []Digest{ "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", "sha256:5432111111111111111111111111111111111111111111111111111111111111", "sha256:6543111111111111111111111111111111111111111111111111111111111111", "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", } dset := NewSet() for i := range digests { if err := dset.Add(digests[i]); err != nil { t.Fatal(err) } } if len(dset.entries) != 8 { t.Fatal("Invalid dset size") } if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } if len(dset.entries) != 8 { t.Fatal("Duplicate digest insert allowed") } if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } if len(dset.entries) != 9 { t.Fatal("Insert with different algorithm not allowed") } } func TestRemove(t *testing.T) { digests, err := createDigests(10) if err != nil { t.Fatal(err) } dset := NewSet() for i := range digests { if err := dset.Add(digests[i]); err != nil { t.Fatal(err) } } dgst, err := dset.Lookup(digests[0].String()) if err != nil { t.Fatal(err) } if dgst != digests[0] { t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) } if err := dset.Remove(digests[0]); err != nil { t.Fatal(err) } if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) } } func TestAll(t *testing.T) { digests, err := createDigests(100) if err != nil { t.Fatal(err) } dset := NewSet() for i := range digests { if err := dset.Add(digests[i]); err != nil { t.Fatal(err) } } all := map[Digest]struct{}{} for _, dgst := range dset.All() { all[dgst] = struct{}{} } if len(all) != len(digests) { t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) } for i, dgst := range digests { if _, ok := all[dgst]; !ok { t.Fatalf("Missing element at position %d: %s", i, dgst) } } } func assertEqualShort(t *testing.T, actual, expected string) { if actual != expected { t.Fatalf("Unexpected 
short value:\n\tExpected: %s\n\tActual: %s", expected, actual) } } func TestShortCodeTable(t *testing.T) { digests := []Digest{ "sha256:1234111111111111111111111111111111111111111111111111111111111111", "sha256:1234511111111111111111111111111111111111111111111111111111111111", "sha256:1234611111111111111111111111111111111111111111111111111111111111", "sha256:5432111111111111111111111111111111111111111111111111111111111111", "sha256:6543111111111111111111111111111111111111111111111111111111111111", "sha256:6432111111111111111111111111111111111111111111111111111111111111", "sha256:6542111111111111111111111111111111111111111111111111111111111111", "sha256:6532111111111111111111111111111111111111111111111111111111111111", } dset := NewSet() for i := range digests { if err := dset.Add(digests[i]); err != nil { t.Fatal(err) } } dump := ShortCodeTable(dset, 2) if len(dump) < len(digests) { t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) } assertEqualShort(t, dump[digests[0]], "12341") assertEqualShort(t, dump[digests[1]], "12345") assertEqualShort(t, dump[digests[2]], "12346") assertEqualShort(t, dump[digests[3]], "54") assertEqualShort(t, dump[digests[4]], "6543") assertEqualShort(t, dump[digests[5]], "64") assertEqualShort(t, dump[digests[6]], "6542") assertEqualShort(t, dump[digests[7]], "653") } func createDigests(count int) ([]Digest, error) { r := rand.New(rand.NewSource(25823)) digests := make([]Digest, count) for i := range digests { h := sha256.New() if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { return nil, err } digests[i] = NewDigest("sha256", h) } return digests, nil } func benchAddNTable(b *testing.B, n int) { digests, err := createDigests(n) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} for j := range digests { if err = dset.Add(digests[j]); err != nil { b.Fatal(err) } } } } func benchLookupNTable(b *testing.B, n int, shortLen int) { digests, err := createDigests(n) if err != nil { b.Fatal(err) } dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} for i := range digests { if err := dset.Add(digests[i]); err != nil { b.Fatal(err) } } shorts := make([]string, 0, n) for _, short := range ShortCodeTable(dset, shortLen) { shorts = append(shorts, short) } b.ResetTimer() for i := 0; i < b.N; i++ { if _, err = dset.Lookup(shorts[i%n]); err != nil { b.Fatal(err) } } } func benchRemoveNTable(b *testing.B, n int) { digests, err := createDigests(n) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} b.StopTimer() for j := range digests { if err = dset.Add(digests[j]); err != nil { b.Fatal(err) } } b.StartTimer() for j := range digests { if err = dset.Remove(digests[j]); err != nil { b.Fatal(err) } } } } func benchShortCodeNTable(b *testing.B, n int, shortLen int) { digests, err := createDigests(n) if err != nil { b.Fatal(err) } dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} for i := range digests { if err := dset.Add(digests[i]); err != nil { b.Fatal(err) } } b.ResetTimer() for i := 0; i < b.N; i++ { ShortCodeTable(dset, shortLen) } } func BenchmarkAdd10(b *testing.B) { benchAddNTable(b, 10) } func BenchmarkAdd100(b *testing.B) { benchAddNTable(b, 100) } func BenchmarkAdd1000(b *testing.B) { benchAddNTable(b, 1000) } func BenchmarkRemove10(b *testing.B) { benchRemoveNTable(b, 10) } func BenchmarkRemove100(b *testing.B) { 
benchRemoveNTable(b, 100) } func BenchmarkRemove1000(b *testing.B) { benchRemoveNTable(b, 1000) } func BenchmarkLookup10(b *testing.B) { benchLookupNTable(b, 10, 12) } func BenchmarkLookup100(b *testing.B) { benchLookupNTable(b, 100, 12) } func BenchmarkLookup1000(b *testing.B) { benchLookupNTable(b, 1000, 12) } func BenchmarkShortCode10(b *testing.B) { benchShortCodeNTable(b, 10, 12) } func BenchmarkShortCode100(b *testing.B) { benchShortCodeNTable(b, 100, 12) } func BenchmarkShortCode1000(b *testing.B) { benchShortCodeNTable(b, 1000, 12) } distribution-2.3.0/digest/verifiers.go000066400000000000000000000017661265472114500200600ustar00rootroot00000000000000package digest import ( "hash" "io" ) // Verifier presents a general verification interface to be used with message // digests and other byte stream verifications. Users instantiate a Verifier // from one of the various methods, write the data under test to it then check // the result with the Verified method. type Verifier interface { io.Writer // Verified will return true if the content written to Verifier matches // the digest. Verified() bool } // NewDigestVerifier returns a verifier that compares the written bytes // against a passed in digest. func NewDigestVerifier(d Digest) (Verifier, error) { if err := d.Validate(); err != nil { return nil, err } return hashVerifier{ hash: d.Algorithm().Hash(), digest: d, }, nil } type hashVerifier struct { digest Digest hash hash.Hash } func (hv hashVerifier) Write(p []byte) (n int, err error) { return hv.hash.Write(p) } func (hv hashVerifier) Verified() bool { return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) } distribution-2.3.0/digest/verifiers_test.go000066400000000000000000000020451265472114500211060ustar00rootroot00000000000000package digest import ( "bytes" "crypto/rand" "io" "testing" ) func TestDigestVerifier(t *testing.T) { p := make([]byte, 1<<20) rand.Read(p) digest := FromBytes(p) verifier, err := NewDigestVerifier(digest) if err != nil { t.Fatalf("unexpected error getting digest verifier: %s", err) } io.Copy(verifier, bytes.NewReader(p)) if !verifier.Verified() { t.Fatalf("bytes not verified") } } // TestVerifierUnsupportedDigest ensures that unsupported digest validation is // flowing through verifier creation. func TestVerifierUnsupportedDigest(t *testing.T) { unsupported := Digest("bean:0123456789abcdef") _, err := NewDigestVerifier(unsupported) if err == nil { t.Fatalf("expected error when creating verifier") } if err != ErrDigestUnsupported { t.Fatalf("incorrect error for unsupported digest: %v", err) } } // TODO(stevvooe): Add benchmarks to measure bytes/second throughput for // DigestVerifier. // // The relevant benchmark for comparison can be run with the following // commands: // // go test -bench . crypto/sha1 // distribution-2.3.0/doc.go000066400000000000000000000004661265472114500153440ustar00rootroot00000000000000// Package distribution will define the interfaces for the components of // docker distribution. The goal is to allow users to reliably package, ship // and store content related to docker images. // // This is currently a work in progress. More details are available in the // README.md. 
package distribution distribution-2.3.0/docs/000077500000000000000000000000001265472114500151725ustar00rootroot00000000000000distribution-2.3.0/docs/Dockerfile000066400000000000000000000014261265472114500171670ustar00rootroot00000000000000FROM docs/base:latest MAINTAINER Mary Anthony (@moxiegirl) RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry RUN svn checkout https://github.com/kitematic/kitematic/trunk/docs /docs/content/kitematic RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource ENV PROJECT=registry # To get the git info for this repo COPY . /src COPY . /docs/content/$PROJECT/ distribution-2.3.0/docs/Makefile000066400000000000000000000045561265472114500166440ustar00rootroot00000000000000.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate # env vars passed through directly to Docker's build scripts # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily # `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these DOCKER_ENVS := \ -e BUILDFLAGS \ -e DOCKER_CLIENTONLY \ -e DOCKER_EXECDRIVER \ -e DOCKER_GRAPHDRIVER \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds # to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 # Get the IP ADDRESS DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") HUGO_BIND_IP=0.0.0.0 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE # for some docs workarounds (see below in "docs-build" target) GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: docs docs: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) docs-draft: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-build: # ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files # echo "$(GIT_BRANCH)" > GIT_BRANCH # echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET # echo "$(GITCOMMIT)" > GITCOMMIT docker build -t "$(DOCKER_DOCS_IMAGE)" . 
distribution-2.3.0/docs/apache.md000066400000000000000000000142231265472114500167370ustar00rootroot00000000000000 # Authenticating proxy with apache ## Use-case People already relying on an apache proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. ### Alternatives If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth). ### Solution With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. While we use a simple htpasswd file as an example, any other apache authentication backend should be fairly easy to implement once you are done with the exemple. We also implement push restriction (to a limited user group) for the sake of the exemple. Again, you should modify this to fit your mileage. ### Gotchas While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. ## Setting things up Read again [the requirements](recipes.md#requirements). Ready? Run the following script: ``` mkdir -p auth mkdir -p data # This is the main apache configuration you will use cat < auth/httpd.conf LoadModule headers_module modules/mod_headers.so LoadModule authn_file_module modules/mod_authn_file.so LoadModule authn_core_module modules/mod_authn_core.so LoadModule authz_groupfile_module modules/mod_authz_groupfile.so LoadModule authz_user_module modules/mod_authz_user.so LoadModule authz_core_module modules/mod_authz_core.so LoadModule auth_basic_module modules/mod_auth_basic.so LoadModule access_compat_module modules/mod_access_compat.so LoadModule log_config_module modules/mod_log_config.so LoadModule ssl_module modules/mod_ssl.so LoadModule proxy_module modules/mod_proxy.so LoadModule proxy_http_module modules/mod_proxy_http.so LoadModule unixd_module modules/mod_unixd.so SSLRandomSeed startup builtin SSLRandomSeed connect builtin User daemon Group daemon ServerAdmin you@example.com ErrorLog /proc/self/fd/2 LogLevel warn LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined LogFormat "%h %l %u %t \"%r\" %>s %b" common LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio CustomLog /proc/self/fd/1 common ServerRoot "/usr/local/apache2" Listen 5043 AllowOverride none Require all denied ServerName myregistrydomain.com SSLEngine on SSLCertificateFile /usr/local/apache2/conf/domain.crt SSLCertificateKeyFile /usr/local/apache2/conf/domain.key ## SSL settings recommandation from: https://raymii.org/s/tutorials/Strong_SSL_Security_On_Apache2.html # Anti CRIME SSLCompression off # POODLE and other stuff SSLProtocol all -SSLv2 -SSLv3 -TLSv1 # Secure cypher suites SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH SSLHonorCipherOrder on Header always set "Docker-Distribution-Api-Version" "registry/2.0" Header onsuccess set 
"Docker-Distribution-Api-Version" "registry/2.0" RequestHeader set X-Forwarded-Proto "https" ProxyRequests off ProxyPreserveHost on # no proxy for /error/ (Apache HTTPd errors messages) ProxyPass /error/ ! ProxyPass /v2 http://registry:5000/v2 ProxyPassReverse /v2 http://registry:5000/v2 Order deny,allow Allow from all AuthName "Registry Authentication" AuthType basic AuthUserFile "/usr/local/apache2/conf/httpd.htpasswd" AuthGroupFile "/usr/local/apache2/conf/httpd.groups" # Read access to authentified users Require valid-user # Write access to docker-deployer only Require group pusher EOF # Now, create a password file for "testuser" and "testpassword" docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/httpd.htpasswd # Create another one for "testuserpush" and "testpasswordpush" docker run --entrypoint htpasswd httpd:2.4 -Bbn testuserpush testpasswordpush >> auth/httpd.htpasswd # Create your group file echo "pusher: testuserpush" > auth/httpd.groups # Copy over your certificate files cp domain.crt auth cp domain.key auth # Now create your compose file cat < docker-compose.yml apache: image: "httpd:2.4" hostname: myregistrydomain.com ports: - 5043:5043 links: - registry:registry volumes: - `pwd`/auth:/usr/local/apache2/conf registry: image: registry:2 ports: - 127.0.0.1:5000:5000 volumes: - `pwd`/data:/var/lib/registry EOF ``` ## Starting and stopping Now, start your stack: docker-compose up -d Login with a "push" authorized user (using `testuserpush` and `testpasswordpush`), then tag and push your first image: docker login myregistrydomain.com:5043 docker tag ubuntu myregistrydomain.com:5043/test docker push myregistrydomain.com:5043/test Now, login with a "pull-only" user (using `testuser` and `testpassword`), then pull back the image: docker login myregistrydomain.com:5043 docker pull myregistrydomain.com:5043/test Verify that the "pull-only" can NOT push: docker push myregistrydomain.com:5043/test distribution-2.3.0/docs/architecture.md000066400000000000000000000044121265472114500201770ustar00rootroot00000000000000 # Architecture ## Design **TODO(stevvooe):** Discuss the architecture of the registry, internally and externally, in a few different deployment scenarios. ### Eventual Consistency > **NOTE:** This section belongs somewhere, perhaps in a design document. We > are leaving this here so the information is not lost. Running the registry on eventually consistent backends has been part of the design from the beginning. This section covers some of the approaches to dealing with this reality. There are a few classes of issues that we need to worry about when implementing something on top of the storage drivers: 1. Read-After-Write consistency (see this [article on s3](http://shlomoswidler.com/2009/12/read-after-write-consistency-in-amazon.html)). 2. [Write-Write Conflicts](http://en.wikipedia.org/wiki/Write%E2%80%93write_conflict). In reality, the registry must worry about these kinds of errors when doing the following: 1. Accepting data into a temporary upload file may not have latest data block yet (read-after-write). 2. Moving uploaded data into its blob location (write-write race). 3. Modifying the "current" manifest for given tag (write-write race). 4. A whole slew of operations around deletes (read-after-write, delete-write races, garbage collection, etc.). The backend path layout employs a few techniques to avoid these problems: 1. Large writes are done to private upload directories. 
This alleviates most of the corruption potential under multiple writers by avoiding multiple writers. 2. Constraints in storage driver implementations, such as support for writing after the end of a file to extend it. 3. Digest verification to avoid data corruption. 4. Manifest files are stored by digest and cannot change. 5. All other non-content files (links, hashes, etc.) are written as an atomic unit. Anything that requires additions and deletions is broken out into separate "files". Last writer still wins. Unfortunately, one must play this game when trying to build something like this on top of eventually consistent storage systems. If we run into serious problems, we can wrap the storagedrivers in a shared consistency layer but that would increase complexity and hinder registry cluster performance. distribution-2.3.0/docs/building.md000066400000000000000000000144651265472114500173230ustar00rootroot00000000000000 # Building the registry source ## Use-case This is useful if you intend to actively work on the registry. ### Alternatives Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). ### Gotchas You are expected to know your way around with go & git. If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. ## Build the development environment The first prerequisite of properly building distribution targets is to have a Go development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the environment. If a Go development environment is setup, one can use `go get` to install the `registry` command from the current latest: go get github.com/docker/distribution/cmd/registry The above will install the source repository into the `GOPATH`. Now create the directory for the registry data (this might require you to set permissions properly) mkdir -p /var/lib/registry ... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. The `registry` binary can then be run with the following: $ $GOPATH/bin/registry --version $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown > __NOTE:__ While you do not need to use `go get` to checkout the distribution > project, for these build instructions to work, the project must be checked > out in the correct location in the `GOPATH`. This should almost always be > `$GOPATH/src/github.com/docker/distribution`. The registry can be run with the default config using the following incantation: $ $GOPATH/bin/registry $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown INFO[0000] debug server listening localhost:5001 If it is working, one should see the above log messages. 
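To confirm the registry is actually serving requests, you can probe the API base endpoint from another terminal (a quick sketch, assuming the default `:5000` listener shown in the log output above):

    curl -i http://localhost:5000/v2/

A `200 OK` response (or a `401` challenge, if authentication has been configured) indicates the daemon is up and speaking the v2 registry API.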
### Repeatable Builds For the full development experience, one should `cd` into `$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` commands, such as `go test`, should work per package (please see [Developing](#developing) if they don't work). A `Makefile` has been provided as a convenience to support repeatable builds. Please install the following into `GOPATH` for it to work: go get github.com/tools/godep github.com/golang/lint/golint **TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. Once these commands are available in the `GOPATH`, run `make` to get a full build: $ GOPATH=`godep path`:$GOPATH make + clean + fmt + vet + lint + build github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar github.com/Sirupsen/logrus github.com/docker/libtrust ... github.com/yvasiyarov/gorelic github.com/docker/distribution/registry/handlers github.com/docker/distribution/cmd/registry + test ... ok github.com/docker/distribution/digest 7.875s ok github.com/docker/distribution/manifest 0.028s ok github.com/docker/distribution/notifications 17.322s ? github.com/docker/distribution/registry [no test files] ok github.com/docker/distribution/registry/api/v2 0.101s ? github.com/docker/distribution/registry/auth [no test files] ok github.com/docker/distribution/registry/auth/silly 0.011s ... + /Users/sday/go/src/github.com/docker/distribution/bin/registry + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + binaries The above provides a repeatable build using the contents of the vendored Godeps directory. This includes formatting, vetting, linting, building, testing and generating tagged binaries. We can verify this worked by running the registry binary generated in the "./bin" directory: $ ./bin/registry -version ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m ### Developing The above approaches are helpful for small experimentation. If more complex tasks are at hand, it is recommended to employ the full power of `godep`. The Makefile is designed to have its `GOPATH` defined externally. This allows one to experiment with various development environment setups. This is primarily useful when testing upstream bugfixes, by modifying local code. This can be demonstrated using `godep` to migrate the `GOPATH` to use the specified dependencies. The `GOPATH` can be migrated to the current package versions declared in `Godeps` with the following command: godep restore > **WARNING:** This command will checkout versions of the code specified in > Godeps/Godeps.json, modifying the contents of `GOPATH`. If this is > undesired, it is recommended to create a workspace devoted to work on the > _Distribution_ project. With a successful run of the above command, one can now use `make` without specifying the `GOPATH`: make If that is successful, standard `go` commands, such as `go test` should work, per package, without issue. ### Optional build tags Optional [build tags](http://golang.org/pkg/go/build/) can be provided using the environment variable `DOCKER_BUILDTAGS`. 
To enable the [Ceph RADOS storage driver](storage-drivers/rados.md) (librados-dev and librbd-dev will be required to build the bindings): export DOCKER_BUILDTAGS='include_rados' distribution-2.3.0/docs/configuration.md000066400000000000000000001333531265472114500203730ustar00rootroot00000000000000 # Registry Configuration Reference The Registry configuration is based on a YAML file, detailed below. While it comes with sane default values out of the box, you are heavily encouraged to review it exhaustively before moving your systems to production. ## Override specific configuration options In a typical setup where you run your Registry from the official image, you can specify a configuration variable from the environment by passing `-e` arguments to your `docker run` stanza, or from within a Dockerfile using the `ENV` instruction. To override a configuration option, create an environment variable named `REGISTRY_variable` where *`variable`* is the name of the configuration option and the `_` (underscore) represents indention levels. For example, you can configure the `rootdirectory` of the `filesystem` storage backend: storage: filesystem: rootdirectory: /var/lib/registry To override this value, set an environment variable like this: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere This variable overrides the `/var/lib/registry` value to the `/somewhere` directory. >**NOTE**: It is highly recommended to create a base configuration file with which environment variables can be used to tweak individual values. Overriding configuration sections with environment variables is not recommended. ## Overriding the entire configuration file If the default configuration is not a sound basis for your usage, or if you are having issues overriding keys from the environment, you can specify an alternate YAML configuration file by mounting it as a volume in the container. Typically, create a new configuration file from scratch, and call it `config.yml`, then: docker run -d -p 5000:5000 --restart=always --name registry \ -v `pwd`/config.yml:/etc/docker/registry/config.yml \ registry:2 You can (and probably should) use [this as a starting point](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). ## List of configuration options This section lists all the registry configuration options. Some options in the list are mutually exclusive. So, make sure to read the detailed reference information about each option that appears later in this page. 
version: 0.1 log: level: debug formatter: text fields: service: registry environment: staging hooks: - type: mail disabled: true levels: - panic options: smtp: addr: mail.example.com:25 username: mailuser password: password insecure: true from: sender@example.com to: - errors@example.com loglevel: debug # deprecated: use "log" storage: filesystem: rootdirectory: /var/lib/registry azure: accountname: accountname accountkey: base64encodedaccountkey container: containername gcs: bucket: bucketname keyfile: /path/to/keyfile rootdirectory: /gcs/object/name/prefix s3: accesskey: awsaccesskey secretkey: awssecretkey region: us-west-1 bucket: bucketname encrypt: true secure: true v4auth: true chunksize: 5242880 rootdirectory: /s3/object/name/prefix rados: poolname: radospool username: radosuser chunksize: 4194304 swift: username: username password: password authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth tenant: tenantname tenantid: tenantid domain: domain name for Openstack Identity v3 API domainid: domain id for Openstack Identity v3 API insecureskipverify: true region: fr container: containername rootdirectory: /swift/object/name/prefix oss: accesskeyid: accesskeyid accesskeysecret: accesskeysecret region: OSS region name endpoint: optional endpoints internal: optional internal endpoint bucket: OSS bucket encrypt: optional data encryption setting secure: optional ssl setting chunksize: optional size valye rootdirectory: optional root directory inmemory: # This driver takes no parameters delete: enabled: false redirect: disable: false cache: blobdescriptor: redis maintenance: uploadpurging: enabled: true age: 168h interval: 24h dryrun: false readonly: enabled: false auth: silly: realm: silly-realm service: silly-service token: realm: token-realm service: token-service issuer: registry-token-issuer rootcertbundle: /root/certs/bundle htpasswd: realm: basic-realm path: /path/to/htpasswd middleware: registry: - name: ARegistryMiddleware options: foo: bar repository: - name: ARepositoryMiddleware options: foo: bar storage: - name: cloudfront options: baseurl: https://my.cloudfronted.domain.com/ privatekey: /path/to/pem keypairid: cloudfrontkeypairid duration: 3000 reporting: bugsnag: apikey: bugsnagapikey releasestage: bugsnagreleasestage endpoint: bugsnagendpoint newrelic: licensekey: newreliclicensekey name: newrelicname verbose: true http: addr: localhost:5000 prefix: /my/nested/registry/ host: https://myregistryaddress.org:5000 secret: asecretforlocaldevelopment tls: certificate: /path/to/x509/public key: /path/to/x509/private clientcas: - /path/to/ca.pem - /path/to/another/ca.pem debug: addr: localhost:5001 headers: X-Content-Type-Options: [nosniff] notifications: endpoints: - name: alistener disabled: false url: https://my.listener.com/event headers: timeout: 500 threshold: 5 backoff: 1000 redis: addr: localhost:6379 password: asecret db: 0 dialtimeout: 10ms readtimeout: 10ms writetimeout: 10ms pool: maxidle: 16 maxactive: 64 idletimeout: 300s health: storagedriver: enabled: true interval: 10s threshold: 3 file: - file: /path/to/checked/file interval: 10s http: - uri: http://server.to.check/must/return/200 headers: Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] statuscode: 200 timeout: 3s interval: 10s threshold: 3 tcp: - addr: redis-server.domain.com:6379 timeout: 3s interval: 10s threshold: 3 proxy: remoteurl: https://registry-1.docker.io username: [username] password: [password] In some instances a 
configuration option is **optional** but it contains child options marked as **required**. This indicates that you can omit the parent with all its children. However, if the parent is included, you must also include all the children marked **required**. ## version version: 0.1 The `version` option is **required**. It specifies the configuration's version. It is expected to remain a top-level field, to allow for a consistent version check before parsing the remainder of the configuration file. ## log The `log` subsection configures the behavior of the logging system. The logging system outputs everything to stdout. You can adjust the granularity and format with this configuration section. log: level: debug formatter: text fields: service: registry environment: staging
| Parameter | Required | Description |
|-----------|----------|-------------|
| `level` | no | Sets the sensitivity of logging output. Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. |
| `formatter` | no | Selects the format of logging output. The format primarily affects how keyed attributes for a log line are encoded. Options are `text`, `json` and `logstash`. The default is `text`. |
| `fields` | no | A map of field names to values. These are added to every log line for the context. This is useful for identifying the source of log messages after they have been mixed into other systems. |
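Following the `REGISTRY_variable` override scheme described earlier, any of these settings can also be supplied from the environment. For example, a sketch of raising the log level without editing the configuration file (the variable name is derived from the `log` → `level` nesting):

    docker run -d -p 5000:5000 --restart=always --name registry \
      -e REGISTRY_LOG_LEVEL=warn \
      registry:2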
## hooks hooks: - type: mail levels: - panic options: smtp: addr: smtp.sendhost.com:25 username: sendername password: password insecure: true from: name@sendhost.com to: - name@receivehost.com The `hooks` subsection configures the logging hooks' behavior. This subsection includes a sequence handler which you can use for sending mail, for example. Refer to `loglevel` to configure the level of messages printed. ## loglevel > **DEPRECATED:** Please use [log](#log) instead. loglevel: debug Permitted values are `error`, `warn`, `info` and `debug`. The default is `info`. ## storage storage: filesystem: rootdirectory: /var/lib/registry azure: accountname: accountname accountkey: base64encodedaccountkey container: containername gcs: bucket: bucketname keyfile: /path/to/keyfile rootdirectory: /gcs/object/name/prefix s3: accesskey: awsaccesskey secretkey: awssecretkey region: us-west-1 bucket: bucketname encrypt: true secure: true v4auth: true chunksize: 5242880 rootdirectory: /s3/object/name/prefix rados: poolname: radospool username: radosuser chunksize: 4194304 swift: username: username password: password authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth tenant: tenantname tenantid: tenantid domain: domain name for Openstack Identity v3 API domainid: domain id for Openstack Identity v3 API insecureskipverify: true region: fr container: containername rootdirectory: /swift/object/name/prefix oss: accesskeyid: accesskeyid accesskeysecret: accesskeysecret region: OSS region name endpoint: optional endpoints internal: optional internal endpoint bucket: OSS bucket encrypt: optional data encryption setting secure: optional ssl setting chunksize: optional size valye rootdirectory: optional root directory inmemory: delete: enabled: false cache: blobdescriptor: inmemory maintenance: uploadpurging: enabled: true age: 168h interval: 24h dryrun: false redirect: disable: false The storage option is **required** and defines which storage backend is in use. You must configure one backend; if you configure more, the registry returns an error. You can choose any of these backend storage drivers:
| Storage driver | Description |
|----------------|-------------|
| `filesystem` | Uses the local disk to store registry files. It is ideal for development and may be appropriate for some small-scale production applications. See the driver's reference documentation. |
| `azure` | Uses Microsoft's Azure Blob Storage. See the driver's reference documentation. |
| `gcs` | Uses Google Cloud Storage. See the driver's reference documentation. |
| `rados` | Uses Ceph Object Storage. See the driver's reference documentation. |
| `s3` | Uses Amazon's Simple Storage Service (S3). See the driver's reference documentation. |
| `swift` | Uses Openstack Swift object storage. See the driver's reference documentation. |
| `oss` | Uses Aliyun OSS for object storage. See the driver's reference documentation. |
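The listing above shows every driver side by side for reference only; a working configuration names exactly one backend. A minimal, filesystem-backed sketch:

    storage:
      filesystem:
        rootdirectory: /var/lib/registry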
For purely tests purposes, you can use the [`inmemory` storage driver](storage-drivers/inmemory.md). If you would like to run a registry from volatile memory, use the [`filesystem` driver](storage-drivers/filesystem.md) on a ramdisk. If you are deploying a registry on Windows, be aware that a Windows volume mounted from the host is not recommended. Instead, you can use a S3, or Azure, backing data-store. If you do use a Windows volume, you must ensure that the `PATH` to the mount point is within Windows' `MAX_PATH` limits (typically 255 characters). Failure to do so can result in the following error message: mkdir /XXX protocol error and your registry will not function properly. ### Maintenance Currently upload purging and read-only mode are the only maintenance functions available. These and future maintenance functions which are related to storage can be configured under the maintenance section. ### Upload Purging Upload purging is a background process that periodically removes orphaned files from the upload directories of the registry. Upload purging is enabled by default. To configure upload directory purging, the following parameters must be set. | Parameter | Required | Description --------- | -------- | ----------- `enabled` | yes | Set to true to enable upload purging. Default=true. | `age` | yes | Upload directories which are older than this age will be deleted. Default=168h (1 week) `interval` | yes | The interval between upload directory purging. Default=24h. `dryrun` | yes | dryrun can be set to true to obtain a summary of what directories will be deleted. Default=false. Note: `age` and `interval` are strings containing a number with optional fraction and a unit suffix: e.g. 45m, 2h10m, 168h (1 week). ### Read-only mode If the `readonly` section under `maintenance` has `enabled` set to `true`, clients will not be allowed to write to the registry. This mode is useful to temporarily prevent writes to the backend storage so a garbage collection pass can be run. Before running garbage collection, the registry should be restarted with readonly's `enabled` set to true. After the garbage collection pass finishes, the registry may be restarted again, this time with `readonly` removed from the configuration (or set to false). ### delete Use the `delete` subsection to enable the deletion of image blobs and manifests by digest. It defaults to false, but it can be enabled by writing the following on the configuration file: delete: enabled: true ### cache Use the `cache` subsection to enable caching of data accessed in the storage backend. Currently, the only available cache provides fast access to layer metadata. This, if configured, uses the `blobdescriptor` field. You can set `blobdescriptor` field to `redis` or `inmemory`. The `redis` value uses a Redis pool to cache layer metadata. The `inmemory` value uses an in memory map. >**NOTE**: Formerly, `blobdescriptor` was known as `layerinfo`. While these >are equivalent, `layerinfo` has been deprecated, in favor or >`blobdescriptor`. ### redirect The `redirect` subsection provides configuration for managing redirects from content backends. For backends that support it, redirecting is enabled by default. Certain deployment scenarios may prefer to route all data through the Registry, rather than redirecting to the backend. This may be more efficient when using a backend that is not co-located or when a registry instance is doing aggressive caching. 
Redirects can be disabled by adding a single flag `disable`, set to `true` under the `redirect` section: redirect: disable: true ## auth auth: silly: realm: silly-realm service: silly-service token: realm: token-realm service: token-service issuer: registry-token-issuer rootcertbundle: /root/certs/bundle htpasswd: realm: basic-realm path: /path/to/htpasswd The `auth` option is **optional**. There are currently 3 possible auth providers, `silly`, `token` and `htpasswd`. You can configure only one `auth` provider. ### silly The `silly` auth is only for development purposes. It simply checks for the existence of the `Authorization` header in the HTTP request. It has no regard for the header's value. If the header does not exist, the `silly` auth responds with a challenge response, echoing back the realm, service, and scope that access was denied for. The following values are used to configure the response:
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
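A sketch of the behavior described above, against a local registry with `silly` auth enabled (the exact challenge echoes whatever realm and service you configured):

    # no Authorization header: expect a challenge response denying access
    curl -i http://localhost:5000/v2/

    # the header's value is not inspected, so any Authorization header is accepted
    curl -i -H "Authorization: anything" http://localhost:5000/v2/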
### token Token based authentication allows the authentication system to be decoupled from the registry. It is a well established authentication paradigm with a high degree of security.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `service` | yes | The service being authenticated. |
| `issuer` | yes | The name of the token issuer. The issuer inserts this into the token so it must match the value configured for the issuer. |
| `rootcertbundle` | yes | The absolute path to the root certificate bundle. This bundle contains the public part of the certificates used to sign authentication tokens. |
For more information about Token based authentication configuration, see the [specification](spec/auth/token.md). ### htpasswd The _htpasswd_ authentication backend allows one to configure basic auth using an [Apache htpasswd file](https://httpd.apache.org/docs/2.4/programs/htpasswd.html). Only [`bcrypt`](http://en.wikipedia.org/wiki/Bcrypt) format passwords are supported. Entries with other hash types will be ignored. The htpasswd file is loaded once, at startup. If the file is invalid, the registry will display an error and will not start. > __WARNING:__ This authentication scheme should only be used with TLS > configured, since basic authentication sends passwords as part of the http > header.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `realm` | yes | The realm in which the registry server authenticates. |
| `path` | yes | Path to htpasswd file to load at startup. |
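A sketch of wiring this up, using the `htpasswd` tool from the `httpd:2.4` image (as in the apache recipe elsewhere in these docs) to produce the bcrypt entries the registry requires; the user, password and paths are placeholders:

    # create a bcrypt-format entry; entries in other hash formats are ignored
    docker run --entrypoint htpasswd httpd:2.4 -Bbn testuser testpassword > auth/htpasswd

and the corresponding configuration section:

    auth:
      htpasswd:
        realm: basic-realm
        path: /path/to/htpasswd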
## middleware The `middleware` option is **optional**. Use this option to inject middleware at named hook points. All middleware must implement the same interface as the object they're wrapping. This means a registry middleware must implement the `distribution.Namespace` interface, repository middleware must implement `distribution.Repository`, and storage middleware must implement `driver.StorageDriver`. Currently only one middleware, `cloudfront`, a storage middleware, is supported in the registry implementation. middleware: registry: - name: ARegistryMiddleware options: foo: bar repository: - name: ARepositoryMiddleware options: foo: bar storage: - name: cloudfront options: baseurl: https://my.cloudfronted.domain.com/ privatekey: /path/to/pem keypairid: cloudfrontkeypairid duration: 3000 Each middleware entry has `name` and `options` entries. The `name` must correspond to the name under which the middleware registers itself. The `options` field is a map that details custom configuration required to initialize the middleware. It is treated as a `map[string]interface{}`. As such, it supports any interesting structures desired, leaving it up to the middleware initialization function to best determine how to handle the specific interpretation of the options. ### cloudfront
| Parameter | Required | Description |
|-----------|----------|-------------|
| `baseurl` | yes | `SCHEME://HOST[/PATH]` at which Cloudfront is served. |
| `privatekey` | yes | Private key for Cloudfront provided by AWS. |
| `keypairid` | yes | Key pair ID provided by AWS. |
| `duration` | no | Duration for which a signed URL should be valid. |
## reporting reporting: bugsnag: apikey: bugsnagapikey releasestage: bugsnagreleasestage endpoint: bugsnagendpoint newrelic: licensekey: newreliclicensekey name: newrelicname verbose: true The `reporting` option is **optional** and configures error and metrics reporting tools. At the moment only two services are supported, [New Relic](http://newrelic.com/) and [Bugsnag](http://bugsnag.com); a valid configuration may contain both. ### bugsnag
| Parameter | Required | Description |
|-----------|----------|-------------|
| `apikey` | yes | API key provided by Bugsnag. |
| `releasestage` | no | Tracks where the registry is deployed, for example, `production`, `staging`, or `development`. |
| `endpoint` | no | Specify the enterprise Bugsnag endpoint. |
### newrelic
| Parameter | Required | Description |
|-----------|----------|-------------|
| `licensekey` | yes | License key provided by New Relic. |
| `name` | no | New Relic application name. |
| `verbose` | no | Enable New Relic debugging output on stdout. |
## http http: addr: localhost:5000 net: tcp prefix: /my/nested/registry/ host: https://myregistryaddress.org:5000 secret: asecretforlocaldevelopment tls: certificate: /path/to/x509/public key: /path/to/x509/private clientcas: - /path/to/ca.pem - /path/to/another/ca.pem debug: addr: localhost:5001 headers: X-Content-Type-Options: [nosniff] The `http` option details the configuration for the HTTP server that hosts the registry.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | The address for which the server should accept connections. The form depends on the network type (see the `net` option): `HOST:PORT` for `tcp` and `FILE` for a unix socket. |
| `net` | no | The network which is used to create a listening socket. Known networks are `unix` and `tcp`. The default empty value means `tcp`. |
| `prefix` | no | If the server does not run at the root path, use this value to specify the prefix. The root path is the section before `v2`. It should have both preceding and trailing slashes, for example `/path/`. |
| `host` | no | This parameter specifies an externally-reachable address for the registry, as a fully qualified URL. If present, it is used when creating generated URLs. Otherwise, these URLs are derived from client requests. |
| `secret` | yes | A random piece of data. This is used to sign state that may be stored with the client to protect against tampering. For production environments you should generate a random piece of data using a cryptographically secure random generator. This configuration parameter may be omitted, in which case the registry will automatically generate a secret at launch. |

> __WARNING:__ If you are building a cluster of registries behind a load balancer, you MUST ensure the secret is the same for all registries.
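For example, the `net` and `addr` options above combine to let the registry listen on a unix socket rather than a TCP port; a minimal sketch (the socket path is illustrative):

    http:
      net: unix
      addr: /var/run/registry.sock
      secret: asecretforlocaldevelopment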

### tls The `tls` struct within `http` is **optional**. Use this to configure TLS for the server. If you already have a server such as Nginx or Apache running on the same host as the registry, you may prefer to configure TLS termination there and proxy connections to the registry server.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `certificate` | yes | Absolute path to the x509 certificate file. |
| `key` | yes | Absolute path to the x509 private key file. |
| `clientcas` | no | An array of absolute paths to x509 CA files. |
### debug The `debug` option is **optional** . Use it to configure a debug server that can be helpful in diagnosing problems. The debug endpoint can be used for monitoring registry metrics and health, as well as profiling. Sensitive information may be available via the debug endpoint. Please be certain that access to the debug endpoint is locked down in a production environment. The `debug` section takes a single, required `addr` parameter. This parameter specifies the `HOST:PORT` on which the debug server should accept connections. ### headers The `headers` option is **optional** . Use it to specify headers that the HTTP server should include in responses. This can be used for security headers such as `Strict-Transport-Security`. The `headers` option should contain an option for each header to include, where the parameter name is the header's name, and the parameter value a list of the header's payload values. Including `X-Content-Type-Options: [nosniff]` is recommended, so that browsers will not interpret content as HTML if they are directed to load a page from the registry. This header is included in the example configuration files. ## notifications notifications: endpoints: - name: alistener disabled: false url: https://my.listener.com/event headers: timeout: 500 threshold: 5 backoff: 1000 The notifications option is **optional** and currently may contain a single option, `endpoints`. ### endpoints Endpoints is a list of named services (URLs) that can accept event notifications.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `name` | yes | A human readable name for the service. |
| `disabled` | no | A boolean to enable/disable notifications for a service. |
| `url` | yes | The URL to which events should be published. |
| `headers` | yes | Static headers to add to each request. Each header's name should be a key underneath `headers`, and each value is a list of payloads for that header name. Note that values must always be lists. |
| `timeout` | yes | An HTTP timeout value. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `threshold` | yes | An integer specifying how long to wait before backing off a failure. |
| `backoff` | yes | How long the system backs off before retrying. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
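Putting these fields together, a single authenticated listener might be declared like this (a sketch; the URL and token are placeholders, and note that header values must always be lists):

    notifications:
      endpoints:
        - name: alistener
          url: https://my.listener.com/event
          headers:
            Authorization: [Bearer sometoken]
          timeout: 500ms
          threshold: 5
          backoff: 1s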
## redis redis: addr: localhost:6379 password: asecret db: 0 dialtimeout: 10ms readtimeout: 10ms writetimeout: 10ms pool: maxidle: 16 maxactive: 64 idletimeout: 300s Declare parameters for constructing the redis connections. Registry instances may use the Redis instance for several applications. The current purpose is caching information about immutable blobs. Most of the options below control how the registry connects to redis. You can control the pool's behavior with the [pool](#pool) subsection. It's advisable to configure Redis itself with the **allkeys-lru** eviction policy as the registry does not set an expire value on keys.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `addr` | yes | Address (host and port) of redis instance. |
| `password` | no | A password used to authenticate to the redis instance. |
| `db` | no | Selects the db for each connection. |
| `dialtimeout` | no | Timeout for connecting to a redis instance. |
| `readtimeout` | no | Timeout for reading from redis connections. |
| `writetimeout` | no | Timeout for writing to redis connections. |
### pool pool: maxidle: 16 maxactive: 64 idletimeout: 300s Configure the behavior of the Redis connection pool.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `maxidle` | no | Sets the maximum number of idle connections. |
| `maxactive` | no | Sets the maximum number of connections that should be opened before blocking a connection request. |
| `idletimeout` | no | Sets the amount of time to wait before closing inactive connections. |
## health health: storagedriver: enabled: true interval: 10s threshold: 3 file: - file: /path/to/checked/file interval: 10s http: - uri: http://server.to.check/must/return/200 headers: Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] statuscode: 200 timeout: 3s interval: 10s threshold: 3 tcp: - addr: redis-server.domain.com:6379 timeout: 3s interval: 10s threshold: 3 The health option is **optional**. It may contain preferences for a periodic health check on the storage driver's backend storage, and optional periodic checks on local files, HTTP URIs, and/or TCP servers. The results of the health checks are available at /debug/health on the debug HTTP server if the debug HTTP server is enabled (see http section). ### storagedriver storagedriver contains options for a health check on the configured storage driver's backend storage. enabled must be set to true for this health check to be active.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `enabled` | yes | "true" to enable the storage driver health check or "false" to disable it. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
### file file is a list of paths to be periodically checked for the existence of a file. If a file exists at the given path, the health check will fail. This can be used as a way of bringing a registry out of rotation by creating a file.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `file` | yes | The path to check for the existence of a file. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
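For example, this check can implement the "out of rotation" trick mentioned above: configure a path that normally does not exist, then create it when you want the instance reported unhealthy (a sketch; the path is arbitrary):

    health:
      file:
        - file: /etc/registry/down
          interval: 10s

and toggle it from the host:

    # take the instance out of rotation
    touch /etc/registry/down
    # and bring it back
    rm /etc/registry/down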
### http http is a list of HTTP URIs to be periodically checked with HEAD requests. If a HEAD request doesn't complete or returns an unexpected status code, the health check will fail.
| Parameter | Required | Description |
|-----------|----------|-------------|
| `uri` | yes | The URI to check. |
| `headers` | no | Static headers to add to each request. Each header's name should be a key underneath `headers`, and each value is a list of payloads for that header name. Note that values must always be lists. |
| `statuscode` | no | Expected status code from the HTTP URI. Defaults to 200. |
| `timeout` | no | The length of time to wait before timing out the HTTP request. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. |
| `interval` | no | The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are `ns` (nanoseconds), `us` (microseconds), `ms` (milliseconds), `s` (seconds), `m` (minutes) and `h` (hours). If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted. |
| `threshold` | no | An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state. |
### tcp tcp is a list of TCP addresses to be periodically checked with connection attempts. The addresses must include port numbers. If a connection attempt fails, the health check will fail.
Parameter Required Description
addr yes The TCP address to connect to, including a port number.
timeout no The length of time to wait before timing out the TCP connection. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are:
  • ns (nanoseconds)
  • us (microseconds)
  • ms (milliseconds)
  • s (seconds)
  • m (minutes)
  • h (hours)
If you omit the suffix, the system interprets the value as nanoseconds.
interval no The length of time to wait between repetitions of the check. This field takes a positive integer and an optional suffix indicating the unit of time. Possible units are:
  • ns (nanoseconds)
  • us (microseconds)
  • ms (milliseconds)
  • s (seconds)
  • m (minutes)
  • h (hours)
If you omit the suffix, the system interprets the value as nanoseconds. The default value is 10 seconds if this field is omitted.
threshold no An integer specifying the number of times the check must fail before the check triggers an unhealthy state. If this field is not specified, a single failure will trigger an unhealthy state.
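
A sketch of a tcp check against a backend dependency (the address below is illustrative; any reachable host:port works):

    health:
      tcp:
        - addr: redis.example.com:6379   # illustrative address; must include the port
          timeout: 3s
          interval: 10s
          threshold: 3
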
## Proxy

    proxy:
      remoteurl: https://registry-1.docker.io
      username: [username]
      password: [password]

Proxy enables a registry to be configured as a pull through cache to the official Docker Hub. See [mirror](mirror.md) for more information.

Pushing to a registry configured as a pull through cache is currently unsupported.
Parameter Required Description
remoteurl yes The URL of the official Docker Hub
username no The username of the Docker Hub account
password no The password for the official Docker Hub account
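
The same settings can also be supplied as environment variables of the form `REGISTRY_PROXY_*` when running the registry container, following the override convention described earlier in this document. A sketch (the container name and credential placeholders are illustrative):

    # illustrative pull-through-cache invocation; replace [username]/[password]
    docker run -d -p 5000:5000 --restart=always --name registry-mirror \
      -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
      -e REGISTRY_PROXY_USERNAME=[username] \
      -e REGISTRY_PROXY_PASSWORD=[password] \
      registry:2
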
To enable pulling private repositories (e.g. `batman/robin`) a username and password for user `batman` must be specified. Note: These private repositories will be stored in the proxy cache's storage and relevant measures should be taken to protect access to this.

## Example: Development configuration

The following is a simple example you can use for local development:

    version: 0.1
    log:
      level: debug
    storage:
      filesystem:
        rootdirectory: /var/lib/registry
    http:
      addr: localhost:5000
      secret: asecretforlocaldevelopment
      debug:
        addr: localhost:5001

The above configures the registry instance to run on port `5000`, binding to `localhost`, with the `debug` server enabled. Registry data storage is in the `/var/lib/registry` directory. Logging is in `debug` mode, which is the most verbose.

A similar simple configuration is available at [config-example.yml](https://github.com/docker/distribution/blob/master/cmd/registry/config-example.yml). Both are generally useful for local development.

## Example: Middleware configuration

This example illustrates how to configure storage middleware in a registry. Middleware allows the registry to serve layers via a content delivery network (CDN). This is useful for reducing requests to the storage layer.

Currently, the registry supports [Amazon Cloudfront](http://aws.amazon.com/cloudfront/). You can only use Cloudfront in conjunction with the S3 storage driver.
Parameter Description
name The storage middleware name. Currently cloudfront is an accepted value.
disabled Set to false to easily disable the middleware.
options: A set of key/value options to configure the middleware.
  • baseurl: The Cloudfront base URL.
  • privatekey: The location of your AWS private key on the filesystem.
  • keypairid: The ID of your Cloudfront keypair.
  • duration: The duration in minutes for which the URL is valid. Default is 20.
The following example illustrates these values:

    middleware:
      storage:
        - name: cloudfront
          disabled: false
          options:
            baseurl: http://d111111abcdef8.cloudfront.net
            privatekey: /path/to/asecret.pem
            keypairid: asecret
            duration: 60

> **Note**: Cloudfront keys exist separately to other AWS keys. See
> [the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html)
> for more information.

distribution-2.3.0/docs/deploying.md000066400000000000000000000231701265472114500175110ustar00rootroot00000000000000

# Deploying a registry server

You need to [install Docker version 1.6.0 or newer](https://docs.docker.com/installation/).

## Running on localhost

Start your registry:

    docker run -d -p 5000:5000 --restart=always --name registry registry:2

You can now use it with docker. Get any image from the hub and tag it to point to your registry:

    docker pull ubuntu && docker tag ubuntu localhost:5000/ubuntu

... then push it to your registry:

    docker push localhost:5000/ubuntu

... then pull it back from your registry:

    docker pull localhost:5000/ubuntu

To stop your registry, you would:

    docker stop registry && docker rm -v registry

## Storage

By default, your registry data is persisted as a [docker volume](https://docs.docker.com/userguide/dockervolumes/) on the host filesystem. Properly understanding volumes is essential if you want to stick with a local filesystem storage.

Specifically, you might want to point your volume location to a specific place in order to more easily access your registry data. To do so you can:

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/data:/var/lib/registry \
      registry:2

### Alternatives

You should usually consider using [another storage backend](https://github.com/docker/distribution/blob/master/docs/storagedrivers.md) instead of the local filesystem. Use the [storage configuration options](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) to configure an alternate storage backend.

Using one of these will allow you to more easily scale your registry, and leverage your storage redundancy and availability features.

## Running a domain registry

While running on `localhost` has its uses, most people want their registry to be more widely available. To do so, the Docker engine requires you to secure it using TLS, which is conceptually very similar to configuring your web server with SSL.

### Get a certificate

Assuming that you own the domain `myregistrydomain.com`, and that its DNS record points to the host where you are running your registry, you first need to get a certificate from a CA.

Create a `certs` directory:

    mkdir -p certs

Then move and/or rename your crt file to: `certs/domain.crt`, and your key file to: `certs/domain.key`.

Make sure you stopped your registry from the previous steps, then start your registry again with TLS enabled:

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2

You should now be able to access your registry from another docker host:

    docker pull ubuntu
    docker tag ubuntu myregistrydomain.com:5000/ubuntu
    docker push myregistrydomain.com:5000/ubuntu
    docker pull myregistrydomain.com:5000/ubuntu

#### Gotcha

A certificate issuer may supply you with an *intermediate* certificate. In this case, you must combine your certificate with the intermediate's to form a *certificate bundle*.
You can do this using the `cat` command:

    cat domain.crt intermediate-certificates.pem > certs/domain.crt

### Alternatives

While rarely advisable, you may want to use self-signed certificates instead, or use your registry in an insecure fashion. You will find instructions [here](insecure.md).

## Load Balancing Considerations

One may want to use a load balancer to distribute load, terminate TLS or provide high availability. While a full load balancing setup is outside the scope of this document, there are a few considerations that can make the process smoother.

The most important aspect is that a load balanced cluster of registries must share the same resources. For the current version of the registry, this means the following must be the same:

- Storage Driver
- HTTP Secret
- Redis Cache (if configured)

If any of these are different, the registry will have trouble serving requests. As an example, if you're using the filesystem driver, all registry instances must have access to the same filesystem root, which means they should be on the same machine. For other drivers, such as s3 or azure, they should be accessing the same resource, and will likely share an identical configuration.

The _HTTP Secret_ coordinates uploads, so it also must be the same across instances. Configuring different redis instances will work (at the time of writing), but will not be optimal if the instances are not shared, causing more requests to be directed to the backend.

Getting the headers correct is very important. For all responses to any request under the "/v2/" url space, the `Docker-Distribution-API-Version` header should be set to the value "registry/2.0", even for a 4xx response. This header allows the docker engine to quickly resolve authentication realms and fall back to version 1 registries, if necessary. Confirming this is set up correctly can help avoid problems with fallback.

In the same train of thought, you must make sure you are properly sending the `X-Forwarded-Proto`, `X-Forwarded-For` and `Host` headers to their "client-side" values. Failure to do so usually makes the registry issue redirects to internal hostnames or downgrade from https to http.

A properly secured registry should return 401 when the "/v2/" endpoint is hit without credentials. The response should include a `WWW-Authenticate` challenge, providing guidance on how to authenticate, such as with basic auth or a token service.

If the load balancer has health checks, it is recommended to configure it to consider a 401 response as healthy and any other as down. This will secure your registry by ensuring that configuration problems with authentication don't accidentally expose an unprotected registry. If you're using a less sophisticated load balancer, such as Amazon's Elastic Load Balancer, that doesn't allow one to change the healthy response code, health checks can be directed at "/", which will always return a `200 OK` response.

## Restricting access

Except for registries running on secure local networks, registries should always implement access restrictions.

### Native basic auth

The simplest way to achieve access restriction is through basic authentication (this is very similar to other web servers' basic authentication mechanism).

> **Warning**: You **cannot** use authentication with an insecure registry. You have to [configure TLS first](#running-a-domain-registry) for this to work.
First create a password file with one entry for the user "testuser", with password "testpassword":

    mkdir auth
    docker run --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/htpasswd

Make sure you stopped your registry from the previous step, then start it again:

    docker run -d -p 5000:5000 --restart=always --name registry \
      -v `pwd`/auth:/auth \
      -e "REGISTRY_AUTH=htpasswd" \
      -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
      -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
      -v `pwd`/certs:/certs \
      -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
      -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
      registry:2

You should now be able to:

    docker login myregistrydomain.com:5000

And then push and pull images as an authenticated user.

#### Gotcha

Seeing X509 errors is usually a sign you are trying to use self-signed certificates, and failed to [configure your docker daemon properly](insecure.md).

### Alternatives

1. You may want to leverage more advanced basic auth implementations through a proxy design, in front of the registry. You will find examples of such patterns in the [recipes list](recipes.md).

2. Alternatively, the Registry also supports delegated authentication, redirecting users to a specific, trusted token server. That approach requires significantly more investment, and only makes sense if you want to fully configure ACLs and more control over the Registry integration into your global authorization and authentication systems. You will find [background information here](spec/auth/token.md), and [configuration information here](configuration.md#auth).

   Beware that you will have to implement your own authentication service for this to work, or leverage a third-party implementation.

## Managing with Compose

As your registry configuration grows more complex, dealing with it can quickly become tedious. It's highly recommended to use [Docker Compose](https://docs.docker.com/compose/) to facilitate operating your registry.

Here is a simple `docker-compose.yml` example that condenses everything explained so far:

```
registry:
  restart: always
  image: registry:2
  ports:
    - 5000:5000
  environment:
    REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
    REGISTRY_HTTP_TLS_KEY: /certs/domain.key
    REGISTRY_AUTH: htpasswd
    REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
    REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
  volumes:
    - /path/data:/var/lib/registry
    - /path/certs:/certs
    - /path/auth:/auth
```

> **Warning**: replace `/path` with whatever directory holds your `certs` and `auth` folders from above.

You can then start your registry with a simple

    docker-compose up -d

## Next

You will find more specific and advanced information in the following sections:

- [Configuration reference](configuration.md)
- [Working with notifications](notifications.md)
- [Advanced "recipes"](recipes.md)
- [Registry API](spec/api.md)
- [Storage driver model](storagedrivers.md)
- [Token authentication](spec/auth/token.md)

distribution-2.3.0/docs/glossary.md000066400000000000000000000044621265472114500173650ustar00rootroot00000000000000

# Glossary

This page contains definitions for distribution related terms.

**Blob**

A blob is any kind of content that is stored by a Registry under a content-addressable identifier (a "digest").

Layers are a good example of "blobs".

**Image**

An image is a named set of immutable data from which a Docker container can be created.

An image is represented by a json file called a manifest, and is conceptually a set of layers. Image names indicate the location where they can be pulled from and pushed to, as they usually start with a registry domain name and port.

**Layer**

A layer is a tar archive bundling partial content from a filesystem.

Layers from an image are usually extracted in order on top of each other to make up a root filesystem from which containers run.

**Manifest**

A manifest is the JSON representation of an image.

**Namespace**

A namespace is a collection of repositories with a common name prefix.

The namespace with an empty prefix is considered the Global Namespace.

**Registry**

A registry is a service that lets you store and deliver images.

**Repository**

A repository is a set of data containing all versions of a given image.

**Scope**

A scope is the portion of a namespace onto which a given authorization token is granted.

**Tag**

A tag is conceptually a "version" of a named image.

Example: `docker pull myimage:latest` instructs docker to pull the image "myimage" in version "latest".

distribution-2.3.0/docs/help.md000066400000000000000000000015711265472114500164500ustar00rootroot00000000000000

# Getting help

If you need help, or just want to chat, you can reach us:

- on irc: `#docker-distribution` on freenode
- on the [mailing list](https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution) (mail at )

If you want to report a bug:

- be sure to first read about [how to contribute](https://github.com/docker/distribution/blob/master/CONTRIBUTING.md)
- you can then do so on the [GitHub project bugtracker](https://github.com/docker/distribution/issues)

You can also find out more about the Docker project's [Getting Help resources](https://docs.docker.com/opensource/get-help/).

distribution-2.3.0/docs/images/000077500000000000000000000000001265472114500164375ustar00rootroot00000000000000
distribution-2.3.0/docs/images/notifications.gliffy000066400000000000000000001340471265472114500225230ustar00rootroot00000000000000
0.722222222222222,"y":0.0,"rotation":0.0,"id":144,"width":99.66666666666663,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Broadcaster

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":290.0,"y":105.0,"rotation":0.0,"id":160,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":26,"lockAspectRatio":false,"lockShape":false,"children":[{"x":12.92581625076238,"y":17.018834253729665,"rotation":0.0,"id":155,"width":189.57418374923762,"height":151.48116574627034,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":25,"lockAspectRatio":false,"lockShape":false,"children":[{"x":97.57418374923762,"y":58.481165746270335,"rotation":90.0,"id":151,"width":149.0,"height":37.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":21,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":153,"magnitude":1},{"id":154,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":152,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":151,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":151,"magnitude":1},{"id":154,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":153,"width":149.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":151,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Listener

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":67.5,"y":1.0,"rotation":0.0,"id":154,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":152,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":10.074195639419855,"y":17.481165746270335,"rotation":0.0,"id":150,"width":120.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":20,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":80.5,"rotation":0.0,"id":133,"width":117.0,"height":38.5,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":16,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":135,"magnitude":1},{"id":136,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":134,"width":117.0,"height":30.5,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":133,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":133,"magnitude":1},{"id":136,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":135,"width":117.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":133,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

handler

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":51.5,"y":1.0,"rotation":0.0,"id":136,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":134,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":129,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":12,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":131,"magnitude":1},{"id":132,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":130,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":129,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":129,"magnitude":1},{"id":132,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":131,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":129,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

repository

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":132,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":130,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":125,"width":120.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":8,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":127,"magnitude":1},{"id":128,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":126,"width":120.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":125,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":125,"magnitude":1},{"id":128,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":127,"width":120.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":125,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

request

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":53.0,"y":31.0,"rotation":0.0,"id":128,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":126,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.5154455517800614,"y":0.5154455517799761,"rotation":90.39513704250749,"id":145,"width":150.0,"height":150.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_bottom","order":4,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":147,"magnitude":1},{"id":148,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":8.0,"rotation":0.0,"id":146,"width":150.0,"height":142.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":145,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":8.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":145,"magnitude":1},{"id":148,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":147,"width":150.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":145,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":10,"paddingBottom":8,"paddingLeft":10,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":68.0,"y":0.9999999999999432,"rotation":0.0,"id":148,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":146,"px":0.5,"py":0.0,"xOffset":-7.0,"yOffset":-7.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_bottom","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":156,"width":210.0,"height":190.0,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":159,"width":206.0,"height":16.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Registry instance

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":473.0,"y":525.0,"rotation":0.0,"id":115,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":69,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":68,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":109,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[2.0,4.5],[2.0,11.533649282003012],[2.0,18.567298564006137],[2.0,25.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":665.0,"y":530.0,"rotation":0.0,"id":114,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":68,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":100,"py":1.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":112,"py":0.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-2.0,-0.5],[-2.0,6.533649282003012],[-2.0,13.567298564006137],[-2.0,20.60094784600915]],"lockSegments":{},"ortho":true}},"linkMap":[]},{"x":598.0,"y":550.0,"rotation":0.0,"id":112,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":113,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_N

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":420.0,"y":550.0,"rotation":0.0,"id":109,"width":120.0,"height":80.0,"uid":"com.gliffy.shape.network.network_v3.home.cloud","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cloud.network_v3","strokeWidth":2.0,"strokeColor":"#000000","fillColor":"#000000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":111,"width":116.00000000000001,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Remote

Endpoint_1

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}}}]},{"x":540.0,"y":438.5,"rotation":0.0,"id":104,"width":50.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":63,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

. . .

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[]},{"x":410.0,"y":379.5,"rotation":0.0,"id":103,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":84,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":45,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":80,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":41,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":82,"magnitude":1},{"id":83,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":81,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":80,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":80,"magnitude":1},{"id":83,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":82,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":80,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":83,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":81,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":76,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":37,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":78,"magnitude":1},{"id":79,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":77,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":76,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":76,"magnitude":1},{"id":79,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":78,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":76,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":79,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":77,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":72,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":33,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":74,"magnitude":1},{"id":75,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":73,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":72,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":72,"magnitude":1},{"id":75,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":74,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":72,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":75,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":73,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":68,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":71,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_1

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]},{"x":598.0,"y":379.5,"rotation":0.0,"id":102,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":15.0,"y":20.0,"rotation":0.0,"id":87,"width":100.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":60,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":78.0,"rotation":0.0,"id":88,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":56,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":90,"magnitude":1},{"id":91,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":89,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":88,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":88,"magnitude":1},{"id":91,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":90,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":88,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

http

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":91,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":89,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":39.0,"rotation":0.0,"id":92,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":94,"magnitude":1},{"id":95,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":93,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":92,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":92,"magnitude":1},{"id":95,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":94,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":92,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

retry

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":95,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":93,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":96,"width":100.0,"height":40.0,"uid":"com.gliffy.shape.ui.ui_v3.containers_content.popover_top","order":48,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"MinWidthConstraint","MinWidthConstraint":{"width":100}},{"type":"HeightConstraint","HeightConstraint":{"isMin":true,"heightInfo":[{"id":98,"magnitude":1},{"id":99,"magnitude":1}],"minHeight":0.0,"growParent":false,"padding":0.0}}]},"linkMap":[],"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":97,"width":100.0,"height":32.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"PositionConstraint","PositionConstraint":{"nodeId":96,"px":0.0,"py":0.0,"xOffset":0.0,"yOffset":0.0}},{"type":"HeightConstraint","HeightConstraint":{"isMin":false,"heightInfo":[{"id":96,"magnitude":1},{"id":99,"magnitude":-1}],"minHeight":0.0,"growParent":false,"padding":0.0}},{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":98,"width":100.0,"height":29.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"WidthConstraint","WidthConstraint":{"isMin":false,"widthInfo":[{"id":96,"magnitude":1}],"minWidth":0.0,"growParent":false,"padding":0.0}}]},"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

queue

","tid":null,"valign":"top","vposition":"none","hposition":"none"}}}]},{"x":43.0,"y":31.0,"rotation":0.0,"id":99,"width":15.0,"height":8.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[{"type":"ConstWidthConstraint","ConstWidthConstraint":{"width":15}},{"type":"ConstHeightConstraint","ConstHeightConstraint":{"height":8}},{"type":"PositionConstraint","PositionConstraint":{"nodeId":97,"px":0.5,"py":1.0,"xOffset":-7.0,"yOffset":-1.0}}]},"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.ui.ui_v3.containers_content.popover_top","strokeWidth":2.0,"strokeColor":"#BBBBBB","fillColor":"#FFFFFF","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"children":[]}]}]},{"x":0.0,"y":0.0,"rotation":0.0,"id":100,"width":130.0,"height":150.0,"uid":"com.gliffy.shape.sitemap.sitemap_v1.default.download","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.download.sitemap_v1","strokeWidth":2.0,"strokeColor":"#666666","fillColor":"#ffffff","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.0,"y":0.0,"rotation":0.0,"id":101,"width":126.0,"height":14.0,"uid":null,"order":"auto","lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Endpoint_N

","tid":null,"valign":"middle","vposition":"above","hposition":"none"}}}]}]}],"shapeStyles":{"com.gliffy.shape.sitemap.sitemap_v1.default":{"fill":"#ffffff","stroke":"#666666","strokeWidth":2},"com.gliffy.shape.network.network_v3.home":{"fill":"#000000"},"com.gliffy.shape.network.network_v3.business":{"fill":"#003366"},"com.gliffy.shape.basic.basic_v1.default":{"fill":"#FFFFFF","stroke":"#434343","strokeWidth":2}},"lineStyles":{"global":{"endArrow":1}},"textStyles":{"global":{"size":"14px"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.sitemap.sitemap_v2","com.gliffy.libraries.sitemap.sitemap_v1.default","com.gliffy.libraries.ui.ui_v3.containers_content","com.gliffy.libraries.table.table_v2.default","com.gliffy.libraries.ui.ui_v3.navigation","com.gliffy.libraries.ui.ui_v3.forms_controls","com.gliffy.libraries.ui.ui_v3.icon_symbols","com.gliffy.libraries.ui.ui_v2.forms_components","com.gliffy.libraries.ui.ui_v2.content","com.gliffy.libraries.ui.ui_v2.miscellaneous","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack","com.gliffy.libraries.uml.uml_v2.class","com.gliffy.libraries.uml.uml_v2.sequence","com.gliffy.libraries.uml.uml_v2.activity","com.gliffy.libraries.uml.uml_v2.state_machine","com.gliffy.libraries.uml.uml_v2.deployment","com.gliffy.libraries.uml.uml_v2.use_case","com.gliffy.libraries.erd.erd_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.uml.uml_v2.component","com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.images"]},"embeddedResources":{"index":0,"resources":[]}}distribution-2.3.0/docs/images/notifications.png000066400000000000000000001117141265472114500220230ustar00rootroot00000000000000PNG  IHDRH_ZsRGB@IDATx UAiMnѠAѣLy!=%zB!#3 "DQҠgu;g{ϰ9{]uY{߽veHHH Jf< (! GP=R PHHH#(fHH(l$@$@$eTA$@$@e  G*   2 xE#l EmHHxHHrtE֭[ۺut%\ XL4IN@ǎ0[0{Lٳ'| q[=o=oVW]~}-ɿ5b0aBk:Ъ_37.(c9&_^}UZ~IQF 8P_|Eرcꫯ4^pFiͺ)cѢEHH!W #T!??Xh V/RjBPA!-9D׸-[7#N2 Š s3`&(G\bh5]#kV2IyDΘ#GgDy<[Çka71cF7>p3֭jNHHYƋ޽{˵^+6lEM , "XcY7` X yjKu~XO`'5kʩ*W^y׬Y>-QoAc(n0Z`HXȅa ۷o/%Kgʗ/bڭ nt ֭`dVˇ~(_̝;W>S%\q |zq0ڵk)P`ct?~."5B! HgD9(A`bsK5ҫW/۵nRVhذ+S~?FͱFZVoSw!@]v ճ4^1}5 ]d㈅l n޸.貆_~2nwΉc: D0h5Y8묳- RJay  xJM/ũVXC*X؄]k8[z .޲抵k job AAO Ƣ JoQ?fD i&pM5ībg赚/ܓ1'O:j6mT!0e˖o5glEY=GAG2@ 1: c^yټy97 <:|-~:̌fFo+R=>[T^v/^CֆxCl1^@̱Z4%W/!`+mFǁb2WI~IH#r|-kg+kcB],Ɯta]āU91BGXe% 5jH-v  gxzN9"bwԨQ:'2{ы<t0ai1.  !ܩ"HH WdUO9W*$ NWHH(>4fHH ; ONb, d6!~)g$@$@>!ZOٔwI qqJC cIHE9! 3ky' *嬪NHH(~=HH P:Y  ?(w  "@QΪdaHHLcIHE9! 3ky' *嬪NHH(~=HH P:Y  ?pՍ?_~֭[⠃ʕ+'mժUs;p)W/_+a>HH cse! O>=a16~ƍ^ˏ'8 `ԬYS6l hHrDyҥ*ȸ6nX/ D9u"'k >sNٵkWRAYJP7nhFI6r$z&H+ωu@- aZ)׆ɮ!WaƔzٳ6oӦ {U IslzL]uC׬Y3Ye#ȸ[kb _z?l׮]4C`ҥ꘢=ʒ G Лa/Ӭ:J H_y7 qHH |)ʙEIHHew2V  HE9ad @$@$@(Õ @( #c  pE #3G$@HcsNeԩ9VrHO:r4xDRLGed$@$@6۽+Cf͚Ʉ UV]ƌ#7o\*[`GXwq?K=i[g{1c.+, ޽`NHR%kQƪ[.IYLjF0*iStii>*U*a$@9@ޱYGhl$1+z5uaa/+K(|Y (c{Mlm!viuQN$K$@`ي1UP yyy;LݻW}) /E#c&`q&K>h_g6 ZU8?MߏN:Il"?}|$*H%@QNe=scYfGkܹsL?ew}'>"4"/ a;Nz]?xM Efl23g>Ch"eÆ >-wn:Ypqv:cH8ѣ{2cfӜfϞ=$D(2yd8dȐ!zӳg\D2 L`4tPc1&LVZW_󎯼 [DG_A/N2ES n ̍ 0jU:S677naKgرCGpn~:VHFVSv7.b(sԨQI@GC˘Ņ}:O w,°5OuX=>-chbc z Xv10`MS^s7H5y@#Mw^_ Xy$sGNbS>@0`=Iv9` g+냵$phxGACjF R7k:`=q[Zיuxg֚uCuC ~p-@~5E:5ݥ.]c=ׯ穧 X&X}n<^;_?36Gaj5Xw'+@uJ@!UEdX[Q~c=) U0zxhyzܮW^:C{zt?Fpl </ڦ%2z;Ma akob'EͨK'D@=eL s(£!xw%>Ӻ  <[(iY01׋ ."tcǎ:_%'"p#بÓ@byĔnhKC&Q9LC: Gz. 
J+cJx)O?^zi3P= /'AJ*67`J$ !njh@cA'M6|Ϯn6 וKhMh% ɒ;{E1|YYBgŎ\OKр9d Ö_@>0- Ci~4Z\a0.QgjFyhؠez SrZ 5;Kh85#3>w]ڂ [!LVd=zPlTYc1ܖyeMab6N<309L _\;Fk:'ot]<ٱNT?Q%0EEҎ)GPj똱̨(M~M)E$|I@< guq&E sJYo ;Mj \]ѱiG32*@ION$y#_8dt[k(2ŎaÉDB¼n1)DpIb> CXq.oZu ˕WX)~Zpk)K{8p7wT9 dX@ҙ4ß 3 8l,^i!]LxRSF>`#Xoʵh4qٱ lxLZP| þ%ۮ."6rdR.wѠE6Q+wv%4,E@̕O$=BrA2WօmƑ/;bvPvw~ ,0InM$L%%X{Yr_s/bQ!wŸ +^q0J_&$98_ʋ+=++ 8|jȨ\!VjAp6m?m InjǬDk0Q[[3<ٱ:TDi}cRX[Y`?m!;7nHWZ?Ba&D aKÒpXATΤr%8`F/3dk+bh˖-3jQrsADՎC˴0ԎSmMrNAH8:ⴘ?Z!:;4Si5$U&2BF:%@VJ99@pZ a'袆q(Hy?)Jv靰׫jL`ʣSơ@&jUf/KW\qeh>mFp-oyn|Lԩ/YlcbXk"-2 +@:'(sWjA|Y CD"! ?kJՎ]dkp::_hB&h[mfggKQp|ٲee%3?eCy!wńCM`G#T|J hQĒED l|?C9 GF1m$P; ,R[iF1d>4bHe3Hj#˯/u8K/Oi&c;&xac3$|2OjZDǢQ!e5e0-ciWvL~MT;ۻqz8C$Ū2Cp,8bns{䤦J^ac. !vΜ9LժUԵkW9I#FAqeDGh]dWN CĆ^f%?x8рڱ{v%ja^X=5#QtW$栭w:u*oN֑DBX.][x3$KX)[8' I҅Mܑ}d~ݻ8<i_<奔#D)Z'*4pA:?k|Nq̠[3}PG͛7O ڱq~N9?(>5n@LLp84}Ѣ9n?5 N/p̣05j0 eZ  ?m% * EgJ"XY3rkK^2VjjƩN9Y#?`4'*6ץh#tt87Khjߡ;Cx\Spj8KM[$ Ym4v#42:dQyQ;V; Wr X bC3S%N+#:f (fZp4tE0A{uIW<ʨ[dA%SG*鹮:uԢeKxbUpAdf<F'vkiĉ4s曢tFGO8ߞ>ӀڱDf)geЂ䯰we1|D-u!,rn:0q<|8cd2Sc>-Fh@8C^DP\r4I5L.5jT q=zR-s-ysiƛ3h>]hӦ8c@Р(ԎS\:2 `eYxF$]vt)6͵{scB}u5v:, KC*\hctd3ϤRegistry instanceBroadcaster requestrepositoryhandlerListenerEndpoint_1queueretryhttpEndpoint_Nqueueretryhttp. . .RemoteEndpoint_1RemoteEndpoint_Ndistribution-2.3.0/docs/index.md000066400000000000000000000040001265472114500166150ustar00rootroot00000000000000 # Docker Registry ## What it is The Registry is a stateless, highly scalable server side application that stores and lets you distribute Docker images. The Registry is open-source, under the permissive [Apache license](http://en.wikipedia.org/wiki/Apache_License). ## Why use it You should use the Registry if you want to: * tightly control where your images are being stored * fully own your images distribution pipeline * integrate image storage and distribution tightly into your in-house development workflow ## Alternatives Users looking for a zero maintenance, ready-to-go solution are encouraged to head-over to the [Docker Hub](https://hub.docker.com), which provides a free-to-use, hosted Registry, plus additional features (organization accounts, automated builds, and more). Users looking for a commercially supported version of the Registry should look into [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/). ## Requirements The Registry is compatible with Docker engine **version 1.6.0 or higher**. If you really need to work with older Docker versions, you should look into the [old python registry](https://github.com/docker/docker-registry). ## TL;DR Start your registry docker run -d -p 5000:5000 --name registry registry:2 Pull (or build) some image from the hub docker pull ubuntu Tag the image so that it points to your registry docker tag ubuntu localhost:5000/myfirstimage Push it docker push localhost:5000/myfirstimage Pull it back docker pull localhost:5000/myfirstimage Now stop your registry and remove all data docker stop registry && docker rm -v registry ## Next You should now read the [detailed introduction about the registry](introduction.md), or jump directly to [deployment instructions](deploying.md). distribution-2.3.0/docs/insecure.md000066400000000000000000000072421265472114500173360ustar00rootroot00000000000000 # Insecure Registry While it's highly recommended to secure your registry using a TLS certificate issued by a known CA, you may alternatively decide to use self-signed certificates, or even use your registry over plain http. 
You have to understand the downsides in doing so, and the extra burden in configuration. ## Deploying a plain HTTP registry > **Warning**: it's not possible to use an insecure registry with basic authentication This basically tells Docker to entirely disregard security for your registry. 1. edit the file `/etc/default/docker` so that there is a line that reads: `DOCKER_OPTS="--insecure-registry myregistrydomain.com:5000"` (or add that to existing `DOCKER_OPTS`) 2. restart your Docker daemon: on ubuntu, this is usually `service docker stop && service docker start` **Pros:** - relatively easy to configure **Cons:** - this is **very** insecure: you are basically exposing yourself to trivial MITM, and this solution should only be used for isolated testing or in a tightly controlled, air-gapped environment - you have to configure every docker daemon that wants to access your registry ## Using self-signed certificates > **Warning**: using this along with basic authentication requires to **also** trust the certificate into the OS cert store for some versions of docker (see below) Generate your own certificate: mkdir -p certs && openssl req \ -newkey rsa:4096 -nodes -sha256 -keyout certs/domain.key \ -x509 -days 365 -out certs/domain.crt Be sure to use the name `myregistrydomain.com` as a CN. Use the result to [start your registry with TLS enabled](https://github.com/docker/distribution/blob/master/docs/deploying.md#get-a-certificate) Then you have to instruct every docker daemon to trust that certificate. This is done by copying the `domain.crt` file to `/etc/docker/certs.d/myregistrydomain.com:5000/ca.crt`. Don't forget to restart docker after doing so. **Pros:** - more secure than the insecure registry solution **Cons:** - you have to configure every docker daemon that wants to access your registry ## Failing... Failing to configure docker and trying to pull from a registry that is not using TLS will result in the following message: ``` FATA[0000] Error response from daemon: v1 ping attempt failed with error: Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add `--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt ``` ## Docker still complains about the certificate when using authentication? When using authentication, some versions of docker also require you to trust the certificate at the OS level. Usually, on Ubuntu this is done with: cp auth/domain.crt /usr/local/share/ca-certificates/myregistrydomain.com.crt update-ca-certificates ... and on Red Hat (and its derivatives) with: cp auth/domain.crt /etc/pki/ca-trust/source/anchors/myregistrydomain.com.crt update-ca-trust ... On some distributions, e.g. Oracle Linux 6, the Shared System Certificates feature needs to be manually enabled: update-ca-trust enable Now restart docker (`service docker stop && service docker start`, or any other way you use to restart docker). distribution-2.3.0/docs/introduction.md000066400000000000000000000071331265472114500202410ustar00rootroot00000000000000 # Understanding the Registry A registry is a storage and content delivery system, holding named Docker images, available in different tagged versions. 
> Example: the image `distribution/registry`, with tags `2.0` and `2.1`. Users interact with a registry by using docker push and pull commands. > Example: `docker pull registry-1.docker.io/distribution/registry:2.1`. Storage itself is delegated to drivers. The default storage driver is the local posix filesystem, which is suitable for development or small deployments. Additional cloud-based storage drivers like S3, Microsoft Azure, Ceph Rados, OpenStack Swift and Aliyun OSS are also supported. People looking into using other storage backends may do so by writing their own driver implementing the [Storage API](storagedrivers.md). Since securing access to your hosted images is paramount, the Registry natively supports TLS and basic authentication. The Registry GitHub repository includes additional information about advanced authentication and authorization methods. Only very large or public deployments are expected to extend the Registry in this way. Finally, the Registry ships with a robust [notification system](notifications.md), calling webhooks in response to activity, and both extensive logging and reporting, mostly useful for large installations that want to collect metrics. ## Understanding image naming Image names as used in typical docker commands reflect their origin: * `docker pull ubuntu` instructs docker to pull an image named `ubuntu` from the official Docker Hub. This is simply a shortcut for the longer `docker pull docker.io/library/ubuntu` command * `docker pull myregistrydomain:port/foo/bar` instructs docker to contact the registry located at `myregistrydomain:port` to find the image `foo/bar` You can find out more about the various Docker commands dealing with images in the [official Docker engine documentation](https://docs.docker.com/reference/commandline/cli/). ## Use cases Running your own Registry is a great solution to integrate with and complement your CI/CD system. In a typical workflow, a commit to your source revision control system would trigger a build on your CI system, which would then push a new image to your Registry if the build is successful. A notification from the Registry would then trigger a deployment on a staging environment, or notify other systems that a new image is available. It's also an essential component if you want to quickly deploy a new image over a large cluster of machines. Finally, it's the best way to distribute images inside an isolated network. ## Requirements You absolutely need to be familiar with Docker, specifically with regard to pushing and pulling images. You must understand the difference between the daemon and the cli, and at least grasp basic concepts about networking. Also, while just starting a registry is fairly easy, operating it in a production environment requires operational skills, just like any other service. You are expected to be familiar with systems availability and scalability, logging and log processing, systems monitoring, and security 101. Strong understanding of http and overall network communications, plus familiarity with golang are certainly useful as well for advanced operations or hacking. ## Next Dive into [deploying your registry](deploying.md) distribution-2.3.0/docs/migration.md000066400000000000000000000020771265472114500175130ustar00rootroot00000000000000 # Migrating a 1.0 registry to 2.0 TODO: This needs to be revised in light of Olivier's work A few thoughts here: There was no "1.0". There was an implementation of the Registry API V1 but only a version 0.9 of the service was released. 
The image formats are not compatible in any way. One must convert v1 images to v2 images using a docker client or other tool. One can migrate images from one version to the other by pulling images from the old registry and pushing them to the v2 registry. ----- The Docker Registry 2.0 is backward compatible with images created by the earlier specification. If you are migrating a private registry to version 2.0, you should use the following process: 1. Configure and test a 2.0 registry image in a sandbox environment. 2. Back up up your production image storage. Your production image storage should reside on a volume or storage backend. Make sure you have a backup of its contents. 3. Stop your existing registry service. 4. Restart your registry with your tested 2.0 image. distribution-2.3.0/docs/mirror.md000066400000000000000000000073311265472114500170320ustar00rootroot00000000000000 # Registry as a pull through cache ## Use-case If you have multiple instances of Docker running in your environment (e.g., multiple physical or virtual machines, all running the Docker daemon), each time one of them requires an image that it doesn’t have it will go out to the internet and fetch it from the public Docker registry. By running a local registry mirror, you can keep most of the redundant image fetch traffic on your local network. ### Alternatives Alternatively, if the set of images you are using is well delimited, you can simply pull them manually and push them to a simple, local, private registry. Furthermore, if your images are all built in-house, not using the Hub at all and relying entirely on your local registry is the simplest scenario. ### Gotcha It's currently not possible to mirror another private registry. Only the central Hub can be mirrored. ### Solution The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. ## How does it work? The first time you request an image from your local registry mirror, it pulls the image from the public Docker registry and stores it locally before handing it back to you. On subsequent requests, the local registry mirror is able to serve the image from its own storage. ### What if the content changes on the Hub? When a pull is attempted with a tag, the Registry will check the remote to ensure if it has the latest version of the requested content. If it doesn't it will fetch the latest content and cache it. ### What about my disk? In environments with high churn rates, stale data can build up in the cache. When running as a pull through cache the Registry will periodically remove old content to save disk space. Subsequent requests for removed content will cause a remote fetch and local re-caching. To ensure best performance and guarantee correctness the Registry cache should be configured to use the `filesystem` driver for storage. ## Running a Registry as a pull through cache The easiest way to run a registry as a pull through cache is to run the official Registry image. Multiple registry caches can be deployed over the same back-end. A single registry cache will ensure that concurrent requests do not pull duplicate data, but this property will not hold true for a registry cache cluster. ### Configuring the cache To configure a Registry to run as a pull through cache, the addition of a `proxy` section is required to the config file. In order to access private images on the Docker Hub, a username and password can be supplied. 
proxy: remoteurl: https://registry-1.docker.io username: [username] password: [password] > :warn: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private! ### Configuring the Docker daemon You will need to pass the `--registry-mirror` option to your Docker daemon on startup: docker --registry-mirror=https:// daemon For example, if your mirror is serving on http://10.0.0.2:5000, you would run: docker --registry-mirror=https://10.0.0.2:5000 daemon NOTE: Depending on your local host setup, you may be able to add the `--registry-mirror` option to the `DOCKER_OPTS` variable in `/etc/default/docker`. distribution-2.3.0/docs/nginx.md000066400000000000000000000136111265472114500166410ustar00rootroot00000000000000 # Authenticating proxy with nginx ## Use-case People already relying on a nginx proxy to authenticate their users to other services might want to leverage it and have Registry communications tunneled through the same pipeline. Usually, that includes enterprise setups using LDAP/AD on the backend and a SSO mechanism fronting their internal http portal. ### Alternatives If you just want authentication for your registry, and are happy maintaining users access separately, you should really consider sticking with the native [basic auth registry feature](deploying.md#native-basic-auth). ### Solution With the method presented here, you implement basic authentication for docker engines in a reverse proxy that sits in front of your registry. While we use a simple htpasswd file as an example, any other nginx authentication backend should be fairly easy to implement once you are done with the example. We also implement push restriction (to a limited user group) for the sake of the example. Again, you should modify this to fit your mileage. ### Gotchas While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. Make sure the extra complexity is required. For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client header: ``` X-Real-IP X-Forwarded-For X-Forwarded-Proto ``` So if you have an nginx sitting behind it, should remove these lines from the example config below: ``` X-Real-IP $remote_addr; # pass on real client's IP X-Forwarded-For $proxy_add_x_forwarded_for; X-Forwarded-Proto $scheme; ``` Otherwise nginx will reset the ELB's values, and the requests will not be routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970). ## Setting things up Read again [the requirements](recipes.md#requirements). Ready? -- Create the required directories ``` mkdir -p auth mkdir -p data ``` Create the main nginx configuration you will use. ``` cat < auth/nginx.conf events { worker_connections 1024; } http { upstream docker-registry { server registry:5000; } ## Set a variable to help us decide if we need to add the ## 'Docker-Distribution-Api-Version' header. ## The registry always sets this header. 
## In the case of nginx performing auth, the header will be unset ## since nginx is auth-ing before proxying. map \$upstream_http_docker_distribution_api_version \$docker_distribution_api_version { 'registry/2.0' ''; default registry/2.0; } server { listen 443 ssl; server_name myregistrydomain.com; # SSL ssl_certificate /etc/nginx/conf.d/domain.crt; ssl_certificate_key /etc/nginx/conf.d/domain.key; # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html ssl_protocols TLSv1.1 TLSv1.2; ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; ssl_prefer_server_ciphers on; ssl_session_cache shared:SSL:10m; # disable any limits to avoid HTTP 413 for large image uploads client_max_body_size 0; # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) chunked_transfer_encoding on; location /v2/ { # Do not allow connections from docker 1.5 and earlier # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents if (\$http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*\$" ) { return 404; } # To add basic authentication to v2 use auth_basic setting. auth_basic "Registry realm"; auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd; ## If $docker_distribution_api_version is empty, the header will not be added. ## See the map directive above where this variable is defined. add_header 'Docker-Distribution-Api-Version' \$docker_distribution_api_version always; proxy_pass http://docker-registry; proxy_set_header Host \$http_host; # required for docker client's sake proxy_set_header X-Real-IP \$remote_addr; # pass on real client's IP proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto \$scheme; proxy_read_timeout 900; } } } EOF ``` Now create a password file for "testuser" and "testpassword" ``` docker run --rm --entrypoint htpasswd registry:2 -bn testuser testpassword > auth/nginx.htpasswd ``` Copy over your certificate files ``` cp domain.crt auth cp domain.key auth ``` Now create your compose file ``` cat < docker-compose.yml nginx: image: "nginx:1.9" ports: - 5043:443 links: - registry:registry volumes: - ./auth:/etc/nginx/conf.d - ./auth/nginx.conf:/etc/nginx/nginx.conf:ro registry: image: registry:2 ports: - 127.0.0.1:5000:5000 volumes: - `pwd`./data:/var/lib/registry EOF ``` ## Starting and stopping Now, start your stack: docker-compose up -d Login with a "push" authorized user (using `testuser` and `testpassword`), then tag and push your first image: docker login -p=testuser -u=testpassword -e=root@example.ch myregistrydomain.com:5043 docker tag ubuntu myregistrydomain.com:5043/test docker push myregistrydomain.com:5043/test docker pull myregistrydomain.com:5043/test distribution-2.3.0/docs/notifications.md000066400000000000000000000251161265472114500203720ustar00rootroot00000000000000 # Notifications The Registry supports sending webhook notifications in response to events happening within the registry. Notifications are sent in response to manifest pushes and pulls and layer pushes and pulls. These actions are serialized into events. The events are queued into a registry-internal broadcast system which queues and dispatches events to [_Endpoints_](#endpoints). ![](images/notifications.png) ## Endpoints Notifications are sent to _endpoints_ via HTTP requests. Each configured endpoint has isolated queues, retry configuration and http targets within each instance of a registry. 
When an action happens within the registry, it is converted into an event which is dropped into an inmemory queue. When the event reaches the end of the queue, an http request is made to the endpoint until the request succeeds. The events are sent serially to each endpoint but order is not guaranteed. ## Configuration To setup a registry instance to send notifications to endpoints, one must add them to the configuration. A simple example follows: notifications: endpoints: - name: alistener url: https://mylistener.example.com/event headers: Authorization: [Bearer ] timeout: 500ms threshold: 5 backoff: 1s The above would configure the registry with an endpoint to send events to `https://mylistener.example.com/event`, with the header "Authorization: Bearer ". The request would timeout after 500 milliseconds. If 5 failures happen consecutively, the registry will backoff for 1 second before trying again. For details on the fields, please see the [configuration documentation](configuration.md#notifications). A properly configured endpoint should lead to a log message from the registry upon startup: ``` INFO[0000] configuring endpoint alistener (https://mylistener.example.com/event), timeout=500ms, headers=map[Authorization:[Bearer ]] app.id=812bfeb2-62d6-43cf-b0c6-152f541618a3 environment=development service=registry ``` ## Events Events have a well-defined JSON structure and are sent as the body of notification requests. One or more events are sent in a structure called an envelope. Each event has a unique id that can be used to uniquely identify incoming requests, if required. Along with that, an _action_ is provided with a _target, identifying the object mutated during the event. The fields available in an event are described in detail in the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Event). **TODO:** Let's break out the fields here rather than rely on the godoc. The following is an example of a JSON event, sent in response to the push of a manifest: ```json { "id": "asdf-asdf-asdf-asdf-0", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v1+json", "size": 1, "digest": "sha256:0123456789abcdef0", "length": 1, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } } ``` > __NOTE:__ As of version 2.1, the `length` field for event targets > is being deprecated for the `size` field, bringing the target in line with > common nomenclature. Both will continue to be set for the foreseeable > future. Newer code should favor `size` but accept either. ## Envelope The envelope contains one or more events, with the following json structure: ```json { "events": [ ... ], } ``` While events may be sent in the same envelope, the set of events within that envelope have no implied relationship. For example, the registry may choose to group unrelated events and send them in the same envelope to reduce the total number of requests. The full package has the mediatype "application/vnd.docker.distribution.events.v1+json", which will be set on the request coming to an endpoint. 
An example of a full event may look as follows: ```json GET /callback Host: application/vnd.docker.distribution.events.v1+json Authorization: Bearer Content-Type: application/vnd.docker.distribution.events.v1+json { "events": [ { "id": "asdf-asdf-asdf-asdf-0", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v1+json", "length": 1, "digest": "sha256:0123456789abcdef0", "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } }, { "id": "asdf-asdf-asdf-asdf-1", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", "length": 2, "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } }, { "id": "asdf-asdf-asdf-asdf-2", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", "length": 3, "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } } ] } ``` ## Responses The registry is fairly accepting of the response codes from endpoints. If an endpoint responds with any 2xx or 3xx response code (after following redirects), the message will be considered delivered and discarded. In turn, it is recommended that endpoints are accepting of incoming responses, as well. While the format of event envelopes are standardized by media type, any "pickyness" about validation may cause the queue to backup on the registry. ## Monitoring The state of the endpoints are reported via the debug/vars http interface, usually configured to `http://localhost:5001/debug/vars`. Information such as configuration and metrics are available by endpoint. The following provides an example of a few endpoints that have experienced several failures and have since recovered: ```json "notifications":{ "endpoints":[ { "name":"local-5003", "url":"http://localhost:5003/callback", "Headers":{ "Authorization":[ "Bearer \u003can example token\u003e" ] }, "Timeout":1000000000, "Threshold":10, "Backoff":1000000000, "Metrics":{ "Pending":76, "Events":76, "Successes":0, "Failures":0, "Errors":46, "Statuses":{ } } }, { "name":"local-8083", "url":"http://localhost:8083/callback", "Headers":null, "Timeout":1000000000, "Threshold":10, "Backoff":1000000000, "Metrics":{ "Pending":0, "Events":76, "Successes":76, "Failures":0, "Errors":28, "Statuses":{ "202 Accepted":76 } } } ] } ``` If using notification as part of a larger application, it is _critical_ to monitor the size ("Pending" above) of the endpoint queues. If failures or queue sizes are increasing, it can indicate a larger problem. 
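Since the "Pending" counter is the key signal here, a small poller against the debug interface can be enough to surface a backlog early. The following is a minimal sketch in Go, not part of the registry itself: it assumes the debug interface is exposed at `localhost:5001` as mentioned above, and the threshold of 100 pending events is an arbitrary illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// endpointStatus mirrors the subset of the debug/vars payload shown above
// that matters for queue monitoring.
type endpointStatus struct {
	Name    string `json:"name"`
	URL     string `json:"url"`
	Metrics struct {
		Pending   int `json:"Pending"`
		Events    int `json:"Events"`
		Successes int `json:"Successes"`
		Failures  int `json:"Failures"`
		Errors    int `json:"Errors"`
	} `json:"Metrics"`
}

func main() {
	// Assumption: the registry debug server is listening on localhost:5001.
	resp, err := http.Get("http://localhost:5001/debug/vars")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Unrelated expvar keys in the payload are simply ignored by the decoder.
	var vars struct {
		Notifications struct {
			Endpoints []endpointStatus `json:"endpoints"`
		} `json:"notifications"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		panic(err)
	}

	for _, ep := range vars.Notifications.Endpoints {
		// 100 is an example threshold; tune it to your environment.
		if ep.Metrics.Pending > 100 {
			fmt.Printf("WARNING: endpoint %s (%s) has %d pending events\n",
				ep.Name, ep.URL, ep.Metrics.Pending)
		}
	}
}
```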
The logs are also a valuable resource for monitoring problems. A failing endpoint will lead to messages similar to the following: ``` ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off ``` The above indicates that several errors have led to a backoff and the registry will wait before retrying. ## Considerations Currently, the queues are inmemory, so endpoints should be _reasonably reliable_. They are designed to make a best-effort to send the messages but if an instance is lost, messages may be dropped. If an endpoint goes down, care should be taken to ensure that the registry instance is not terminated before the endpoint comes back up or messages will be lost. This can be mitigated by running endpoints in close proximity to the registry instances. One could run an endpoint that pages to disk and then forwards a request to provide better durability. The notification system is designed around a series of interchangeable _sinks_ which can be wired up to achieve interesting behavior. If this system doesn't provide acceptable guarantees, adding a transactional `Sink` to the registry is a possibility, although it may have an effect on request service time. Please see the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) for more information. distribution-2.3.0/docs/osx-setup-guide.md000066400000000000000000000047671265472114500205740ustar00rootroot00000000000000 # OS X Setup Guide ## Use-case This is useful if you intend to run a registry server natively on OS X. ### Alternatives You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM. The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](https://docs.docker.com/machine/), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM. ### Solution Using the method described here, you install and compile your own from the git repository and run it as an OS X agent. ### Gotchas Production services operation on OS X is out of scope of this document. Be sure you understand well these aspects before considering going to production with this. ## Setup golang on your machine If you know, safely skip to the next section. If you don't, the TLDR is: bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) source ~/.gvm/scripts/gvm gvm install go1.4.2 gvm use go1.4.2 If you want to understand, you should read [How to Write Go Code](https://golang.org/doc/code.html). 
## Checkout the Docker Distribution source tree mkdir -p $GOPATH/src/github.com/docker git clone https://github.com/docker/distribution.git $GOPATH/src/github.com/docker/distribution cd $GOPATH/src/github.com/docker/distribution ## Build the binary GOPATH=$(PWD)/Godeps/_workspace:$GOPATH make binaries sudo cp bin/registry /usr/local/libexec/registry ## Setup Copy the registry configuration file in place: mkdir /Users/Shared/Registry cp docs/osx/config.yml /Users/Shared/Registry/config.yml ## Running the Docker Registry under launchd Copy the Docker registry plist into place: plutil -lint docs/osx/com.docker.registry.plist cp docs/osx/com.docker.registry.plist ~/Library/LaunchAgents/ chmod 644 ~/Library/LaunchAgents/com.docker.registry.plist Start the Docker registry: launchctl load ~/Library/LaunchAgents/com.docker.registry.plist ### Restarting the docker registry service launchctl stop com.docker.registry launchctl start com.docker.registry ### Unloading the docker registry service launchctl unload ~/Library/LaunchAgents/com.docker.registry.plist distribution-2.3.0/docs/osx/000077500000000000000000000000001265472114500160035ustar00rootroot00000000000000distribution-2.3.0/docs/osx/com.docker.registry.plist000066400000000000000000000021171265472114500227540ustar00rootroot00000000000000 Label com.docker.registry KeepAlive StandardErrorPath /Users/Shared/Registry/registry.log StandardOutPath /Users/Shared/Registry/registry.log Program /usr/local/libexec/registry ProgramArguments /usr/local/libexec/registry /Users/Shared/Registry/config.yml Sockets http-listen-address SockServiceName 5000 SockType dgram SockFamily IPv4 http-debug-address SockServiceName 5001 SockType dgram SockFamily IPv4 distribution-2.3.0/docs/osx/config.yml000066400000000000000000000004601265472114500177730ustar00rootroot00000000000000version: 0.1 log: level: info fields: service: registry environment: macbook-air storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /Users/Shared/Registry http: addr: 0.0.0.0:5000 secret: mytokensecret debug: addr: localhost:5001 distribution-2.3.0/docs/recipes.md000066400000000000000000000024441265472114500171520ustar00rootroot00000000000000 # Recipes You will find here a list of "recipes", end-to-end scenarios for exotic or otherwise advanced use-cases. Most users are not expected to have a use for these. ## Requirements You should have followed entirely the basic [deployment guide](deploying.md). If you have not, please take the time to do so. 
At this point, it's assumed that: * you understand Docker security requirements, and how to configure your docker engines properly * you have installed Docker Compose * it's HIGHLY recommended that you get a certificate from a known CA instead of self-signed certificates * inside the current directory, you have a X509 `domain.crt` and `domain.key`, for the CN `myregistrydomain.com` * be sure you have stopped and removed any previously running registry (typically `docker stop registry && docker rm -v registry`) ## The List * [using Apache as an authenticating proxy](apache.md) * [using Nginx as an authenticating proxy](nginx.md) * [running a Registry on OS X](osx-setup-guide.md) * [hacking the registry: build instructions](building.md) * [mirror the Docker Hub](mirror.md)distribution-2.3.0/docs/spec/000077500000000000000000000000001265472114500161245ustar00rootroot00000000000000distribution-2.3.0/docs/spec/api.md000066400000000000000000004043111265472114500172220ustar00rootroot00000000000000 # Docker Registry HTTP API V2 ## Introduction The _Docker Registry HTTP API_ is the protocol to facilitate distribution of images to the docker engine. It interacts with instances of the docker registry, which is a service to manage information about docker images and enable their distribution. The specification covers the operation of version 2 of this API, known as _Docker Registry HTTP API V2_. While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this specification these changes to the docker the image format, covered in [docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image definition and improves security. This specification will build on that work, leveraging new properties of the manifest format to improve performance, reduce bandwidth usage and decrease the likelihood of backend corruption. For relevant details and history leading up to this specification, please see the following issues: - [docker/docker#8093](https://github.com/docker/docker/issues/8093) - [docker/docker#9015](https://github.com/docker/docker/issues/9015) - [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) ### Scope This specification covers the URL layout and protocols of the interaction between docker registry and docker core. This will affect the docker core registry API and the rewrite of docker-registry. Docker registry implementations may implement other API endpoints, but they are not covered by this specification. This includes the following features: - Namespace-oriented URI Layout - PUSH/PULL registry server for V2 image manifest format - Resumable layer PUSH support - V2 Client library implementation While authentication and authorization support will influence this specification, details of the protocol will be left to a future specification. Relevant header definitions and error codes are present to provide an indication of what a client may encounter. #### Future There are features that have been discussed during the process of cutting this specification. The following is an incomplete list: - Immutable image references - Multiple architecture support - Migration from v2compatibility representation These may represent features that are either out of the scope of this specification, the purview of another specification or have been deferred to a future version. 
### Use Cases For the most part, the use cases of the former registry API apply to the new version. Differentiating use cases are covered below. #### Image Verification A docker engine instance would like to run verified image named "library/ubuntu", with the tag "latest". The engine contacts the registry, requesting the manifest for "library/ubuntu:latest". An untrusted registry returns a manifest. Before proceeding to download the individual layers, the engine verifies the manifest's signature, ensuring that the content was produced from a trusted source and no tampering has occured. After each layer is downloaded, the engine verifies the digest of the layer, ensuring that the content matches that specified by the manifest. #### Resumable Push Company X's build servers lose connectivity to docker registry before completing an image layer transfer. After connectivity returns, the build server attempts to re-upload the image. The registry notifies the build server that the upload has already been partially attempted. The build server responds by only sending the remaining data to complete the image file. #### Resumable Pull Company X is having more connectivity problems but this time in their deployment datacenter. When downloading an image, the connection is interrupted before completion. The client keeps the partial data and uses http `Range` requests to avoid downloading repeated data. #### Layer Upload De-duplication Company Y's build system creates two identical docker layers from build processes A and B. Build process A completes uploading the layer before B. When process B attempts to upload the layer, the registry indicates that its not necessary because the layer is already known. If process A and B upload the same layer at the same time, both operations will proceed and the first to complete will be stored in the registry (Note: we may modify this to prevent dogpile with some locking mechanism). ### Changes The V2 specification has been written to work as a living document, specifying only what is certain and leaving what is not specified open or to future changes. Only non-conflicting additions should be made to the API and accepted changes should avoid preventing future changes from happening. This section should be updated when changes are made to the specification, indicating what is different. Optionally, we may start marking parts of the specification to correspond with the versions enumerated here. Each set of changes is given a letter corresponding to a set of modifications that were applied to the baseline specification. These are merely for reference and shouldn't be used outside the specification other than to identify a set of modifications.
- **j**
  - Add ability to mount blobs across repositories.
- **i**
  - Clarified the expected response behavior for manifest HEAD requests.
- **h**
  - All mention of tarsum removed.
- **g**
  - Clarified pagination behavior when parameters are unspecified.
- **f**
  - Specify the delete API for layers and manifests.
- **e**
  - Added support for listing registry contents.
  - Added pagination to tags API.
  - Added common approach to support pagination.
- **d**
  - Allow repository name components to be one character.
  - Clarified that single component names are allowed.
- **c**
  - Added section covering digest format.
  - Added more clarification that manifest cannot be deleted by tag.
- **b**
  - Added capability of doing streaming upload to PATCH blob upload.
  - Updated PUT blob upload to no longer take final chunk, now requires entire data or no data.
  - Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.
- **a**
  - Added support for immutable manifest references in manifest endpoints.
  - Deleting a manifest by tag has been deprecated.
  - Specified `Docker-Content-Digest` header for appropriate entities.
  - Added error code for unsupported operations.
## Overview This section covers client flows and details of the API endpoints. The URI layout of the new API is structured to support a rich authentication and authorization model by leveraging namespaces. All endpoints will be prefixed by the API version and the repository name: /v2// For example, an API endpoint that will work with the `library/ubuntu` repository, the URI prefix will be: /v2/library/ubuntu/ This scheme provides rich access control over various operations and methods using the URI prefix and http methods that can be controlled in variety of ways. Classically, repository names have always been two path components where each path component is less than 30 characters. The V2 registry API does not enforce this. The rules for a repository name are as follows: 1. A repository name is broken up into _path components_. A component of a repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. 2. If a repository name has two or more path components, they must be separated by a forward slash ("/"). 3. The total length of a repository name, including slashes, must be less the 256 characters. These name requirements _only_ apply to the registry API and should accept a superset of what is supported by other docker ecosystem components. All endpoints should support aggressive http caching, compression and range headers, where appropriate. The new API attempts to leverage HTTP semantics where possible but may break from standards to implement targeted features. For detail on individual endpoints, please see the [_Detail_](#detail) section. ### Errors Actionable failure conditions, covered in detail in their relevant sections, are reported as part of 4xx responses, in a json response body. One or more errors will be returned in the following format: { "errors:" [{ "code": , "message": , "detail": }, ... ] } The `code` field will be a unique identifier, all caps with underscores by convention. The `message` field will be a human readable string. The optional `detail` field may contain arbitrary json data providing information the client can use to resolve the issue. While the client can take action on certain error codes, the registry may add new error codes over time. All client implementations should treat unknown error codes as `UNKNOWN`, allowing future error codes to be added without breaking API compatibility. For the purposes of the specification error codes will only be added and never removed. For a complete account of all error codes, please see the _Detail_ section. ### API Version Check A minimal endpoint, mounted at `/v2/` will provide version support information based on its response statuses. The request format is as follows: GET /v2/ If a `200 OK` response is returned, the registry implements the V2(.1) registry API and the client may proceed safely with other V2 operations. Optionally, the response may contain information about the supported paths in the response body. The client should be prepared to ignore this data. If a `401 Unauthorized` response is returned, the client should take action based on the contents of the "WWW-Authenticate" header and try the endpoint again. Depending on access control setup, the client may still have to authenticate against different resources, even if this check succeeds. 
If `404 Not Found` response status, or other unexpected status, is returned, the client should proceed with the assumption that the registry does not implement V2 of the API. When a `200 OK` or `401 Unauthorized` response is returned, the "Docker-Distribution-API-Version" header should be set to "registry/2.0". Clients may require this header value to determine if the endpoint serves this API. When this header is omitted, clients may fallback to an older API version. ### Content Digests This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). The core of this design is the concept of a content addressable identifier. It uniquely identifies content by taking a collision-resistant hash of the bytes. Such an identifier can be independently calculated and verified by selection of a common _algorithm_. If such an identifier can be communicated in a secure manner, one can retrieve the content from an insecure source, calculate it independently and be certain that the correct content was obtained. Put simply, the identifier is a property of the content. To disambiguate from other concepts, we call this identifier a _digest_. A _digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ portion. The _algorithm_ identifies the methodology used to calculate the digest. The _hex_ portion is the hex-encoded result of the hash. We define a _digest_ string to match the following grammar: ``` digest := algorithm ":" hex algorithm := /[A-Fa-f0-9_+.-]+/ hex := /[A-Fa-f0-9]+/ ``` Some examples of _digests_ include the following: digest | description | ----------------------------------------------------------------------------------|------------------------------------------------ sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | While the _algorithm_ does allow one to implement a wide variety of algorithms, compliant implementations should use sha256. Heavy processing of input before calculating a hash is discouraged to avoid degrading the uniqueness of the _digest_ but some canonicalization may be performed to ensure consistent identifiers. Let's use a simple example in pseudo-code to demonstrate a digest calculation: ``` let C = 'a small string' let B = sha256(C) let D = 'sha256:' + EncodeHex(B) let ID(C) = D ``` Above, we have bytestring `C` passed into a function, `SHA256`, that returns a bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` as equal to `D`. A digest can be verified by independently calculating `D` and comparing it with identifier `ID(C)`. #### Digest Header To provide verification of http content, any response may include a `Docker- Content-Digest` header. This will include the digest of the target entity returned in the response. For blobs, this is the entire blob content. For manifests, this is the manifest body without the signature content, also known as the JWS payload. Note that the commonly used canonicalization for digest calculation may be dependent on the mediatype of the content, such as with manifests. The client may choose to ignore the header or may verify it to ensure content integrity and transport security. This is most important when fetching by a digest. To ensure security, the content should be verified against the digest used to fetch the content. At times, the returned digest may differ from that used to initiate a request. 
Such digests are considered to be from different _domains_, meaning they have different values for _algorithm_. In such a case, the client may choose to verify the digests in both domains or ignore the server's digest. To maintain security, the client _must_ always verify the content against the _digest_ used to fetch the content.

> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use
> the same digest used to fetch the content to verify it. The header
> `Docker-Content-Digest` should not be trusted over the "local" digest.

### Pulling An Image

An "image" is a combination of a JSON manifest and individual layer files. The process of pulling an image centers around retrieving these two components.

The first step in pulling an image is to retrieve the manifest. For reference, the relevant manifest fields for the registry are the following:

field     | description                                     |
----------|-------------------------------------------------|
name      | The name of the image.                          |
tag       | The tag for this version of the image.          |
fsLayers  | A list of layer descriptors (including digest)  |
signature | A JWS used to verify the manifest content       |

For more information about the manifest format, please see [docker/docker#8093](https://github.com/docker/docker/issues/8093).

When the manifest is in hand, the client must verify the signature to ensure the names and layers are valid. Once confirmed, the client will then use the digests to download the individual layers. Layers are stored as blobs in the V2 registry API, keyed by their digest.

#### Pulling an Image Manifest

The image manifest can be fetched with the following url:

```
GET /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The reference may include a tag or digest.

A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful, the image manifest will be returned, with the following format (see docker/docker#8093 for details):

    {
        "name": <name>,
        "tag": <tag>,
        "fsLayers": [
            {
                "blobSum": <digest>
            },
            ...
        ],
        "history": <v1 images>,
        "signature": <JWS>
    }

The client should verify the returned manifest signature for authenticity before fetching layers.

##### Existing Manifests

The image manifest can be checked for existence with the following url:

```
HEAD /v2/<name>/manifests/<reference>
```

The `name` and `reference` parameters identify the image and are required. The reference may include a tag or digest.

A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful the response will be as follows:

```
200 OK
Content-Length: <length of manifest>
Docker-Content-Digest: <digest>
```

#### Pulling a Layer

Layers are stored in the blob portion of the registry, keyed by digest. Pulling a layer is carried out by a standard http request. The URL is as follows:

    GET /v2/<name>/blobs/<digest>

Access to a layer will be gated by the `name` of the repository but is identified uniquely in the registry by `digest`. This endpoint may issue a 307 (302 for clients predating HTTP 1.1) redirect to another service for downloading the layer, and clients should be prepared to handle redirects.

### Pushing An Image

Pushing an image works in the opposite order of a pull: the client pushes the individual layers first and, once they are all available in the registry, uploads the signed manifest.

#### Pushing a Layer

All layer uploads use two steps to manage the upload process. The first step starts the upload in the registry service and returns a url used for the second step, which transfers the actual layer data. Uploads are started with a POST request in the following format:

```
POST /v2/<name>/blobs/uploads/
```

The parameters of this request are the image namespace under which the layer will be linked. Responses to this request are covered below.

##### Existing Layers

The existence of a layer can be checked via a `HEAD` request to the blob store API. The request should be formatted as follows:

```
HEAD /v2/<name>/blobs/<digest>
```

If the layer with the digest specified in `digest` is available, a 200 OK response will be received, with no actual body content (this is according to http specification).
The response will look as follows:

```
200 OK
Content-Length: <length of blob>
Docker-Content-Digest: <digest>
```

When this response is received, the client can assume that the layer is already available in the registry under the given name and should take no further action to upload the layer. Note that the binary digests may differ for the existing registry layer, but the digests will be guaranteed to match.

##### Uploading the Layer

If the POST request is successful, a `202 Accepted` response will be returned with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

The rest of the upload process can be carried out with the returned url, called the "Upload URL" from the `Location` header. All responses to the upload url, whether sending data or getting status, will be in this format.

Though the URI format (`/v2/<name>/blobs/uploads/<uuid>`) for the `Location` header is specified, clients should treat it as an opaque url and should never try to assemble it. While the `uuid` parameter may be an actual UUID, this proposal imposes no constraints on the format and clients should never impose any.

If clients need to correlate local upload state with remote upload state, the contents of the `Docker-Upload-UUID` header should be used. Such an id can be used to key the last used location header when implementing resumable uploads.

##### Upload Progress

The progress and chunk coordination of the upload process will be coordinated through the `Range` header. While this is a non-standard use of the `Range` header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. For an upload that has just started, with a 1000 byte layer file for example, the `Range` header would be as follows:

```
Range: bytes=0-0
```

To get the status of an upload, issue a GET request to the upload URL:

```
GET /v2/<name>/blobs/uploads/<uuid>
Host: <registry host>
```

The response will be similar to the above, except it will return a 204 status:

```
204 No Content
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Docker-Upload-UUID: <uuid>
```

Note that the HTTP `Range` header byte ranges are inclusive and that will be honored, even in non-standard use cases.

##### Monolithic Upload

A monolithic upload is simply a chunked upload with a single chunk and may be favored by clients that would like to avoid the complexity of chunking. To carry out a "monolithic" upload, one can simply put the entire content blob to the provided URL:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream
```

The "digest" parameter must be included with the PUT request. Please see the _Completed Upload_ section for details on the parameters and expected responses.

Additionally, the upload can be completed with a single `POST` request to the uploads endpoint, including the "size" and "digest" parameters:

```
POST /v2/<name>/blobs/uploads/?digest=<digest>
Content-Length: <size of layer>
Content-Type: application/octet-stream
```

On the registry service, this should allocate an upload, accept and verify the data and return the same response as the final chunk of an upload. If the POST request fails collecting the data in any way, the registry should attempt to return an error response to the client with the `Location` header providing a place to continue the upload.

The single `POST` method is provided for convenience and most clients should implement `POST` + `PUT` to support reliable resume of uploads.
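To make the two-step flow concrete, here is a minimal Go sketch of a monolithic `POST` + `PUT` push. The registry address and repository name are illustrative placeholders, and authentication, retry handling and chunked uploads are deliberately left out:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"net/url"
)

// pushBlob uploads a blob using the two-step POST + PUT flow described above.
func pushBlob(registry, repo string, blob []byte) (string, error) {
	sum := sha256.Sum256(blob)
	digest := "sha256:" + hex.EncodeToString(sum[:])

	// Step 1: start the upload. A 202 Accepted carries the upload URL in Location.
	resp, err := http.Post(registry+"/v2/"+repo+"/blobs/uploads/", "", nil)
	if err != nil {
		return "", err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return "", fmt.Errorf("initiate upload: unexpected status %s", resp.Status)
	}

	// Treat the Location value as opaque; resolve it in case it is relative.
	base, err := url.Parse(registry)
	if err != nil {
		return "", err
	}
	loc, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return "", err
	}
	uploadURL := base.ResolveReference(loc)

	// Step 2: PUT the entire blob, adding the digest query parameter to complete the upload.
	q := uploadURL.Query()
	q.Set("digest", digest)
	uploadURL.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, uploadURL.String(), bytes.NewReader(blob))
	if err != nil {
		return "", err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	put, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	put.Body.Close()
	if put.StatusCode != http.StatusCreated {
		return "", fmt.Errorf("complete upload: unexpected status %s", put.Status)
	}
	return digest, nil
}

func main() {
	digest, err := pushBlob("http://localhost:5000", "library/ubuntu", []byte("example layer content"))
	fmt.Println(digest, err)
}
```

Resolving the `Location` value against the registry base URL keeps the client indifferent to whether the registry returns an absolute or relative upload URL.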
##### Chunked Upload

To carry out an upload of a chunk, the client can specify a range header and only include that part of the layer file:

```
PATCH /v2/<name>/blobs/uploads/<uuid>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream
```

There is no enforcement on layer chunk splits other than that the server must receive them in order. The server may enforce a minimum chunk size.

If the server cannot accept the chunk, a `416 Requested Range Not Satisfiable` response will be returned and will include a `Range` header indicating the current status:

```
416 Requested Range Not Satisfiable
Location: /v2/<name>/blobs/uploads/<uuid>
Range: 0-<last valid range>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

If this response is received, the client should resume from the "last valid range" and upload the subsequent chunk. A 416 will be returned under the following conditions:

- Invalid Content-Range header format
- Out of order chunk: the range of the next chunk must start immediately after the "last valid range" from the previous response.

When a chunk is accepted as part of the upload, a `202 Accepted` response will be returned, including a `Range` header with the current upload status:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

##### Completed Upload

For an upload to be considered complete, the client must submit a `PUT` request on the upload endpoint with a digest parameter. If it is not provided, the upload will not be considered complete. The format for the final chunk will be as follows:

```
PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
Content-Length: <size of chunk>
Content-Range: <start of range>-<end of range>
Content-Type: application/octet-stream
```

Optionally, if all chunks have already been uploaded, a `PUT` request with a `digest` parameter and zero-length body may be sent to complete and validate the upload. Multiple "digest" parameters may be provided with different digests. The server may verify none or all of them but _must_ notify the client if the content is rejected.

When the last chunk is received and the layer has been validated, the client will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob which may differ from the provided digest. Most clients may ignore the value but if it is used, the client should verify the value against the uploaded blob data.

###### Digest Parameter

The "digest" parameter is designed as an opaque parameter to support verification of a successful transfer. For example, an HTTP URI parameter might be as follows:

```
sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b
```

Given this parameter, the registry will verify that the provided content does match this digest.

##### Canceling an Upload

An upload can be cancelled by issuing a DELETE request to the upload endpoint. The format will be as follows:

```
DELETE /v2/<name>/blobs/uploads/<uuid>
```

After this request is issued, the upload uuid will no longer be valid and the registry server will dump all intermediate data. While uploads will time out if not completed, clients should issue this request if they encounter a fatal error but still have the ability to issue an http request.

##### Cross Repository Blob Mount

A blob may be mounted from another repository that the client has read access to, removing the need to upload a blob already known to the registry.
To issue a blob mount instead of an upload, a POST request should be issued in the following format:

```
POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
Content-Length: 0
```

If the blob is successfully mounted, the client will receive a `201 Created` response:

```
201 Created
Location: /v2/<name>/blobs/<digest>
Content-Length: 0
Docker-Content-Digest: <digest>
```

The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob which may differ from the provided digest. Most clients may ignore the value but if it is used, the client should verify the value against the uploaded blob data.

If a mount fails due to invalid repository or digest arguments, the registry will fall back to the standard upload behavior and return a `202 Accepted` with the upload URL in the `Location` header:

```
202 Accepted
Location: /v2/<name>/blobs/uploads/<uuid>
Range: bytes=0-<offset>
Content-Length: 0
Docker-Upload-UUID: <uuid>
```

This behavior is consistent with older versions of the registry, which do not recognize the repository mount query parameters.

Note: a client may issue a HEAD request to check existence of a blob in a source repository to distinguish between the registry not supporting blob mounts and the blob not existing in the expected repository.

##### Errors

If a 502, 503 or 504 error is received, the client should assume that the upload can proceed, as these indicate a temporary condition, honoring the appropriate retry mechanism. Other 5xx errors should be treated as terminal.

If there is a problem with the upload, a 4xx error will be returned indicating the problem. After receiving a 4xx response (except 416, as called out above), the upload will be considered failed and the client should take appropriate action.

Note that the upload url will not be available forever. If the upload uuid is unknown to the registry, a `404 Not Found` response will be returned and the client must restart the upload process.

### Deleting a Layer

A layer may be deleted from the registry via its `name` and `digest`. A delete may be issued with the following request format:

    DELETE /v2/<name>/blobs/<digest>

If the blob exists and has been successfully deleted, the following response will be issued:

    202 Accepted
    Content-Length: None

If the blob had already been deleted or did not exist, a `404 Not Found` response will be issued instead. If a layer is deleted which is referenced by a manifest in the registry, the images referencing that layer will no longer be resolvable.

#### Pushing an Image Manifest

Once all of the layers for an image are uploaded, the client can upload the image manifest. An image can be pushed using the following request format:

    PUT /v2/<name>/manifests/<reference>

    {
        "name": <name>,
        "tag": <tag>,
        "fsLayers": [
            {
                "blobSum": <digest>
            },
            ...
        ],
        "history": <v1 images>,
        "signature": <JWS>,
        ...
    }

The `name` and `reference` fields of the request body must match those specified in the URL. The `reference` field may be a "tag" or a "digest".

If there is a problem with pushing the manifest, a relevant 4xx response will be returned with a JSON error message. Please see the _PUT Manifest_ section for details on possible error codes that may be returned.

If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are returned. The `detail` field of the error response will have a `digest` field identifying the missing blob. An error is returned for each unknown blob. The response format is as follows:

    {
        "errors": [
            {
                "code": "BLOB_UNKNOWN",
                "message": "blob unknown to registry",
                "detail": {
                    "digest": <digest>
                }
            },
            ...
        ]
    }

### Listing Repositories

Images are stored in collections, known as a _repository_, which is keyed by a `name`, as seen throughout the API specification. A registry instance may contain several repositories. The list of available repositories is made available through the _catalog_.

The catalog for a given registry can be retrieved with the following request:

```
GET /v2/_catalog
```

The response will be in the following format:

```
200 OK
Content-Type: application/json

{
    "repositories": [
        <name>,
        ...
    ]
}
```

Note that the contents of the response are specific to the registry implementation. Some registries may opt to provide a full catalog output, limit it based on the user's access level or omit upstream results, if providing mirroring functionality. Subsequently, the presence of a repository in the catalog listing only means that the registry *may* provide access to the repository at the time of the request. Conversely, a missing entry does *not* mean that the registry does not have the repository. More succinctly, presence in the catalog does not guarantee access and absence does not guarantee non-existence.

For registries with a large number of repositories, this response may be quite large. If such a response is expected, one should use pagination. A registry may also limit the number of results returned even if pagination was not explicitly requested. In this case the `Link` header will be returned along with the results, and subsequent results can be obtained by following the link as if pagination had been initially requested. For details of the `Link` header, please see the _Pagination_ section.

#### Pagination

Paginated catalog results can be retrieved by adding an `n` parameter to the request URL, declaring that the response should be limited to `n` results. A paginated flow begins as follows:

```
GET /v2/_catalog?n=<integer>
```

The above specifies that a catalog response should be returned, from the start of the result set, ordered lexically, limiting the number of results to `n`. The response to such a request would look as follows:

```
200 OK
Content-Type: application/json
Link: <<url>?n=<n from the request>&last=<last repository in response>>; rel="next"

{
    "repositories": [
        <name>,
        ...
    ]
}
```

The above includes the _first_ `n` entries from the result set. To get the _next_ `n` entries, one can create a URL where the argument `last` has the value from `repositories[len(repositories)-1]`.

If there are indeed more results, the URL for the next block is encoded in an [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" relation. The presence of the `Link` header communicates to the client that the entire result set has not been returned and another request must be issued. If the header is not present, the client can assume that all results have been received.

> __NOTE:__ In the request template above, note that the brackets
> are required. For example, if the url is
> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would
> be `<http://example.com/v2/_catalog?n=20&last=b>; rel="next"`. Please see
> [RFC5988](https://tools.ietf.org/html/rfc5988) for details.

Compliant client implementations should always use the `Link` header value when proceeding through results linearly. The client may construct URLs to skip forward in the catalog.

To get the next result set, a client would issue the request as follows, using the URL encoded in the described `Link` header:

```
GET /v2/_catalog?n=<n from the request>&last=<last repository value from previous response>
```

The above process should then be repeated until the `Link` header is no longer set.
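As an illustration of that loop, here is a small Go sketch that walks the catalog by always following the returned `Link` header. The registry address is a placeholder, and the simple header parsing assumes the single `rel="next"` form shown above rather than a full RFC 5988 parser:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// nextLink extracts the URL from a Link header such as
// `</v2/_catalog?n=2&last=b>; rel="next"`. It returns "" when no next page is advertised.
func nextLink(h string) string {
	for _, part := range strings.Split(h, ",") {
		if !strings.Contains(part, `rel="next"`) {
			continue
		}
		start := strings.Index(part, "<")
		end := strings.Index(part, ">")
		if start >= 0 && end > start {
			return part[start+1 : end]
		}
	}
	return ""
}

// listRepositories walks the paginated catalog, n entries per page, following
// the Link header rather than constructing the next URL by hand.
func listRepositories(registry string, n int) ([]string, error) {
	base, err := url.Parse(registry)
	if err != nil {
		return nil, err
	}
	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)

	var all []string
	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}
		var page struct {
			Repositories []string `json:"repositories"`
		}
		err = json.NewDecoder(resp.Body).Decode(&page)
		link := resp.Header.Get("Link")
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Repositories...)

		next = ""
		if raw := nextLink(link); raw != "" {
			u, err := url.Parse(raw)
			if err != nil {
				return nil, err
			}
			// Link values may be relative; resolve against the registry base URL.
			next = base.ResolveReference(u).String()
		}
	}
	return all, nil
}

func main() {
	repos, err := listRepositories("http://localhost:5000", 100)
	fmt.Println(len(repos), err)
}
```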
The catalog result set is represented abstractly as a lexically sorted list, where the position in that list can be specified by the query term `last`. The entries in the response start _after_ the term specified by `last`, up to `n` entries. The behavior of `last` is quite simple when demonstrated with an example. Let us say the registry has the following repositories: ``` a b c d ``` If the value of `n` is 2, _a_ and _b_ will be returned on the first response. The `Link` header returned on the response will have `n` set to 2 and last set to _b_: ``` Link: <?n=2&last=b>; rel="next" ``` The client can then issue the request with above value from the `Link` header, receiving the values _c_ and _d_. Note that n may change on second to last response or be omitted fully, if the server may so choose. ### Listing Image Tags It may be necessary to list all of the tags under a given repository. The tags for an image repository can be retrieved with the following request: GET /v2//tags/list The response will be in the following format: 200 OK Content-Type: application/json { "name": , "tags": [ , ... ] } For repositories with a large number of tags, this response may be quite large. If such a response is expected, one should use the pagination. #### Pagination Paginated tag results can be retrieved by adding the appropriate parameters to the request URL described above. The behavior of tag pagination is identical to that specified for catalog pagination. We cover a simple flow to highlight any differences. Starting a paginated flow may begin as follows: ``` GET /v2//tags/list?n= ``` The above specifies that a tags response should be returned, from the start of the result set, ordered lexically, limiting the number of results to `n`. The response to such a request would look as follows: ``` 200 OK Content-Type: application/json Link: <?n=&last=>; rel="next" { "name": , "tags": [ , ... ] } ``` To get the next result set, a client would issue the request as follows, using the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header: ``` GET /v2//tags/list?n=&last= ``` The above process should then be repeated until the `Link` header is no longer set in the response. The behavior of the `last` parameter, the provided response result, lexical ordering and encoding of the `Link` header are identical to that of catalog pagination. ### Deleting an Image An image may be deleted from the registry via its `name` and `reference`. A delete may be issued with the following request format: DELETE /v2//manifests/ For deletes, `reference` *must* be a digest or the delete will fail. If the image exists and has been successfully deleted, the following response will be issued: 202 Accepted Content-Length: None If the image had already been deleted or did not exist, a `404 Not Found` response will be issued instead. ## Detail > **Note**: This section is still under construction. For the purposes of > implementation, if any details below differ from the described request flows > above, the section below should be corrected. When they match, this note > should be removed. The behavior of the endpoints are covered in detail in this section, organized by route and entity. All aspects of the request and responses are covered, including headers, parameters and body formats. Examples of requests and their corresponding responses, with success and failure, are enumerated. 
> **Note**: The sections on endpoint detail are arranged with an example > request, a description of the request, followed by information about that > request. A list of methods and URIs are covered in the table below: |Method|Path|Entity|Description| |------|----|------|-----------| | GET | `/v2/` | Base | Check that the endpoint implements Docker Registry API V2. | | GET | `/v2//tags/list` | Tags | Fetch the tags under the repository identified by `name`. | | GET | `/v2//manifests/` | Manifest | Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | | PUT | `/v2//manifests/` | Manifest | Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. | | DELETE | `/v2//manifests/` | Manifest | Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. | | GET | `/v2//blobs/` | Blob | Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. | | DELETE | `/v2//blobs/` | Blob | Delete the blob identified by `name` and `digest` | | POST | `/v2//blobs/uploads/` | Initiate Blob Upload | Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. | | GET | `/v2//blobs/uploads/` | Blob Upload | Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. | | PATCH | `/v2//blobs/uploads/` | Blob Upload | Upload a chunk of data for the specified upload. | | PUT | `/v2//blobs/uploads/` | Blob Upload | Complete the upload specified by `uuid`, optionally appending the body as the final chunk. | | DELETE | `/v2//blobs/uploads/` | Blob Upload | Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. | | GET | `/v2/_catalog` | Catalog | Retrieve a sorted, json list of repositories available in the registry. | The detail for each endpoint is covered in the following sections. ### Errors The error codes encountered via the API are enumerated in the following table: |Code|Message|Description| |----|-------|-----------| `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. `MANIFEST_BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a manifest blob is unknown to the registry. 
`MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. `SIZE_INVALID` | provided length did not match content length | When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned. `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. ### Base Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication. #### GET Base Check that the endpoint implements Docker Registry API V2. ``` GET /v2/ Host: Authorization: ``` The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| ###### On Success: OK ``` 200 OK ``` The API implements V2 protocol and is accessible. ###### On Failure: Not Found ``` 404 Not Found ``` The registry does not implement the V2 API. ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ### Tags Retrieve information about tags. #### GET Tags Fetch the tags under the repository identified by `name`. ##### Tags ``` GET /v2//tags/list Host: Authorization: ``` Return all tags for the repository The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| ###### On Success: OK ``` 200 OK Content-Length: Content-Type: application/json; charset=utf-8 { "name": , "tags": [ , ... ] } ``` A list of tags for the named repository. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ##### Tags Paginated ``` GET /v2//tags/list?n=&last= ``` Return a portion of the tags for the specified repository. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`name`|path|Name of the target repository.| |`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| |`last`|query|Result set will include values lexically after last.| ###### On Success: OK ``` 200 OK Content-Length: Link: <?n=&last=>; rel="next" Content-Type: application/json; charset=utf-8 { "name": , "tags": [ , ... ], } ``` A list of tags for the named repository. 
The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| |`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ### Manifest Create, update, delete and retrieve manifests. #### GET Manifest Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. ``` GET /v2//manifests/ Host: Authorization: ``` The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`reference`|path|Tag or digest of the target manifest.| ###### On Success: OK ``` 200 OK Docker-Content-Digest: Content-Type: application/json; charset=utf-8 { "name": , "tag": , "fsLayers": [ { "blobSum": "" }, ... ] ], "history": , "signature": } ``` The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image. 
The following headers will be returned with the response: |Name|Description| |----|-----------| |`Docker-Content-Digest`|Digest of the targeted content for the request.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The name or reference was invalid. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | #### PUT Manifest Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest. ``` PUT /v2//manifests/ Host: Authorization: Content-Type: application/json; charset=utf-8 { "name": , "tag": , "fsLayers": [ { "blobSum": "" }, ... ] ], "history": , "signature": } ``` The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. 
Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`reference`|path|Tag or digest of the target manifest.| ###### On Success: Created ``` 201 Created Location: Content-Length: 0 Docker-Content-Digest: ``` The manifest has been accepted by the registry and is stored under the specified `name` and `tag`. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|The canonical location url of the uploaded manifest.| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Content-Digest`|Digest of the targeted content for the request.| ###### On Failure: Invalid Manifest ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. | | `MANIFEST_INVALID` | manifest invalid | During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation. | | `MANIFEST_UNVERIFIED` | manifest failed signature verification | During manifest upload, if the manifest fails signature verification, this error will be returned. | | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. 
The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ###### On Failure: Missing Layer(s) ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [{ "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { "digest": "" } }, ... ] } ``` One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | ###### On Failure: Not allowed ``` 405 Method Not Allowed ``` Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | #### DELETE Manifest Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`. ``` DELETE /v2//manifests/ Host: Authorization: ``` The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`reference`|path|Tag or digest of the target manifest.| ###### On Success: Accepted ``` 202 Accepted ``` ###### On Failure: Invalid Name or Reference ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The specified `name` or `reference` were invalid and the delete was unable to proceed. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `TAG_INVALID` | manifest tag did not match URI | During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned. 
| ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ###### On Failure: Unknown Manifest ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `MANIFEST_UNKNOWN` | manifest unknown | This error is returned when the manifest, identified by name and tag is unknown to the repository. | ###### On Failure: Not allowed ``` 405 Method Not Allowed ``` Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | ### Blob Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest. 
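Before the endpoint detail that follows, a brief Go sketch of the common client-side pattern for this section: fetching a blob by digest and verifying the received bytes against the digest used to request them, as recommended in the _Content Digests_ section. The registry address and repository name are placeholders, and only `sha256` digests are handled:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// fetchBlob retrieves a blob by digest and verifies the body against that same
// digest. Redirects (307/302) are followed by net/http automatically.
func fetchBlob(registry, repo, digest string) ([]byte, error) {
	if !strings.HasPrefix(digest, "sha256:") {
		return nil, fmt.Errorf("unsupported digest algorithm in %q", digest)
	}
	resp, err := http.Get(fmt.Sprintf("%s/v2/%s/blobs/%s", registry, repo, digest))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("fetch blob: unexpected status %s", resp.Status)
	}

	// Hash while reading so large layers are not hashed in a second pass.
	h := sha256.New()
	body, err := io.ReadAll(io.TeeReader(resp.Body, h))
	if err != nil {
		return nil, err
	}
	got := "sha256:" + hex.EncodeToString(h.Sum(nil))
	if got != digest {
		// Trust the digest used to fetch, not the Docker-Content-Digest header.
		return nil, fmt.Errorf("digest mismatch: got %s, want %s", got, digest)
	}
	return body, nil
}

func main() {
	blob, err := fetchBlob("http://localhost:5000", "library/ubuntu",
		"sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b")
	fmt.Println(len(blob), err)
}
```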
#### GET Blob Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data. ##### Fetch Blob ``` GET /v2//blobs/ Host: Authorization: ``` The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`digest`|path|Digest of desired blob.| ###### On Success: OK ``` 200 OK Content-Length: Docker-Content-Digest: Content-Type: application/octet-stream ``` The blob identified by `digest` is available. The blob content will be present in the body of the request. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|The length of the requested blob content.| |`Docker-Content-Digest`|Digest of the targeted content for the request.| ###### On Success: Temporary Redirect ``` 307 Temporary Redirect Location: Docker-Content-Digest: ``` The blob identified by `digest` is available at the provided location. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|The location where the layer should be accessible.| |`Docker-Content-Digest`|Digest of the targeted content for the request.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The blob, identified by `name` and `digest`, is unknown to the registry. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. 
The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ##### Fetch Blob Part ``` GET /v2//blobs/ Host: Authorization: Range: bytes=- ``` This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Range`|header|HTTP Range header specifying blob chunk.| |`name`|path|Name of the target repository.| |`digest`|path|Digest of desired blob.| ###### On Success: Partial Content ``` 206 Partial Content Content-Length: Content-Range: bytes -/ Content-Type: application/octet-stream ``` The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|The length of the requested blob chunk.| |`Content-Range`|Content range of blob chunk.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`. 
The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | ###### On Failure: Requested Range Not Satisfiable ``` 416 Requested Range Not Satisfiable ``` The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content. ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. 
The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | #### DELETE Blob Delete the blob identified by `name` and `digest` ``` DELETE /v2//blobs/ Host: Authorization: ``` The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`digest`|path|Digest of desired blob.| ###### On Success: Accepted ``` 202 Accepted Content-Length: 0 Docker-Content-Digest: ``` The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|0| |`Docker-Content-Digest`|Digest of the targeted content for the request.| ###### On Failure: Invalid Name or Digest ``` 400 Bad Request ``` The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The blob, identified by `name` and `digest`, is unknown to the registry. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | | `BLOB_UNKNOWN` | blob unknown to registry | This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload. | ###### On Failure: Method Not Allowed ``` 405 Method Not Allowed Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. 
The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ### Initiate Blob Upload Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads. #### POST Initiate Blob Upload Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request. ##### Initiate Monolithic Blob Upload ``` POST /v2//blobs/uploads/?digest= Host: Authorization: Content-Length: Content-Type: application/octect-stream ``` Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Content-Length`|header|| |`name`|path|Name of the target repository.| |`digest`|query|Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.| ###### On Success: Created ``` 201 Created Location: Content-Length: 0 Docker-Upload-UUID: ``` The blob has been created in the registry and is available at the provided location. 
The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| ###### On Failure: Invalid Name or Digest ``` 400 Bad Request ``` The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | ###### On Failure: Not allowed ``` 405 Method Not Allowed ``` Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. 
The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ##### Initiate Resumable Blob Upload ``` POST /v2//blobs/uploads/ Host: Authorization: Content-Length: 0 ``` Initiate a resumable blob upload with an empty request body. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| |`name`|path|Name of the target repository.| ###### On Success: Accepted ``` 202 Accepted Content-Length: 0 Location: /v2//blobs/uploads/ Range: 0-0 Docker-Upload-UUID: ``` The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Location`|The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.| |`Range`|Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.| |`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| ###### On Failure: Invalid Name or Digest ``` 400 Bad Request ``` The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. 
| ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ##### Mount Blob ``` POST /v2//blobs/uploads/?mount=&from= Host: Authorization: Content-Length: 0 ``` Mount a blob identified by the `mount` parameter from another repository. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| |`name`|path|Name of the target repository.| |`mount`|query|Digest of blob to mount from the source repository.| |`from`|query|Name of the source repository.| ###### On Success: Created ``` 201 Created Location: Content-Length: 0 Docker-Upload-UUID: ``` The blob has been mounted in the repository and is available at the provided location. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| ###### On Failure: Invalid Name or Digest ``` 400 Bad Request ``` The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | ###### On Failure: Not allowed ``` 405 Method Not Allowed ``` Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNSUPPORTED` | The operation is unsupported. 
| The operation was unsupported due to a missing implementation or invalid set of parameters. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ### Blob Upload Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls. #### GET Blob Upload Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload. ``` GET /v2//blobs/uploads/ Host: Authorization: ``` Retrieve the progress of the current upload, as reported by the `Range` header. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| ###### On Success: Upload Progress ``` 204 No Content Range: 0- Content-Length: 0 Docker-Upload-UUID: ``` The upload is known and in progress. The last received offset is available in the `Range` header. 
The following headers will be returned with the response: |Name|Description| |----|-----------| |`Range`|Range indicating the current progress of the upload.| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... 
] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | #### PATCH Blob Upload Upload a chunk of data for the specified upload. ##### Stream upload ``` PATCH /v2//blobs/uploads/ Host: Authorization: Content-Type: application/octet-stream ``` Upload a stream of data to upload without completing the upload. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`name`|path|Name of the target repository.| |`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| ###### On Success: Data Accepted ``` 204 No Content Location: /v2//blobs/uploads/ Range: 0- Content-Length: 0 Docker-Upload-UUID: ``` The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| |`Range`|Range indicating the current progress of the upload.| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. 
| ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ##### Chunked upload ``` PATCH /v2//blobs/uploads/ Host: Authorization: Content-Range: - Content-Length: Content-Type: application/octet-stream ``` Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Content-Range`|header|Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.| |`Content-Length`|header|Length of the chunk being uploaded, corresponding the length of the request body.| |`name`|path|Name of the target repository.| |`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| ###### On Success: Chunk Accepted ``` 204 No Content Location: /v2//blobs/uploads/ Range: 0- Content-Length: 0 Docker-Upload-UUID: ``` The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header. 
The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.| |`Range`|Range indicating the current progress of the upload.| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Upload-UUID`|Identifies the docker upload uuid for the current request.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | ###### On Failure: Requested Range Not Satisfiable ``` 416 Requested Range Not Satisfiable ``` The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. 
The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | #### PUT Blob Upload Complete the upload specified by `uuid`, optionally appending the body as the final chunk. ``` PUT /v2//blobs/uploads/?digest= Host: Authorization: Content-Length: Content-Type: application/octet-stream ``` Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Content-Length`|header|Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.| |`name`|path|Name of the target repository.| |`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| |`digest`|query|Digest of uploaded blob.| ###### On Success: Upload Complete ``` 204 No Content Location: Content-Range: - Content-Length: 0 Docker-Content-Digest: ``` The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Location`|The canonical location of the blob for retrieval| |`Content-Range`|Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| |`Docker-Content-Digest`|Digest of the targeted content for the request.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` There was an error processing the upload and it must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DIGEST_INVALID` | provided digest did not match uploaded content | When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. 
This error may also be returned when a manifest includes an invalid layer digest. | | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | | `UNSUPPORTED` | The operation is unsupported. | The operation was unsupported due to a missing implementation or invalid set of parameters. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The upload is unknown to the registry. The upload must be restarted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. | ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | #### DELETE Blob Upload Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout. ``` DELETE /v2//blobs/uploads/ Host: Authorization: Content-Length: 0 ``` Cancel the upload specified by `uuid`. 
The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`Host`|header|Standard HTTP Host Header. Should be set to the registry host.| |`Authorization`|header|An RFC7235 compliant authorization header.| |`Content-Length`|header|The `Content-Length` header must be zero and the body must be empty.| |`name`|path|Name of the target repository.| |`uuid`|path|A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.| ###### On Success: Upload Deleted ``` 204 No Content Content-Length: 0 ``` The upload has been successfully deleted. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|The `Content-Length` header must be zero and the body must be empty.| ###### On Failure: Bad Request ``` 400 Bad Request Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` An error was encountered processing the delete. The client may ignore this error. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_INVALID` | invalid repository name | Invalid repository name encountered either during manifest validation or any API operation. | | `BLOB_UPLOAD_INVALID` | blob upload invalid | The blob upload encountered an error and can no longer proceed. | ###### On Failure: Not Found ``` 404 Not Found Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted. The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `BLOB_UPLOAD_UNKNOWN` | blob upload unknown to registry | If a blob upload has been cancelled or was never started, this error code may be returned. | ###### On Failure: Authentication Required ``` 401 Unauthorized WWW-Authenticate: realm="", ..." Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client is not authenticated. The following headers will be returned on the response: |Name|Description| |----|-----------| |`WWW-Authenticate`|An RFC7235 compliant authentication challenge header.| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `UNAUTHORIZED` | authentication required | The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate. | ###### On Failure: No Such Repository Error ``` 404 Not Found Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The repository is not known to the registry. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `NAME_UNKNOWN` | repository name not known to registry | This is returned if the name used during an operation is unknown to the registry. 
| ###### On Failure: Access Denied ``` 403 Forbidden Content-Length: Content-Type: application/json; charset=utf-8 { "errors:" [ { "code": , "message": "", "detail": ... }, ... ] } ``` The client does not have required access to the repository. The following headers will be returned on the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| | `DENIED` | requested access to the resource is denied | The access controller denied access for the operation on a resource. | ### Catalog List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available. #### GET Catalog Retrieve a sorted, json list of repositories available in the registry. ##### Catalog Fetch Complete ``` GET /v2/_catalog ``` Request an unabridged list of repositories available. ###### On Success: OK ``` 200 OK Content-Length: Content-Type: application/json; charset=utf-8 { "repositories": [ , ... ] } ``` Returns the unabridged list of repositories as a json response. The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| ##### Catalog Fetch Paginated ``` GET /v2/_catalog?n=&last= ``` Return the specified portion of repositories. The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| |`n`|query|Limit the number of entries in each response. It not present, all entries will be returned.| |`last`|query|Result set will include values lexically after last.| ###### On Success: OK ``` 200 OK Content-Length: Link: <?n=&last=>; rel="next" Content-Type: application/json; charset=utf-8 { "repositories": [ , ... ] "next": "?last=&n=" } ``` The following headers will be returned with the response: |Name|Description| |----|-----------| |`Content-Length`|Length of the JSON response body.| |`Link`|RFC5988 compliant rel='next' with URL to next result set, if available| distribution-2.3.0/docs/spec/api.md.tmpl000066400000000000000000001226111265472114500201750ustar00rootroot00000000000000 # Docker Registry HTTP API V2 ## Introduction The _Docker Registry HTTP API_ is the protocol to facilitate distribution of images to the docker engine. It interacts with instances of the docker registry, which is a service to manage information about docker images and enable their distribution. The specification covers the operation of version 2 of this API, known as _Docker Registry HTTP API V2_. While the V1 registry protocol is usable, there are several problems with the architecture that have led to this new version. The main driver of this specification these changes to the docker the image format, covered in [docker/docker#8093](https://github.com/docker/docker/issues/8093). The new, self-contained image manifest simplifies image definition and improves security. This specification will build on that work, leveraging new properties of the manifest format to improve performance, reduce bandwidth usage and decrease the likelihood of backend corruption. 
For relevant details and history leading up to this specification, please see the following issues: - [docker/docker#8093](https://github.com/docker/docker/issues/8093) - [docker/docker#9015](https://github.com/docker/docker/issues/9015) - [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) ### Scope This specification covers the URL layout and protocols of the interaction between docker registry and docker core. This will affect the docker core registry API and the rewrite of docker-registry. Docker registry implementations may implement other API endpoints, but they are not covered by this specification. This includes the following features: - Namespace-oriented URI Layout - PUSH/PULL registry server for V2 image manifest format - Resumable layer PUSH support - V2 Client library implementation While authentication and authorization support will influence this specification, details of the protocol will be left to a future specification. Relevant header definitions and error codes are present to provide an indication of what a client may encounter. #### Future There are features that have been discussed during the process of cutting this specification. The following is an incomplete list: - Immutable image references - Multiple architecture support - Migration from v2compatibility representation These may represent features that are either out of the scope of this specification, the purview of another specification or have been deferred to a future version. ### Use Cases For the most part, the use cases of the former registry API apply to the new version. Differentiating use cases are covered below. #### Image Verification A docker engine instance would like to run verified image named "library/ubuntu", with the tag "latest". The engine contacts the registry, requesting the manifest for "library/ubuntu:latest". An untrusted registry returns a manifest. Before proceeding to download the individual layers, the engine verifies the manifest's signature, ensuring that the content was produced from a trusted source and no tampering has occured. After each layer is downloaded, the engine verifies the digest of the layer, ensuring that the content matches that specified by the manifest. #### Resumable Push Company X's build servers lose connectivity to docker registry before completing an image layer transfer. After connectivity returns, the build server attempts to re-upload the image. The registry notifies the build server that the upload has already been partially attempted. The build server responds by only sending the remaining data to complete the image file. #### Resumable Pull Company X is having more connectivity problems but this time in their deployment datacenter. When downloading an image, the connection is interrupted before completion. The client keeps the partial data and uses http `Range` requests to avoid downloading repeated data. #### Layer Upload De-duplication Company Y's build system creates two identical docker layers from build processes A and B. Build process A completes uploading the layer before B. When process B attempts to upload the layer, the registry indicates that its not necessary because the layer is already known. If process A and B upload the same layer at the same time, both operations will proceed and the first to complete will be stored in the registry (Note: we may modify this to prevent dogpile with some locking mechanism). 
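The resumable pull flow above relies only on standard HTTP `Range` semantics. As a rough illustration (not part of the specification), the Go sketch below resumes a partially downloaded blob; the `resumeBlobPull` helper and the `registry`, `name` and `digest` values are hypothetical placeholders, and error handling is kept minimal.

```go
package registryclient

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// resumeBlobPull appends the missing tail of a layer blob to partialPath,
// using an HTTP Range request so already-downloaded bytes are not re-fetched.
// registry, name and digest are caller-supplied placeholders.
func resumeBlobPull(registry, name, digest, partialPath string) error {
	f, err := os.OpenFile(partialPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	// Ask the registry to skip the bytes we already have on disk.
	info, err := f.Stat()
	if err != nil {
		return err
	}
	req, err := http.NewRequest("GET",
		fmt.Sprintf("%s/v2/%s/blobs/%s", registry, name, digest), nil)
	if err != nil {
		return err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", info.Size()))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusPartialContent {
		// The registry did not honor the Range header (or returned an error);
		// a real client would fall back to a full download here.
		return fmt.Errorf("range request not honored: %s", resp.Status)
	}
	// Append only the remaining bytes to the partial file.
	_, err = io.Copy(f, resp.Body)
	return err
}
```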
### Changes

The V2 specification has been written to work as a living document, specifying only what is certain and leaving what is not specified open or to future changes. Only non-conflicting additions should be made to the API and accepted changes should avoid preventing future changes from happening.

This section should be updated when changes are made to the specification, indicating what is different. Optionally, we may start marking parts of the specification to correspond with the versions enumerated here.

Each set of changes is given a letter corresponding to a set of modifications that were applied to the baseline specification. These are merely for reference and shouldn't be used outside the specification other than to identify a set of modifications.
**j**

- Add ability to mount blobs across repositories.

**i**

- Clarified expected behavior of the response to a manifest HEAD request.

**h**

- All mention of tarsum removed.

**g**

- Clarified pagination behavior when parameters are unspecified.

**f**

- Specify the delete API for layers and manifests.

**e**

- Added support for listing registry contents.
- Added pagination to tags API.
- Added common approach to support pagination.

**d**

- Allow repository name components to be one character.
- Clarified that single-component names are allowed.

**c**

- Added section covering digest format.
- Added further clarification that a manifest cannot be deleted by tag.

**b**

- Added capability of doing streaming upload to PATCH blob upload.
- Updated PUT blob upload to no longer take a final chunk; it now requires either the entire data or no data.
- Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload.

**a**

- Added support for immutable manifest references in manifest endpoints.
- Deleting a manifest by tag has been deprecated.
- Specified `Docker-Content-Digest` header for appropriate entities.
- Added error code for unsupported operations.
## Overview This section covers client flows and details of the API endpoints. The URI layout of the new API is structured to support a rich authentication and authorization model by leveraging namespaces. All endpoints will be prefixed by the API version and the repository name: /v2// For example, an API endpoint that will work with the `library/ubuntu` repository, the URI prefix will be: /v2/library/ubuntu/ This scheme provides rich access control over various operations and methods using the URI prefix and http methods that can be controlled in variety of ways. Classically, repository names have always been two path components where each path component is less than 30 characters. The V2 registry API does not enforce this. The rules for a repository name are as follows: 1. A repository name is broken up into _path components_. A component of a repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. 2. If a repository name has two or more path components, they must be separated by a forward slash ("/"). 3. The total length of a repository name, including slashes, must be less the 256 characters. These name requirements _only_ apply to the registry API and should accept a superset of what is supported by other docker ecosystem components. All endpoints should support aggressive http caching, compression and range headers, where appropriate. The new API attempts to leverage HTTP semantics where possible but may break from standards to implement targeted features. For detail on individual endpoints, please see the [_Detail_](#detail) section. ### Errors Actionable failure conditions, covered in detail in their relevant sections, are reported as part of 4xx responses, in a json response body. One or more errors will be returned in the following format: { "errors:" [{ "code": , "message": , "detail": }, ... ] } The `code` field will be a unique identifier, all caps with underscores by convention. The `message` field will be a human readable string. The optional `detail` field may contain arbitrary json data providing information the client can use to resolve the issue. While the client can take action on certain error codes, the registry may add new error codes over time. All client implementations should treat unknown error codes as `UNKNOWN`, allowing future error codes to be added without breaking API compatibility. For the purposes of the specification error codes will only be added and never removed. For a complete account of all error codes, please see the _Detail_ section. ### API Version Check A minimal endpoint, mounted at `/v2/` will provide version support information based on its response statuses. The request format is as follows: GET /v2/ If a `200 OK` response is returned, the registry implements the V2(.1) registry API and the client may proceed safely with other V2 operations. Optionally, the response may contain information about the supported paths in the response body. The client should be prepared to ignore this data. If a `401 Unauthorized` response is returned, the client should take action based on the contents of the "WWW-Authenticate" header and try the endpoint again. Depending on access control setup, the client may still have to authenticate against different resources, even if this check succeeds. 
If `404 Not Found` response status, or other unexpected status, is returned, the client should proceed with the assumption that the registry does not implement V2 of the API. When a `200 OK` or `401 Unauthorized` response is returned, the "Docker-Distribution-API-Version" header should be set to "registry/2.0". Clients may require this header value to determine if the endpoint serves this API. When this header is omitted, clients may fallback to an older API version. ### Content Digests This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). The core of this design is the concept of a content addressable identifier. It uniquely identifies content by taking a collision-resistant hash of the bytes. Such an identifier can be independently calculated and verified by selection of a common _algorithm_. If such an identifier can be communicated in a secure manner, one can retrieve the content from an insecure source, calculate it independently and be certain that the correct content was obtained. Put simply, the identifier is a property of the content. To disambiguate from other concepts, we call this identifier a _digest_. A _digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ portion. The _algorithm_ identifies the methodology used to calculate the digest. The _hex_ portion is the hex-encoded result of the hash. We define a _digest_ string to match the following grammar: ``` digest := algorithm ":" hex algorithm := /[A-Fa-f0-9_+.-]+/ hex := /[A-Fa-f0-9]+/ ``` Some examples of _digests_ include the following: digest | description | ----------------------------------------------------------------------------------|------------------------------------------------ sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | While the _algorithm_ does allow one to implement a wide variety of algorithms, compliant implementations should use sha256. Heavy processing of input before calculating a hash is discouraged to avoid degrading the uniqueness of the _digest_ but some canonicalization may be performed to ensure consistent identifiers. Let's use a simple example in pseudo-code to demonstrate a digest calculation: ``` let C = 'a small string' let B = sha256(C) let D = 'sha256:' + EncodeHex(B) let ID(C) = D ``` Above, we have bytestring `C` passed into a function, `SHA256`, that returns a bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` as equal to `D`. A digest can be verified by independently calculating `D` and comparing it with identifier `ID(C)`. #### Digest Header To provide verification of http content, any response may include a `Docker- Content-Digest` header. This will include the digest of the target entity returned in the response. For blobs, this is the entire blob content. For manifests, this is the manifest body without the signature content, also known as the JWS payload. Note that the commonly used canonicalization for digest calculation may be dependent on the mediatype of the content, such as with manifests. The client may choose to ignore the header or may verify it to ensure content integrity and transport security. This is most important when fetching by a digest. To ensure security, the content should be verified against the digest used to fetch the content. At times, the returned digest may differ from that used to initiate a request. 
Such digests are considered to be from different _domains_, meaning they have different values for _algorithm_. In such a case, the client may choose to verify the digests in both domains or ignore the server's digest. To maintain security, the client _must_ always verify the content against the _digest_ used to fetch the content. > __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use > the same digest used to fetch the content to verify it. The header `Docker- > Content-Digest` should not be trusted over the "local" digest. ### Pulling An Image An "image" is a combination of a JSON manifest and individual layer files. The process of pulling an image centers around retrieving these two components. The first step in pulling an image is to retrieve the manifest. For reference, the relevant manifest fields for the registry are the following: field | description | ----------|------------------------------------------------| name | The name of the image. | tag | The tag for this version of the image. | fsLayers | A list of layer descriptors (including digest) | signature | A JWS used to verify the manifest content | For more information about the manifest format, please see [docker/docker#8093](https://github.com/docker/docker/issues/8093). When the manifest is in hand, the client must verify the signature to ensure the names and layers are valid. Once confirmed, the client will then use the digests to download the individual layers. Layers are stored in as blobs in the V2 registry API, keyed by their digest. #### Pulling an Image Manifest The image manifest can be fetched with the following url: ``` GET /v2//manifests/ ``` The `name` and `reference` parameter identify the image and are required. The reference may include a tag or digest. A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful, the image manifest will be returned, with the following format (see docker/docker#8093 for details): { "name": , "tag": , "fsLayers": [ { "blobSum": }, ... ] ], "history": , "signature": } The client should verify the returned manifest signature for authenticity before fetching layers. ##### Existing Manifests The image manifest can be checked for existence with the following url: ``` HEAD /v2//manifests/ ``` The `name` and `reference` parameter identify the image and are required. The reference may include a tag or digest. A `404 Not Found` response will be returned if the image is unknown to the registry. If the image exists and the response is successful the response will be as follows: ``` 200 OK Content-Length: Docker-Content-Digest: ``` #### Pulling a Layer Layers are stored in the blob portion of the registry, keyed by digest. Pulling a layer is carried out by a standard http request. The URL is as follows: GET /v2//blobs/ Access to a layer will be gated by the `name` of the repository but is identified uniquely in the registry by `digest`. This endpoint may issue a 307 (302 for /blobs/uploads/ ``` The parameters of this request are the image namespace under which the layer will be linked. Responses to this request are covered below. ##### Existing Layers The existence of a layer can be checked via a `HEAD` request to the blob store API. The request should be formatted as follows: ``` HEAD /v2//blobs/ ``` If the layer with the digest specified in `digest` is available, a 200 OK response will be received, with no actual body content (this is according to http specification). 
The response will look as follows: ``` 200 OK Content-Length: Docker-Content-Digest: ``` When this response is received, the client can assume that the layer is already available in the registry under the given name and should take no further action to upload the layer. Note that the binary digests may differ for the existing registry layer, but the digests will be guaranteed to match. ##### Uploading the Layer If the POST request is successful, a `202 Accepted` response will be returned with the upload URL in the `Location` header: ``` 202 Accepted Location: /v2//blobs/uploads/ Range: bytes=0- Content-Length: 0 Docker-Upload-UUID: ``` The rest of the upload process can be carried out with the returned url, called the "Upload URL" from the `Location` header. All responses to the upload url, whether sending data or getting status, will be in this format. Though the URI format (`/v2//blobs/uploads/`) for the `Location` header is specified, clients should treat it as an opaque url and should never try to assemble the it. While the `uuid` parameter may be an actual UUID, this proposal imposes no constraints on the format and clients should never impose any. If clients need to correlate local upload state with remote upload state, the contents of the `Docker-Upload-UUID` header should be used. Such an id can be used to key the last used location header when implementing resumable uploads. ##### Upload Progress The progress and chunk coordination of the upload process will be coordinated through the `Range` header. While this is a non-standard use of the `Range` header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. For an upload that just started, for an example with a 1000 byte layer file, the `Range` header would be as follows: ``` Range: bytes=0-0 ``` To get the status of an upload, issue a GET request to the upload URL: ``` GET /v2//blobs/uploads/ Host: ``` The response will be similar to the above, except will return 204 status: ``` 204 No Content Location: /v2//blobs/uploads/ Range: bytes=0- Docker-Upload-UUID: ``` Note that the HTTP `Range` header byte ranges are inclusive and that will be honored, even in non-standard use cases. ##### Monolithic Upload A monolithic upload is simply a chunked upload with a single chunk and may be favored by clients that would like to avoided the complexity of chunking. To carry out a "monolithic" upload, one can simply put the entire content blob to the provided URL: ``` PUT /v2//blobs/uploads/?digest= Content-Length: Content-Type: application/octet-stream ``` The "digest" parameter must be included with the PUT request. Please see the _Completed Upload_ section for details on the parameters and expected responses. Additionally, the upload can be completed with a single `POST` request to the uploads endpoint, including the "size" and "digest" parameters: ``` POST /v2//blobs/uploads/?digest= Content-Length: Content-Type: application/octet-stream ``` On the registry service, this should allocate a download, accept and verify the data and return the same response as the final chunk of an upload. If the POST request fails collecting the data in any way, the registry should attempt to return an error response to the client with the `Location` header providing a place to continue the download. The single `POST` method is provided for convenience and most clients should implement `POST` + `PUT` to support reliable resume of uploads. 
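To illustrate the monolithic flow end to end, the following Go sketch starts an upload and then completes it with a single `PUT` carrying the `digest` parameter (the helper name, URL handling and error reporting are assumptions for this example):

```go
import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
)

// pushBlobMonolithic starts an upload with POST and completes it with a
// single PUT of the entire blob, passing the digest as a query parameter.
func pushBlobMonolithic(registry, name, digest string, blob []byte) error {
	// Start the upload to obtain the upload URL from the Location header.
	resp, err := http.Post(registry+"/v2/"+name+"/blobs/uploads/", "", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("starting upload: unexpected status %d", resp.StatusCode)
	}

	// The Location header is treated as opaque; resolve it against the
	// registry base in case it is relative, then append the digest parameter.
	loc, err := url.Parse(resp.Header.Get("Location"))
	if err != nil {
		return err
	}
	base, err := url.Parse(registry)
	if err != nil {
		return err
	}
	uploadURL := base.ResolveReference(loc)
	q := uploadURL.Query()
	q.Set("digest", digest)
	uploadURL.RawQuery = q.Encode()

	// Complete the upload by PUTting the whole blob.
	req, err := http.NewRequest("PUT", uploadURL.String(), bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	req.ContentLength = int64(len(blob))

	putResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer putResp.Body.Close()
	if putResp.StatusCode != http.StatusCreated {
		return fmt.Errorf("completing upload: unexpected status %d", putResp.StatusCode)
	}
	return nil
}
```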
##### Chunked Upload To carry out an upload of a chunk, the client can specify a range header and only include that part of the layer file: ``` PATCH /v2//blobs/uploads/ Content-Length: Content-Range: - Content-Type: application/octet-stream ``` There is no enforcement on layer chunk splits other than that the server must receive them in order. The server may enforce a minimum chunk size. If the server cannot accept the chunk, a `416 Requested Range Not Satisfiable` response will be returned and will include a `Range` header indicating the current status: ``` 416 Requested Range Not Satisfiable Location: /v2//blobs/uploads/ Range: 0- Content-Length: 0 Docker-Upload-UUID: ``` If this response is received, the client should resume from the "last valid range" and upload the subsequent chunk. A 416 will be returned under the following conditions: - Invalid Content-Range header format - Out of order chunk: the range of the next chunk must start immediately after the "last valid range" from the previous response. When a chunk is accepted as part of the upload, a `202 Accepted` response will be returned, including a `Range` header with the current upload status: ``` 202 Accepted Location: /v2//blobs/uploads/ Range: bytes=0- Content-Length: 0 Docker-Upload-UUID: ``` ##### Completed Upload For an upload to be considered complete, the client must submit a `PUT` request on the upload endpoint with a digest parameter. If it is not provided, the upload will not be considered complete. The format for the final chunk will be as follows: ``` PUT /v2//blob/uploads/?digest= Content-Length: Content-Range: - Content-Type: application/octet-stream ``` Optionally, if all chunks have already been uploaded, a `PUT` request with a `digest` parameter and zero-length body may be sent to complete and validated the upload. Multiple "digest" parameters may be provided with different digests. The server may verify none or all of them but _must_ notify the client if the content is rejected. When the last chunk is received and the layer has been validated, the client will receive a `201 Created` response: ``` 201 Created Location: /v2//blobs/ Content-Length: 0 Docker-Content-Digest: ``` The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob which may differ from the provided digest. Most clients may ignore the value but if it is used, the client should verify the value against the uploaded blob data. ###### Digest Parameter The "digest" parameter is designed as an opaque parameter to support verification of a successful transfer. For example, a HTTP URI parameter might be as follows: ``` sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b ``` Given this parameter, the registry will verify that the provided content does match this digest. ##### Canceling an Upload An upload can be cancelled by issuing a DELETE request to the upload endpoint. The format will be as follows: ``` DELETE /v2//blobs/uploads/ ``` After this request is issued, the upload uuid will no longer be valid and the registry server will dump all intermediate data. While uploads will time out if not completed, clients should issue this request if they encounter a fatal error but still have the ability to issue an http request. ##### Cross Repository Blob Mount A blob may be mounted from another repository that the client has read access to, removing the need to upload a blob already known to the registry. 
To issue a blob mount instead of an upload, a POST request should be issued in the following format: ``` POST /v2//blobs/uploads/?mount=&from= Content-Length: 0 ``` If the blob is successfully mounted, the client will receive a `201 Created` response: ``` 201 Created Location: /v2//blobs/ Content-Length: 0 Docker-Content-Digest: ``` The `Location` header will contain the registry URL to access the accepted layer file. The `Docker-Content-Digest` header returns the canonical digest of the uploaded blob which may differ from the provided digest. Most clients may ignore the value but if it is used, the client should verify the value against the uploaded blob data. If a mount fails due to invalid repository or digest arguments, the registry will fall back to the standard upload behavior and return a `202 Accepted` with the upload URL in the `Location` header: ``` 202 Accepted Location: /v2//blobs/uploads/ Range: bytes=0- Content-Length: 0 Docker-Upload-UUID: ``` This behavior is consistent with older versions of the registry, which do not recognize the repository mount query parameters. Note: a client may issue a HEAD request to check existence of a blob in a source repository to distinguish between the registry not supporting blob mounts and the blob not existing in the expected repository. ##### Errors If an 502, 503 or 504 error is received, the client should assume that the download can proceed due to a temporary condition, honoring the appropriate retry mechanism. Other 5xx errors should be treated as terminal. If there is a problem with the upload, a 4xx error will be returned indicating the problem. After receiving a 4xx response (except 416, as called out above), the upload will be considered failed and the client should take appropriate action. Note that the upload url will not be available forever. If the upload uuid is unknown to the registry, a `404 Not Found` response will be returned and the client must restart the upload process. ### Deleting a Layer A layer may be deleted from the registry via its `name` and `digest`. A delete may be issued with the following request format: DELETE /v2//blobs/ If the blob exists and has been successfully deleted, the following response will be issued: 202 Accepted Content-Length: None If the blob had already been deleted or did not exist, a `404 Not Found` response will be issued instead. If a layer is deleted which is referenced by a manifest in the registry, then the complete images will not be resolvable. #### Pushing an Image Manifest Once all of the layers for an image are uploaded, the client can upload the image manifest. An image can be pushed using the following request format: PUT /v2//manifests/ { "name": , "tag": , "fsLayers": [ { "blobSum": }, ... ] ], "history": , "signature": , ... } The `name` and `reference` fields of the response body must match those specified in the URL. The `reference` field may be a "tag" or a "digest". If there is a problem with pushing the manifest, a relevant 4xx response will be returned with a JSON error message. Please see the _PUT Manifest section for details on possible error codes that may be returned. If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are returned. The `detail` field of the error response will have a `digest` field identifying the missing blob. An error is returned for each unknown blob. The response format is as follows: { "errors:" [{ "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { "digest": } }, ... 
] } ### Listing Repositories Images are stored in collections, known as a _repository_, which is keyed by a `name`, as seen throughout the API specification. A registry instance may contain several repositories. The list of available repositories is made available through the _catalog_. The catalog for a given registry can be retrieved with the following request: ``` GET /v2/_catalog ``` The response will be in the following format: ``` 200 OK Content-Type: application/json { "repositories": [ , ... ] } ``` Note that the contents of the response are specific to the registry implementation. Some registries may opt to provide a full catalog output, limit it based on the user's access level or omit upstream results, if providing mirroring functionality. Subsequently, the presence of a repository in the catalog listing only means that the registry *may* provide access to the repository at the time of the request. Conversely, a missing entry does *not* mean that the registry does not have the repository. More succinctly, the presence of a repository only guarantees that it is there but not that it is _not_ there. For registries with a large number of repositories, this response may be quite large. If such a response is expected, one should use pagination. A registry may also limit the amount of responses returned even if pagination was not explicitly requested. In this case the `Link` header will be returned along with the results, and subsequent results can be obtained by following the link as if pagination had been initially requested. For details of the `Link` header, please see the _Pagination_ section. #### Pagination Paginated catalog results can be retrieved by adding an `n` parameter to the request URL, declaring that the response should be limited to `n` results. Starting a paginated flow begins as follows: ``` GET /v2/_catalog?n= ``` The above specifies that a catalog response should be returned, from the start of the result set, ordered lexically, limiting the number of results to `n`. The response to such a request would look as follows: ``` 200 OK Content-Type: application/json Link: <?n=&last=>; rel="next" { "repositories": [ , ... ] } ``` The above includes the _first_ `n` entries from the result set. To get the _next_ `n` entries, one can create a URL where the argument `last` has the value from `repositories[len(repositories)-1]`. If there are indeed more results, the URL for the next block is encoded in an [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" relation. The presence of the `Link` header communicates to the client that the entire result set has not been returned and another request must be issued. If the header is not present, the client can assume that all results have been recieved. > __NOTE:__ In the request template above, note that the brackets > are required. For example, if the url is > `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would > be `; rel="next"`. Please see > [RFC5988](https://tools.ietf.org/html/rfc5988) for details. Compliant client implementations should always use the `Link` header value when proceeding through results linearly. The client may construct URLs to skip forward in the catalog. To get the next result set, a client would issue the request as follows, using the URL encoded in the described `Link` header: ``` GET /v2/_catalog?n=&last= ``` The above process should then be repeated until the `Link` header is no longer set. 
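The following Go sketch walks the catalog as described above, requesting `n` entries at a time and following the `Link` header until it is absent (the `catalogResponse` type and the simplified `Link` parsing are assumptions for this example):

```go
import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// catalogResponse mirrors the response body shown above.
type catalogResponse struct {
	Repositories []string `json:"repositories"`
}

// listAllRepositories pages through the catalog, n entries at a time,
// following the Link header until it is no longer returned.
func listAllRepositories(registry string, n int) ([]string, error) {
	var all []string
	next := fmt.Sprintf("%s/v2/_catalog?n=%d", registry, n)
	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			return nil, err
		}
		var page catalogResponse
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Repositories...)

		// A response such as `Link: </v2/_catalog?n=2&last=b>; rel="next"`
		// points to the next page; no header means the result set is complete.
		next = ""
		if link := resp.Header.Get("Link"); link != "" {
			start, end := strings.Index(link, "<"), strings.Index(link, ">")
			if start >= 0 && end > start {
				ref, err := url.Parse(link[start+1 : end])
				if err != nil {
					return nil, err
				}
				base, err := url.Parse(registry)
				if err != nil {
					return nil, err
				}
				next = base.ResolveReference(ref).String()
			}
		}
	}
	return all, nil
}
```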
The catalog result set is represented abstractly as a lexically sorted list, where the position in that list can be specified by the query term `last`. The entries in the response start _after_ the term specified by `last`, up to `n` entries. The behavior of `last` is quite simple when demonstrated with an example. Let us say the registry has the following repositories: ``` a b c d ``` If the value of `n` is 2, _a_ and _b_ will be returned on the first response. The `Link` header returned on the response will have `n` set to 2 and last set to _b_: ``` Link: <?n=2&last=b>; rel="next" ``` The client can then issue the request with above value from the `Link` header, receiving the values _c_ and _d_. Note that n may change on second to last response or be omitted fully, if the server may so choose. ### Listing Image Tags It may be necessary to list all of the tags under a given repository. The tags for an image repository can be retrieved with the following request: GET /v2//tags/list The response will be in the following format: 200 OK Content-Type: application/json { "name": , "tags": [ , ... ] } For repositories with a large number of tags, this response may be quite large. If such a response is expected, one should use the pagination. #### Pagination Paginated tag results can be retrieved by adding the appropriate parameters to the request URL described above. The behavior of tag pagination is identical to that specified for catalog pagination. We cover a simple flow to highlight any differences. Starting a paginated flow may begin as follows: ``` GET /v2//tags/list?n= ``` The above specifies that a tags response should be returned, from the start of the result set, ordered lexically, limiting the number of results to `n`. The response to such a request would look as follows: ``` 200 OK Content-Type: application/json Link: <?n=&last=>; rel="next" { "name": , "tags": [ , ... ] } ``` To get the next result set, a client would issue the request as follows, using the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header: ``` GET /v2//tags/list?n=&last= ``` The above process should then be repeated until the `Link` header is no longer set in the response. The behavior of the `last` parameter, the provided response result, lexical ordering and encoding of the `Link` header are identical to that of catalog pagination. ### Deleting an Image An image may be deleted from the registry via its `name` and `reference`. A delete may be issued with the following request format: DELETE /v2//manifests/ For deletes, `reference` *must* be a digest or the delete will fail. If the image exists and has been successfully deleted, the following response will be issued: 202 Accepted Content-Length: None If the image had already been deleted or did not exist, a `404 Not Found` response will be issued instead. ## Detail > **Note**: This section is still under construction. For the purposes of > implementation, if any details below differ from the described request flows > above, the section below should be corrected. When they match, this note > should be removed. The behavior of the endpoints are covered in detail in this section, organized by route and entity. All aspects of the request and responses are covered, including headers, parameters and body formats. Examples of requests and their corresponding responses, with success and failure, are enumerated. 
> **Note**: The sections on endpoint detail are arranged with an example > request, a description of the request, followed by information about that > request. A list of methods and URIs are covered in the table below: |Method|Path|Entity|Description| |------|----|------|-----------| {{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | {{end}}{{end}} The detail for each endpoint is covered in the following sections. ### Errors The error codes encountered via the API are enumerated in the following table: |Code|Message|Description| |----|-------|-----------| {{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} {{end}} {{range $route := .RouteDescriptors}} ### {{.Entity}} {{.Description}} {{range $method := $route.Methods}} #### {{.Method}} {{$route.Entity}} {{.Description}} {{if .Requests}}{{range .Requests}}{{if .Name}} ##### {{.Name}}{{end}} ``` {{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} {{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} {{.Body.Format}}{{end}} ``` {{.Description}} {{if or .Headers .PathParameters .QueryParameters}} The following parameters should be specified on the request: |Name|Kind|Description| |----|----|-----------| {{range .Headers}}|`{{.Name}}`|header|{{.Description}}| {{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| {{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| {{end}}{{end}} {{if .Successes}} {{range .Successes}} ###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} ``` {{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} {{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} {{.Body.Format}}{{end}} ``` {{.Description}} {{if .Fields}}The following fields may be returned in the response body: |Name|Description| |----|-----------| {{range .Fields}}|`{{.Name}}`|{{.Description}}| {{end}}{{end}}{{if .Headers}} The following headers will be returned with the response: |Name|Description| |----|-----------| {{range .Headers}}|`{{.Name}}`|{{.Description}}| {{end}}{{end}}{{end}}{{end}} {{if .Failures}} {{range .Failures}} ###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} ``` {{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} {{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} {{.Body.Format}}{{end}} ``` {{.Description}} {{if .Headers}} The following headers will be returned on the response: |Name|Description| |----|-----------| {{range .Headers}}|`{{.Name}}`|{{.Description}}| {{end}}{{end}} {{if .ErrorCodes}} The error codes that may be included in the response body are enumerated below: |Code|Message|Description| |----|-------|-----------| {{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | {{end}} {{end}}{{end}}{{end}}{{end}}{{end}}{{end}} {{end}} 
distribution-2.3.0/docs/spec/auth/000077500000000000000000000000001265472114500170655ustar00rootroot00000000000000distribution-2.3.0/docs/spec/auth/index.md000066400000000000000000000007021265472114500205150ustar00rootroot00000000000000 # Docker Registry v2 authentication See the [Token Authentication Specification](token.md) and [Token Authentication Implementation](jwt.md) for more information. distribution-2.3.0/docs/spec/auth/jwt.md000066400000000000000000000330611265472114500202160ustar00rootroot00000000000000 # Docker Registry v2 Bearer token specification This specification covers the `docker/distribution` implementation of the v2 Registry's authentication schema. Specifically, it describes the JSON Web Token schema that `docker/distribution` has adopted to implement the client-opaque Bearer token issued by an authentication service and understood by the registry. This document borrows heavily from the [JSON Web Token Draft Spec](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32) ## Getting a Bearer Token For this example, the client makes an HTTP GET request to the following URL: ``` https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push ``` The token server should first attempt to authenticate the client using any authentication credentials provided with the request. As of Docker 1.8, the registry client in the Docker Engine only supports Basic Authentication to these token servers. If an attempt to authenticate to the token server fails, the token server should return a `401 Unauthorized` response indicating that the provided credentials are invalid. Whether the token server requires authentication is up to the policy of that access control provider. Some requests may require authentication to determine access (such as pushing or pulling a private repository) while others may not (such as pulling from a public repository). After authenticating the client (which may simply be an anonymous client if no attempt was made to authenticate), the token server must next query its access control list to determine whether the client has the requested scope. In this example request, if I have authenticated as user `jlhawn`, the token server will determine what access I have to the repository `samalba/my-app` hosted by the entity `registry.docker.io`. Once the token server has determined what access the client has to the resources requested in the `scope` parameter, it will take the intersection of the set of requested actions on each resource and the set of actions that the client has in fact been granted. If the client only has a subset of the requested access **it must not be considered an error** as it is not the responsibility of the token server to indicate authorization errors as part of this workflow. Continuing with the example request, the token server will find that the client's set of granted access to the repository is `[pull, push]` which when intersected with the requested access `[pull, push]` yields an equal set. If the granted access set was found only to be `[pull]` then the intersected set would only be `[pull]`. If the client has no access to the repository then the intersected set would be empty, `[]`. It is this intersected set of access which is placed in the returned token. The server will now construct a JSON Web Token to sign and return. A JSON Web Token has 3 main parts: 1. Headers The header of a JSON Web Token is a standard JOSE header. 
The "typ" field will be "JWT" and it will also contain the "alg" which identifies the signing algorithm used to produce the signature. It will also usually have a "kid" field, the ID of the key which was used to sign the token. Here is an example JOSE Header for a JSON Web Token (formatted with whitespace for readability): ``` { "typ": "JWT", "alg": "ES256", "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6" } ``` It specifies that this object is going to be a JSON Web token signed using the key with the given ID using the Elliptic Curve signature algorithm using a SHA256 hash. 2. Claim Set The Claim Set is a JSON struct containing these standard registered claim name fields:
   - **`iss`** (Issuer): The issuer of the token, typically the fqdn of the authorization server.
   - **`sub`** (Subject): The subject of the token; the name or id of the client which requested it. This should be empty (`""`) if the client did not authenticate.
   - **`aud`** (Audience): The intended audience of the token; the name or id of the service which will verify the token to authorize the client/subject.
   - **`exp`** (Expiration): The token should only be considered valid up to this specified date and time.
   - **`nbf`** (Not Before): The token should not be considered valid before this specified date and time.
   - **`iat`** (Issued At): The date and time at which the authorization server generated this token.
   - **`jti`** (JWT ID): A unique identifier for this token. Can be used by the intended audience to prevent replays of the token.

   The Claim Set will also contain a private claim name unique to this authorization server specification:

   - **`access`**: An array of access entry objects with the following fields:
     - **`type`**: The type of resource hosted by the service.
     - **`name`**: The name of the resource of the given type hosted by the service.
     - **`actions`**: An array of strings which give the actions authorized on this resource.
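For illustration only, the claim set above might be modeled with Go types such as the following (the type and field names are assumptions for this sketch, not the types used by any particular implementation):

```go
// ResourceActions describes one entry of the "access" claim.
type ResourceActions struct {
	Type    string   `json:"type"`
	Name    string   `json:"name"`
	Actions []string `json:"actions"`
}

// ClaimSet collects the registered and private claims described above.
type ClaimSet struct {
	Issuer     string             `json:"iss"`
	Subject    string             `json:"sub"`
	Audience   string             `json:"aud"`
	Expiration int64              `json:"exp"`
	NotBefore  int64              `json:"nbf"`
	IssuedAt   int64              `json:"iat"`
	JWTID      string             `json:"jti"`
	Access     []*ResourceActions `json:"access"`
}
```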
Here is an example of such a JWT Claim Set (formatted with whitespace for readability): ``` { "iss": "auth.docker.com", "sub": "jlhawn", "aud": "registry.docker.com", "exp": 1415387315, "nbf": 1415387015, "iat": 1415387015, "jti": "tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws", "access": [ { "type": "repository", "name": "samalba/my-app", "actions": [ "pull", "push" ] } ] } ``` 3. Signature The authorization server will produce a JOSE header and Claim Set with no extraneous whitespace, i.e., the JOSE Header from above would be ``` {"typ":"JWT","alg":"ES256","kid":"PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6"} ``` and the Claim Set from above would be ``` {"iss":"auth.docker.com","sub":"jlhawn","aud":"registry.docker.com","exp":1415387315,"nbf":1415387015,"iat":1415387015,"jti":"tYJCO1c6cnyy7kAn0c7rKPgbV1H1bFws","access":[{"type":"repository","name":"samalba/my-app","actions":["push","pull"]}]} ``` The utf-8 representation of this JOSE header and Claim Set are then url-safe base64 encoded (sans trailing '=' buffer), producing: ``` eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0 ``` for the JOSE Header and ``` eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 ``` for the Claim Set. These two are concatenated using a '.' character, yielding the string: ``` eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0 ``` This is then used as the payload to a the `ES256` signature algorithm specified in the JOSE header and specified fully in [Section 3.4 of the JSON Web Algorithms (JWA) draft specification](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-38#section-3.4) This example signature will use the following ECDSA key for the server: ``` { "kty": "EC", "crv": "P-256", "kid": "PYYO:TEWU:V7JH:26JV:AQTZ:LJC3:SXVJ:XGHA:34F2:2LAQ:ZRMK:Z7Q6", "d": "R7OnbfMaD5J2jl7GeE8ESo7CnHSBm_1N2k9IXYFrKJA", "x": "m7zUpx3b-zmVE5cymSs64POG9QcyEpJaYCD82-549_Q", "y": "dU3biz8sZ_8GPB-odm8Wxz3lNDr1xcAQQPQaOcr1fmc" } ``` A resulting signature of the above payload using this key is: ``` QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w ``` Concatenating all of these together with a `.` character gives the resulting JWT: ``` eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w ``` This can now be placed in an HTTP response and returned to the client to use to authenticate to 
the audience service: ``` HTTP/1.1 200 OK Content-Type: application/json {"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w"} ``` ## Using the signed token Once the client has a token, it will try the registry request again with the token placed in the HTTP `Authorization` header like so: ``` Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw ``` This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) ## Verifying the token The registry must now verify the token presented by the user by inspecting the claim set within. The registry will: - Ensure that the issuer (`iss` claim) is an authority it trusts. - Ensure that the registry identifies as the audience (`aud` claim). - Check that the current time is between the `nbf` and `exp` claim times. - If enforcing single-use tokens, check that the JWT ID (`jti` claim) value has not been seen before. - To enforce this, the registry may keep a record of `jti`s it has seen for up to the `exp` time of the token to prevent token replays. - Check the `access` claim value and use the identified resources and the list of actions authorized to determine whether the token grants the required level of access for the operation the client is attempting to perform. - Verify that the signature of the token is valid. If any of these requirements are not met, the registry will return a `403 Forbidden` response to indicate that the token is invalid. **Note**: it is only at this point in the workflow that an authorization error may occur. The token server should *not* return errors when the user does not have the requested authorization. Instead, the returned token should indicate whatever of the requested scope the client does have (the intersection of requested and granted access). If the token does not supply proper authorization then the registry will return the appropriate error. At no point in this process should the registry need to call back to the authorization server. The registry only needs to be supplied with the trusted public keys to verify the token signatures. 
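As an illustration of the non-cryptographic checks listed above, a registry might validate a decoded claim set along these lines (this sketch reuses the illustrative `ClaimSet` type from the previous section and deliberately omits signature verification and `jti` replay tracking):

```go
import (
	"fmt"
	"time"
)

// verifyClaims performs the issuer, audience and validity-window checks
// listed above. Signature verification and jti replay tracking are omitted.
func verifyClaims(claims *ClaimSet, trustedIssuers map[string]bool, service string, now time.Time) error {
	if !trustedIssuers[claims.Issuer] {
		return fmt.Errorf("untrusted issuer: %q", claims.Issuer)
	}
	if claims.Audience != service {
		return fmt.Errorf("token not intended for this audience: %q", claims.Audience)
	}
	if now.Unix() < claims.NotBefore || now.Unix() > claims.Expiration {
		return fmt.Errorf("token is not valid at the current time")
	}
	return nil
}
```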
distribution-2.3.0/docs/spec/auth/token.md000066400000000000000000000242771265472114500205430ustar00rootroot00000000000000 # Docker Registry v2 authentication via central service This document outlines the v2 Docker registry authentication scheme: ![v2 registry auth](https://docs.google.com/drawings/d/1EHZU9uBLmcH0kytDClBv6jv6WR4xZjE8RKEUw1mARJA/pub?w=480&h=360) 1. Attempt to begin a push/pull operation with the registry. 2. If the registry requires authorization it will return a `401 Unauthorized` HTTP response with information on how to authenticate. 3. The registry client makes a request to the authorization service for a Bearer token. 4. The authorization service returns an opaque Bearer token representing the client's authorized access. 5. The client retries the original request with the Bearer token embedded in the request's Authorization header. 6. The Registry authorizes the client by validating the Bearer token and the claim set embedded within it and begins the push/pull session as usual. ## Requirements - Registry clients which can understand and respond to token auth challenges returned by the resource server. - An authorization server capable of managing access controls to their resources hosted by any given service (such as repositories in a Docker Registry). - A Docker Registry capable of trusting the authorization server to sign tokens which clients can use for authorization and the ability to verify these tokens for single use or for use during a sufficiently short period of time. ## Authorization Server Endpoint Descriptions The described server is meant to serve as a standalone access control manager for resources hosted by other services which wish to authenticate and manage authorizations using a separate access control manager. A service like this is used by the official Docker Registry to authenticate clients and verify their authorization to Docker image repositories. As of Docker 1.6, the registry client within the Docker Engine has been updated to handle such an authorization workflow. ## How to authenticate Registry V1 clients first contact the index to initiate a push or pull. Under the Registry V2 workflow, clients should contact the registry first. If the registry server requires authentication it will return a `401 Unauthorized` response with a `WWW-Authenticate` header detailing how to authenticate to this registry. For example, say I (username `jlhawn`) am attempting to push an image to the repository `samalba/my-app`. For the registry to authorize this, I will need `push` access to the `samalba/my-app` repository. 
The registry will first return this response: ``` HTTP/1.1 401 Unauthorized Content-Type: application/json; charset=utf-8 Docker-Distribution-Api-Version: registry/2.0 Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" Date: Thu, 10 Sep 2015 19:32:31 GMT Content-Length: 235 Strict-Transport-Security: max-age=31536000 {"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":[{"Type":"repository","Name":"samalba/my-app","Action":"pull"},{"Type":"repository","Name":"samalba/my-app","Action":"push"}]}]} ``` Note the HTTP Response Header indicating the auth challenge: ``` Www-Authenticate: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:samalba/my-app:pull,push" ``` This format is documented in [Section 3 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-3) This challenge indicates that the registry requires a token issued by the specified token server and that the request the client is attempting will need to include sufficient access entries in its claim set. To respond to this challenge, the client will need to make a `GET` request to the URL `https://auth.docker.io/token` using the `service` and `scope` values from the `WWW-Authenticate` header. ## Requesting a Token #### Query Parameters
- **`service`**: The name of the service which hosts the resource.
- **`scope`**: The resource in question, formatted as one of the space-delimited entries from the `scope` parameters from the `WWW-Authenticate` header shown above. This query parameter should be specified multiple times if there is more than one `scope` entry from the `WWW-Authenticate` header. The above example would be specified as: `scope=repository:samalba/my-app:push`.
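A client might assemble the token request URL from the challenge values as in the following Go sketch (the function name is an assumption for this example):

```go
import "net/url"

// buildTokenURL constructs the token request from the realm, service and
// scope values taken from the WWW-Authenticate challenge.
func buildTokenURL(realm, service string, scopes []string) (string, error) {
	u, err := url.Parse(realm)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("service", service)
	for _, s := range scopes {
		// The scope parameter is repeated once per requested scope entry.
		q.Add("scope", s)
	}
	u.RawQuery = q.Encode()
	return u.String(), nil
}
```

For the example challenge above, calling this helper with realm `https://auth.docker.io/token`, service `registry.docker.io` and the single scope entry `repository:samalba/my-app:pull,push` yields the token request URL used in the example below.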
#### Token Response Fields
- **`token`**: An opaque Bearer token that clients should supply to subsequent requests in the `Authorization` header.
- **`access_token`**: For compatibility with OAuth 2.0, we will also accept `token` under the name `access_token`. At least one of these fields must be specified, but both may also appear (for compatibility with older clients). When both are specified, they should be equivalent; if they differ the client's choice is undefined.
- **`expires_in`**: (Optional) The duration in seconds since the token was issued that it will remain valid. When omitted, this defaults to 60 seconds. For compatibility with older clients, a token should never be returned with less than 60 seconds to live.
- **`issued_at`**: (Optional) The RFC3339-serialized UTC standard time at which a given token was issued. If `issued_at` is omitted, the expiration is from when the token exchange completed.
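For illustration, the response fields above could be decoded into a Go struct such as the following (the type name is an assumption for this sketch):

```go
// tokenResponse captures the token response fields described above.
// expires_in is specified as a duration in seconds; issued_at is an
// RFC3339-serialized timestamp.
type tokenResponse struct {
	Token       string `json:"token"`
	AccessToken string `json:"access_token,omitempty"`
	ExpiresIn   int    `json:"expires_in,omitempty"`
	IssuedAt    string `json:"issued_at,omitempty"`
}
```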
#### Example For this example, the client makes an HTTP GET request to the following URL: ``` https://auth.docker.io/token?service=registry.docker.io&scope=repository:samalba/my-app:pull,push ``` The token server should first attempt to authenticate the client using any authentication credentials provided with the request. As of Docker 1.8, the registry client in the Docker Engine only supports Basic Authentication to these token servers. If an attempt to authenticate to the token server fails, the token server should return a `401 Unauthorized` response indicating that the provided credentials are invalid. Whether the token server requires authentication is up to the policy of that access control provider. Some requests may require authentication to determine access (such as pushing or pulling a private repository) while others may not (such as pulling from a public repository). After authenticating the client (which may simply be an anonymous client if no attempt was made to authenticate), the token server must next query its access control list to determine whether the client has the requested scope. In this example request, if I have authenticated as user `jlhawn`, the token server will determine what access I have to the repository `samalba/my-app` hosted by the entity `registry.docker.io`. Once the token server has determined what access the client has to the resources requested in the `scope` parameter, it will take the intersection of the set of requested actions on each resource and the set of actions that the client has in fact been granted. If the client only has a subset of the requested access **it must not be considered an error** as it is not the responsibility of the token server to indicate authorization errors as part of this workflow. Continuing with the example request, the token server will find that the client's set of granted access to the repository is `[pull, push]` which when intersected with the requested access `[pull, push]` yields an equal set. If the granted access set was found only to be `[pull]` then the intersected set would only be `[pull]`. If the client has no access to the repository then the intersected set would be empty, `[]`. It is this intersected set of access which is placed in the returned token. 
The server then constructs an implementation-specific token with this intersected set of access, and returns it to the Docker client to use to authenticate to the audience service (within the indicated window of time): ``` HTTP/1.1 200 OK Content-Type: application/json {"token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IlBZWU86VEVXVTpWN0pIOjI2SlY6QVFUWjpMSkMzOlNYVko6WEdIQTozNEYyOjJMQVE6WlJNSzpaN1E2In0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJqbGhhd24iLCJhdWQiOiJyZWdpc3RyeS5kb2NrZXIuY29tIiwiZXhwIjoxNDE1Mzg3MzE1LCJuYmYiOjE0MTUzODcwMTUsImlhdCI6MTQxNTM4NzAxNSwianRpIjoidFlKQ08xYzZjbnl5N2tBbjBjN3JLUGdiVjFIMWJGd3MiLCJhY2Nlc3MiOlt7InR5cGUiOiJyZXBvc2l0b3J5IiwibmFtZSI6InNhbWFsYmEvbXktYXBwIiwiYWN0aW9ucyI6WyJwdXNoIl19XX0.QhflHPfbd6eVF4lM9bwYpFZIV0PfikbyXuLx959ykRTBpe3CYnzs6YBK8FToVb5R47920PVLrh8zuLzdCr9t3w", "expires_in": "3600","issued_at": "2009-11-10T23:00:00Z"} ``` ## Using the Bearer token Once the client has a token, it will try the registry request again with the token placed in the HTTP `Authorization` header like so: ``` Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6IkJWM0Q6MkFWWjpVQjVaOktJQVA6SU5QTDo1RU42Ok40SjQ6Nk1XTzpEUktFOkJWUUs6M0ZKTDpQT1RMIn0.eyJpc3MiOiJhdXRoLmRvY2tlci5jb20iLCJzdWIiOiJCQ0NZOk9VNlo6UUVKNTpXTjJDOjJBVkM6WTdZRDpBM0xZOjQ1VVc6NE9HRDpLQUxMOkNOSjU6NUlVTCIsImF1ZCI6InJlZ2lzdHJ5LmRvY2tlci5jb20iLCJleHAiOjE0MTUzODczMTUsIm5iZiI6MTQxNTM4NzAxNSwiaWF0IjoxNDE1Mzg3MDE1LCJqdGkiOiJ0WUpDTzFjNmNueXk3a0FuMGM3cktQZ2JWMUgxYkZ3cyIsInNjb3BlIjoiamxoYXduOnJlcG9zaXRvcnk6c2FtYWxiYS9teS1hcHA6cHVzaCxwdWxsIGpsaGF3bjpuYW1lc3BhY2U6c2FtYWxiYTpwdWxsIn0.Y3zZSwaZPqy4y9oRBVRImZyv3m_S9XDHF1tWwN7mL52C_IiA73SJkWVNsvNqpJIn5h7A2F8biv_S2ppQ1lgkbw ``` This is also described in [Section 2.1 of RFC 6750: The OAuth 2.0 Authorization Framework: Bearer Token Usage](https://tools.ietf.org/html/rfc6750#section-2.1) distribution-2.3.0/docs/spec/implementations.md000066400000000000000000000013331265472114500216560ustar00rootroot00000000000000 # Distribution API Implementations This is a list of known implementations of the Distribution API spec. ## [Docker Distribution Registry](https://github.com/docker/distribution) Docker distribution is the reference implementation of the distribution API specification. It aims to fully implement the entire specification. ### Releases #### 2.0.1 (_in development_) Implements API 2.0.1 _Known Issues_ - No resumable push support - Content ranges ignored - Blob upload status will always return a starting range of 0 #### 2.0.0 Implements API 2.0.0 _Known Issues_ - No resumable push support - No PATCH implementation for blob upload - Content ranges ignored distribution-2.3.0/docs/spec/json.md000066400000000000000000000053441265472114500174250ustar00rootroot00000000000000 # Docker Distribution JSON Canonicalization To provide consistent content hashing of JSON objects throughout Docker Distribution APIs, the specification defines a canonical JSON format. Adopting such a canonicalization also aids in caching JSON responses. Note that protocols should not be designed to depend on identical JSON being generated across different versions or clients. The canonicalization rules are merely useful for caching and consistency. ## Rules Compliant JSON should conform to the following rules: 1. All generated JSON should comply with [RFC 7159](http://www.ietf.org/rfc/rfc7159.txt). 2. Resulting "JSON text" shall always be encoded in UTF-8. 3. Unless a canonical key order is defined for a particular schema, object keys shall always appear in lexically sorted order. 4. 
All whitespace between tokens should be removed. 5. No "trailing commas" are allowed in object or array definitions. 6. The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e". Ampersand "&" is escaped to "\u0026". ## Examples The following is a simple example of a canonicalized JSON string: ```json {"asdf":1,"qwer":[],"zxcv":[{},true,1000000000,"tyui"]} ``` ## Reference ### Other Canonicalizations The OLPC project specifies [Canonical JSON](http://wiki.laptop.org/go/Canonical_JSON). While this is used in [TUF](http://theupdateframework.com/), which may be used with other distribution-related protocols, this alternative format has been proposed in case the original source changes. Specifications complying with either this specification or an alternative should explicitly call out the canonicalization format. Except for key ordering, this specification is mostly compatible. ### Go In Go, the [`encoding/json`](http://golang.org/pkg/encoding/json/) library will emit canonical JSON by default. Simply using `json.Marshal` will suffice in most cases: ```go incoming := map[string]interface{}{ "asdf": 1, "qwer": []interface{}{}, "zxcv": []interface{}{ map[string]interface{}{}, true, int(1e9), "tyui", }, } canonical, err := json.Marshal(incoming) if err != nil { // ... handle error } ``` To apply canonical JSON format spacing to an existing serialized JSON buffer, one can use [`json.Indent`](http://golang.org/src/encoding/json/indent.go?s=1918:1989#L65) with the following arguments: ```go incoming := getBytes() var canonical bytes.Buffer if err := json.Indent(&canonical, incoming, "", ""); err != nil { // ... handle error } ``` distribution-2.3.0/docs/spec/manifest-v2-1.md000066400000000000000000000206461265472114500207470ustar00rootroot00000000000000 # Image Manifest Version 2, Schema 1 This document outlines the format of of the V2 image manifest. The image manifest described herein was introduced in the Docker daemon in the [v1.3.0 release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453). It is a provisional manifest to provide a compatibility with the [V1 Image format](https://github.com/docker/docker/blob/master/image/spec/v1.md), as the requirements are defined for the [V2 Schema 2 image](https://github.com/docker/distribution/pull/62). Image manifests describe the various constituents of a docker image. Image manifests can be serialized to JSON format with the following media types: Manifest Type | Media Type ------------- | ------------- manifest | "application/vnd.docker.distribution.manifest.v1+json" signed manifest | "application/vnd.docker.distribution.manifest.v1+prettyjws" *Note that "application/json" will also be accepted for schema 1.* References: - [Proposal: JSON Registry API V2.1](https://github.com/docker/docker/issues/9015) - [Proposal: Provenance step 1 - Transform images for validation and verification](https://github.com/docker/docker/issues/8093) ## *Manifest* Field Descriptions Manifest provides the base accessible fields for working with V2 image format in the registry. - **`name`** *string* name is the name of the image's repository - **`tag`** *string* tag is the tag of the image - **`architecture`** *string* architecture is the host architecture on which this image is intended to run. This is for information purposes and not currently used by the engine - **`fsLayers`** *array* fsLayers is a list of filesystem layer blob sums contained in this image. 
An fsLayer is a struct consisting of the following fields - **`blobSum`** *digest.Digest* blobSum is the digest of the referenced filesystem image layer. A digest must be a sha256 hash. - **`history`** *array* history is a list of unstructured historical data for v1 compatibility. It contains ID of the image layer and ID of the layer's parent layers. history is a struct consisting of the following fields - **`v1Compatibility`** string V1Compatibility is the raw V1 compatibility information. This will contain the JSON object describing the V1 of this image. - **`schemaVersion`** *int* SchemaVersion is the image manifest schema that this image follows. >**Note**:the length of `history` must be equal to the length of `fsLayers` and >entries in each are correlated by index. ## Signed Manifests Signed manifests provides an envelope for a signed image manifest. A signed manifest consists of an image manifest along with an additional field containing the signature of the manifest. The docker client can verify signed manifests and displays a message to the user. ### Signing Manifests Image manifests can be signed in two different ways: with a *libtrust* private key or an x509 certificate chain. When signing with an x509 certificate chain, the public key of the first element in the chain must be the public key corresponding with the sign key. ### Signed Manifest Field Description Signed manifests include an image manifest and a list of signatures generated by *libtrust*. A signature consists of the following fields: - **`header`** *[JOSE](http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2)* A [JSON Web Signature](http://self-issued.info/docs/draft-ietf-jose-json-web-signature.html) - **`signature`** *string* A signature for the image manifest, signed by a *libtrust* private key - **`protected`** *string* The signed protected header ## Example Manifest *Example showing the official 'hello-world' image manifest.* ``` { "name": "hello-world", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" }, { "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" }, { "blobSum": "sha256:cc8567d70002e957612902a8e985ea129d831ebe04057d88fb644857caa45d11" }, { "blobSum": "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" } ], "history": [ { "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" }, { "v1Compatibility": "{\"id\":\"e45a5af57b00862e5ef5782a9925979a02ba2b12dff832fd0991335f4a11e5c5\",\"parent\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"created\":\"2014-12-31T22:57:59.178729048Z\",\"container\":\"27b45f8fb11795b52e9605b686159729b0d9ca92f76d40fb4f05a62e19c46b4f\",\"container_config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [/hello]\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"docker_version\":\"1.4.1\",\"config\":{\"Hostname\":\"8ce6509d66e2\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/hello\"],\"Image\":\"31cbccb51277105ba3ae35ce33c22b69c9e3f1002e76e4c736a2e8ebff9d7b5d\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"SecurityOpt\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" }, ], "schemaVersion": 1, "signatures": [ { "header": { "jwk": { "crv": "P-256", "kid": "OD6I:6DRK:JXEJ:KBM4:255X:NSAA:MUSF:E4VM:ZI6W:CUN2:L4Z6:LSF4", "kty": "EC", "x": "3gAwX48IQ5oaYQAYSxor6rYYc_6yjuLCjtQ9LUakg4A", "y": "t72ge6kIA1XOjqjVoEOiPPAURltJFBMGDSQvEGVB010" }, "alg": "ES256" }, "signature": "XREm0L8WNn27Ga_iE_vRnTxVMhhYY0Zst_FfkKopg6gWSoTOZTuW4rK0fg_IqnKkEKlbD83tD46LKEGi5aIVFg", "protected": "eyJmb3JtYXRMZW5ndGgiOjY2MjgsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wNC0wOFQxODo1Mjo1OVoifQ" } ] } ``` distribution-2.3.0/docs/spec/manifest-v2-2.md000077500000000000000000000242571265472114500207550ustar00rootroot00000000000000 # Image Manifest Version 2, Schema 2 This document outlines the format of of the V2 image manifest, schema version 2. 
The original (and provisional) image manifest for V2 (schema 1), was introduced in the Docker daemon in the [v1.3.0 release](https://github.com/docker/docker/commit/9f482a66ab37ec396ac61ed0c00d59122ac07453) and is specified in the [schema 1 manifest definition](./manifest-v2-1.md) This second schema version has two primary goals. The first is to allow multi-architecture images, through a "fat manifest" which references image manifests for platform-specific versions of an image. The second is to move the Docker engine towards content-addressable images, by supporting an image model where the image's configuration can be hashed to generate an ID for the image. # Media Types The following media types are used by the manifest formats described here, and the resources they reference: - `application/vnd.docker.distribution.manifest.v1+json`: schema1 (existing manifest format) - `application/vnd.docker.distribution.manifest.v2+json`: New image manifest format (schemaVersion = 2) - `application/vnd.docker.distribution.manifest.list.v2+json`: Manifest list, aka "fat manifest" - `application/vnd.docker.image.rootfs.diff.tar.gzip`: "Layer", as a gzipped tar - `application/vnd.docker.container.image.v1+json`: Container config JSON ## Manifest List The manifest list is the "fat manifest" which points to specific image manifests for one or more platforms. Its use is optional, and relatively few images will use one of these manifests. A client will distinguish a manifest list from an image manifest based on the Content-Type returned in the HTTP response. ## *Manifest List* Field Descriptions - **`schemaVersion`** *int* This field specifies the image manifest schema version as an integer. This schema uses the version `2`. - **`mediaType`** *string* The MIME type of the manifest list. This should be set to `application/vnd.docker.distribution.manifest.list.v2+json`. - **`manifests`** *array* The manifests field contains a list of manifests for specific platforms. Fields of a object in the manifests list are: - **`mediaType`** *string* The MIME type of the referenced object. This will generally be `application/vnd.docker.image.manifest.v2+json`, but it could also be `application/vnd.docker.image.manifest.v1+json` if the manifest list references a legacy schema-1 manifest. - **`size`** *int* The size in bytes of the object. This field exists so that a client will have an expected size for the content before validating. If the length of the retrieved content does not match the specified length, the content should not be trusted. - **`digest`** *string* The digest of the content, as defined by the [Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter). - **`platform`** *object* The platform object describes the platform which the image in the manifest runs on. - **`architecture`** *string* The architecture field specifies the CPU architecture, for example `amd64` or `ppc64`. - **`os`** *string* The os field specifies the operating system, for example `linux` or `windows`. - **`variant`** *string* The optional variant field specifies a variant of the CPU, for example `ppc64le` to specify a little-endian version of a PowerPC CPU. - **`features`** *array* The optional features field specifies an array of strings, each listing a required CPU feature (for example `sse4` or `aes`). 
## Example Manifest List *Example showing a simple manifest list pointing to image manifests for two platforms:* ```json { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", "manifests": [ { "mediaType": "application/vnd.docker.image.manifest.v2+json", "size": 7143, "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", "platform": { "architecture": "ppc64", "os": "linux", "variant": "ppc64le", } }, { "mediaType": "application/vnd.docker.image.manifest.v2+json", "size": 7682, "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", "platform": { "architecture": "x86-64", "os": "linux", "features": [ "sse4" ] } } ] } ``` # Image Manifest The image manifest provides a configuration and a set of layers for a container image. It's the direct replacement for the schema-1 manifest. ## *Image Manifest* Field Descriptions - **`schemaVersion`** *int* This field specifies the image manifest schema version as an integer. This schema uses version `2`. - **`mediaType`** *string* The MIME type of the manifest. This should be set to `application/vnd.docker.distribution.manifest.v2+json`. - **`config`** *object* The config field references a configuration object for a container, by digest. This configuration item is a JSON blob that the runtime uses to set up the container. This new schema uses a tweaked version of this configuration to allow image content-addressability on the daemon side. Fields of a config object are: - **`mediaType`** *string* The MIME type of the referenced object. This should generally be `application/vnd.docker.container.image.v1+json`. - **`size`** *int* The size in bytes of the object. This field exists so that a client will have an expected size for the content before validating. If the length of the retrieved content does not match the specified length, the content should not be trusted. - **`digest`** *string* The digest of the content, as defined by the [Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter). - **`layers`** *array* The layer list is ordered starting from the base image (opposite order of schema1). Fields of an item in the layers list are: - **`mediaType`** *string* The MIME type of the referenced object. This should generally be `application/vnd.docker.image.rootfs.diff.tar.gzip`. - **`size`** *int* The size in bytes of the object. This field exists so that a client will have an expected size for the content before validating. If the length of the retrieved content does not match the specified length, the content should not be trusted. - **`digest`** *string* The digest of the content, as defined by the [Registry V2 HTTP API Specificiation](https://docs.docker.com/registry/spec/api/#digest-parameter). 
## Example Image Manifest *Example showing an image manifest:* ```json { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": 7023, "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 32654, "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 16724, "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 73109, "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" } ], } ``` # Backward compatibility The registry will continue to accept uploads of manifests in both the old and new formats. When pushing images, clients which support the new manifest format should first construct a manifest in the new format. If uploading this manifest fails, presumably because the registry only supports the old format, the client may fall back to uploading a manifest in the old format. When pulling images, clients indicate support for this new version of the manifest format by sending the `application/vnd.docker.distribution.manifest.v2+json` and `application/vnd.docker.distribution.manifest.list.v2+json` media types in an `Accept` header when making a request to the `manifests` endpoint. Updated clients should check the `Content-Type` header to see whether the manifest returned from the endpoint is in the old format, or is an image manifest or manifest list in the new format. If the manifest being requested uses the new format, and the appropriate media type is not present in an `Accept` header, the registry will assume that the client cannot handle the manifest as-is, and rewrite it on the fly into the old format. If the object that would otherwise be returned is a manifest list, the registry will look up the appropriate manifest for the x86-64 platform and linux OS, rewrite that manifest into the old format if necessary, and return the result to the client. If no suitable manifest is found in the manifest list, the registry will return a 404 error. One of the challenges in rewriting manifests to the old format is that the old format involves an image configuration for each layer in the manifest, but the new format only provides one image configuration. To work around this, the registry will create synthetic image configurations for all layers except the top layer. These image configurations will not result in runnable images on their own, but only serve to fill in the parent chain in a compatible way. The IDs in these synthetic configurations will be derived from hashes of their respective blobs. The registry will create these configurations and their IDs using the same scheme as Docker 1.10 when it creates a legacy manifest to push to a registry which doesn't support the new format. 
distribution-2.3.0/docs/storage-drivers/000077500000000000000000000000001265472114500203125ustar00rootroot00000000000000distribution-2.3.0/docs/storage-drivers/azure.md000066400000000000000000000035151265472114500217660ustar00rootroot00000000000000 # Microsoft Azure storage driver An implementation of the `storagedriver.StorageDriver` interface which uses [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/) for object storage. ## Parameters
| Parameter | Required | Description |
|-----------|----------|-------------|
| `accountname` | yes | Name of the Azure Storage Account. |
| `accountkey` | yes | Primary or Secondary Key for the Storage Account. |
| `container` | yes | Name of the Azure root storage container in which all registry data will be stored. Must comply with the storage container name [requirements][create-container-api]. |
| `realm` | no | Domain name suffix for the Storage Service API endpoint. For example, the realm for "Azure in China" would be `core.chinacloudapi.cn` and the realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this is `core.windows.net`. |
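For illustration only, the parameters above might appear in a registry configuration file as sketched below. The account name, key, and container values are placeholders you must replace with your own; `realm` is commented out because it is only needed for non-default Azure clouds.

```yaml
storage:
  azure:
    accountname: mystorageaccount     # placeholder: your storage account name
    accountkey: BASE64_ENCODED_KEY    # placeholder: primary or secondary key
    container: registry               # placeholder: your root storage container
    # realm: core.chinacloudapi.cn    # optional: defaults to core.windows.net
```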
## Related Information

* To get information about [azure-blob-storage](http://azure.microsoft.com/en-us/services/storage/), visit the Microsoft website.
* You can use Microsoft's [Blob Service REST API](https://msdn.microsoft.com/en-us/library/azure/dd135733.aspx) to [create a container](https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx).

distribution-2.3.0/docs/storage-drivers/filesystem.md000066400000000000000000000011171265472114500230200ustar00rootroot00000000000000

# Filesystem storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses the local filesystem.

## Parameters

`rootdirectory`: (optional) The absolute path to a root directory tree in which to store all registry files. The registry stores all its data here, so make sure there is adequate space available. Defaults to `/var/lib/registry`.

distribution-2.3.0/docs/storage-drivers/gcs.md000066400000000000000000000034401265472114500214110ustar00rootroot00000000000000

# Google Cloud Storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses Google Cloud for object storage.

## Parameters
| Parameter | Required | Description |
|-----------|----------|-------------|
| `bucket` | yes | Storage bucket name. |
| `keyfile` | no | A private service account key file in JSON format. Instead of a key file, Google Application Default Credentials can be used. |
| `rootdirectory` | no | This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary. |
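As an illustrative sketch (the bucket name and key file path are placeholders), these parameters would appear in the registry configuration as follows; the individual parameters are described in more detail below.

```yaml
storage:
  gcs:
    bucket: my-registry-bucket            # placeholder: an existing GCS bucket
    # keyfile: /etc/docker/gcs-key.json   # optional: omit to use Application Default Credentials
    # rootdirectory: /registry            # optional: key prefix within the bucket
```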
`bucket`: The name of your Google Cloud Storage bucket where you wish to store objects (needs to already be created prior to driver initialization).

`keyfile`: (optional) A private key file in JSON format, used for [Service Account Authentication](https://cloud.google.com/storage/docs/authentication#service_accounts).

**Note**: Instead of a key file, you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials).

`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root).

distribution-2.3.0/docs/storage-drivers/index.md000066400000000000000000000002021265472114500217350ustar00rootroot00000000000000

distribution-2.3.0/docs/storage-drivers/inmemory.md000066400000000000000000000013551265472114500224770ustar00rootroot00000000000000

# In-memory storage driver (Testing Only)

For testing purposes only, you can use the `inmemory` storage driver. This driver is an implementation of the `storagedriver.StorageDriver` interface which uses local memory for object storage. If you would like to run a registry from volatile memory, use the [`filesystem` driver](filesystem.md) on a ramdisk.

**IMPORTANT**: This storage driver *does not* persist data across runs. This is why it is only suitable for testing. *Never* use this driver in production.

## Parameters

None

distribution-2.3.0/docs/storage-drivers/oss.md000077500000000000000000000050661265472114500214500ustar00rootroot00000000000000

# Aliyun OSS storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses [Aliyun OSS](http://www.aliyun.com/product/oss) for object storage.

## Parameters
| Parameter | Required | Description |
|-----------|----------|-------------|
| `accesskeyid` | yes | Your access key ID. |
| `accesskeysecret` | yes | Your access key secret. |
| `region` | yes | The name of the OSS region in which you would like to store objects (for example `oss-cn-beijing`). For a list of regions, see the Aliyun OSS documentation. |
| `endpoint` | no | An endpoint which defaults to `<bucket>.<region>.aliyuncs.com` or `<bucket>.<region>-internal.aliyuncs.com` (when `internal=true`). You can change the default endpoint by changing this value. |
| `internal` | no | An internal endpoint or the public endpoint for OSS access. The default is `false`. For a list of regions, see the Aliyun OSS documentation. |
| `bucket` | yes | The name of your OSS bucket where you wish to store objects (needs to already be created prior to driver initialization). |
| `encrypt` | no | Specifies whether you would like your data encrypted on the server side. Defaults to `false` if not specified. |
| `secure` | no | Specifies whether to transfer data to the bucket over SSL or not. If you omit this value, `true` is used. |
| `chunksize` | no | The default part size for multipart uploads (performed by WriteStream) to OSS. The default is 10 MB. Keep in mind that the minimum part size for OSS is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to OSS. |
| `rootdirectory` | no | The root directory tree in which to store all registry files. Defaults to an empty string (bucket root). |
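For illustration only, a storage section using this driver might be sketched as follows. The access key, secret, and bucket values are placeholders, and the commented parameters are optional.

```yaml
storage:
  oss:
    accesskeyid: MY_ACCESS_KEY_ID           # placeholder
    accesskeysecret: MY_ACCESS_KEY_SECRET   # placeholder
    region: oss-cn-beijing                  # example region from the table above
    bucket: my-registry-bucket              # placeholder: must already exist
    # internal: true                        # optional: use the internal endpoint
    # secure: true                          # optional: transfer over SSL (default true)
    # chunksize: 10485760                   # optional: multipart part size in bytes (default 10 MB)
    # rootdirectory: /registry              # optional: key prefix within the bucket
```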
distribution-2.3.0/docs/storage-drivers/rados.md000066400000000000000000000036301265472114500217460ustar00rootroot00000000000000 # Ceph RADOS storage driver An implementation of the `storagedriver.StorageDriver` interface which uses [Ceph RADOS Object Storage][rados] for storage backend. ## Parameters
| Parameter | Required | Description |
|-----------|----------|-------------|
| `poolname` | yes | Ceph pool name. |
| `username` | no | Ceph cluster user to connect as (i.e. admin, not client.admin). |
| `chunksize` | no | Size of the written RADOS objects. Default value is 4MB (4194304). |
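For illustration, the pool and user below are placeholders; a configuration for this driver could be sketched as follows, with the parameters described in more detail after the example.

```yaml
storage:
  rados:
    poolname: registry-pool   # placeholder: an existing Ceph pool
    # username: admin         # optional: Ceph user (i.e. admin, not client.admin)
    # chunksize: 4194304      # optional: object size in bytes (default 4 MB)
```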
The following parameters must be used to configure the storage driver (case-sensitive):

* `poolname`: Name of the Ceph pool
* `username` *optional*: The user to connect as (i.e. admin, not client.admin)
* `chunksize` *optional*: Size of the written RADOS objects. Default value is 4MB (4194304).

This driver loads the [Ceph client configuration][rados-config] from the following standard paths (the first one found is used):

* `$CEPH_CONF` (environment variable)
* `/etc/ceph/ceph.conf`
* `~/.ceph/config`
* `ceph.conf` (in the current working directory)

## Developing

To include this driver when building Docker Distribution, use the build tag `include_rados`. Please see the [building documentation][building] for details.

[rados]: http://ceph.com/docs/master/rados/
[rados-config]: http://ceph.com/docs/master/rados/configuration/ceph-conf/
[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags

distribution-2.3.0/docs/storage-drivers/s3.md000066400000000000000000000144611265472114500211670ustar00rootroot00000000000000

# S3 storage driver

An implementation of the `storagedriver.StorageDriver` interface which uses Amazon S3 for object storage.

## Parameters
| Parameter | Required | Description |
|-----------|----------|-------------|
| `accesskey` | yes | Your AWS Access Key. |
| `secretkey` | yes | Your AWS Secret Key. |
| `region` | yes | The AWS region in which your bucket exists. For the moment, the Go AWS library in use does not use the newer DNS-based bucket routing. |
| `bucket` | yes | The bucket name in which you want to store the registry's data. |
| `encrypt` | no | Specifies whether the registry stores the image in encrypted format or not. A boolean value. The default is `false`. |
| `secure` | no | Indicates whether to use HTTPS instead of HTTP. A boolean value. The default is `true`. |
| `v4auth` | no | Indicates whether the registry uses Version 4 of AWS's authentication. Generally, you should set this to `true`. By default, this is `false`. |
| `chunksize` | no | The S3 API requires multipart upload chunks to be at least 5MB. This value should be a number that is larger than `5*1024*1024`. |
| `rootdirectory` | no | This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. |
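For illustration (the access key, secret key, and bucket are placeholders), a plain S3 storage section might be sketched as follows; see the parameter notes and the CloudFront example below for details.

```yaml
storage:
  s3:
    accesskey: MY_AWS_ACCESS_KEY   # placeholder; may be empty on EC2 with instance credentials
    secretkey: MY_AWS_SECRET_KEY   # placeholder
    region: us-east-1              # example region
    bucket: my-registry-bucket     # placeholder: must already exist
    # encrypt: false               # optional: server-side encryption
    # secure: true                 # optional: use HTTPS (default true)
    # v4auth: true                 # optional: use AWS signature version 4
    # chunksize: 10485760          # optional: multipart part size, larger than 5*1024*1024
    # rootdirectory: /registry     # optional: key prefix within the bucket
```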
`accesskey`: Your aws access key. `secretkey`: Your aws secret key. **Note** You can provide empty strings for your access and secret keys if you plan on running the driver on an ec2 instance and will handle authentication with the instance's credentials. `region`: The name of the aws region in which you would like to store objects (for example `us-east-1`). For a list of regions, you can look at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html `bucket`: The name of your S3 bucket where you wish to store objects. The bucket must exist prior to the driver initialization. `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). `secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns. `v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to false if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false) `chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes. `rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). # CloudFront as Middleware with S3 backend ## Use Case Adding CloudFront as a middleware for your S3 backed registry can dramatically improve pull times. Your registry will have the ability to retrieve your images from edge servers, rather than the geographically limited location of your S3 bucket. The farther your registry is from your bucket, the more improvements you will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/). ## Configuring CloudFront for Distribution If you are unfamiliar with creating a CloudFront distribution, see [Getting Started with Cloudfront](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/GettingStarted.html). Defaults can be kept in most areas except: ### Origin: The CloudFront distribution must be created such that the `Origin Path` is set to the directory level of the root "docker" key in S3. If your registry exists on the root of the bucket, this path should be left blank. ### Behaviors: - Viewer Protocol Policy: HTTPS Only - Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE - Cached HTTP Methods: OPTIONS (checked) - Restrict Viewer Access (Use Signed URLs or Signed Cookies): Yes - Trusted Signers: Self (Can add other accounts as long as you have access to CloudFront Key Pairs for those additional accounts) ## Registry configuration Here the `middleware` option is used. It is still important to keep the `storage` option as CloudFront will only handle `pull` actions; `push` actions are still directly written to S3. The following example shows what you will need at minimum: ``` ... 
storage:
  s3:
    region: us-east-1
    bucket: docker.myregistry.com
middleware:
  storage:
    - name: cloudfront
      options:
        baseurl: https://abcdefghijklmn.cloudfront.net/
        privatekey: /etc/docker/cloudfront/pk-ABCEDFGHIJKLMNOPQRST.pem
        keypairid: ABCEDFGHIJKLMNOPQRST
...
```

## CloudFront Key-Pair

A CloudFront key-pair is required for all AWS accounts needing access to your CloudFront distribution. For information, please see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs).

distribution-2.3.0/docs/storage-drivers/swift.md000066400000000000000000000163431265472114500217760ustar00rootroot00000000000000

# OpenStack Swift storage driver

An implementation of the `storagedriver.StorageDriver` interface that uses [OpenStack Swift](http://docs.openstack.org/developer/swift/) for object storage.

## Parameters
| Parameter | Required | Description |
|-----------|----------|-------------|
| `authurl` | yes | URL for obtaining an auth token, for example `https://storage.myprovider.com/v2.0` or `https://storage.myprovider.com/v3/auth`. |
| `username` | yes | Your OpenStack user name. |
| `password` | yes | Your OpenStack password. |
| `region` | no | The OpenStack region in which your container exists. |
| `container` | yes | The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization. |
| `tenant` | no | Your OpenStack tenant name. You can either use `tenant` or `tenantid`. |
| `tenantid` | no | Your OpenStack tenant id. You can either use `tenant` or `tenantid`. |
| `domain` | no | Your OpenStack domain name for Identity v3 API. You can either use `domain` or `domainid`. |
| `domainid` | no | Your OpenStack domain id for Identity v3 API. You can either use `domain` or `domainid`. |
| `trustid` | no | Your OpenStack trust id for Identity v3 API. |
| `insecureskipverify` | no | Set to `true` to skip TLS verification; `false` by default. |
| `chunksize` | no | Size of the data segments for the Swift Dynamic Large Objects. This value should be a number (defaults to 5M). |
| `prefix` | no | This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string, which is the container's root. |
| `secretkey` | no | The secret key used to generate temporary URLs. |
| `accesskey` | no | The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter. |
`authurl`: URL for obtaining an auth token.

`username`: Your OpenStack user name.

`password`: Your OpenStack password.

`container`: The name of your Swift container where you wish to store the registry's data. The driver creates the named container during its initialization.

`tenant`: (optional) Your OpenStack tenant name. You can either use `tenant` or `tenantid`.

`tenantid`: (optional) Your OpenStack tenant id. You can either use `tenant` or `tenantid`.

`domain`: (optional) Your OpenStack domain name for Identity v3 API. You can either use `domain` or `domainid`.

`domainid`: (optional) Your OpenStack domain id for Identity v3 API. You can either use `domain` or `domainid`.

`trustid`: (optional) Your OpenStack trust id for Identity v3 API.

`insecureskipverify`: (optional) Set `insecureskipverify` to `true` to skip TLS verification for your OpenStack provider. The driver uses `false` by default.

`region`: (optional) Specify the OpenStack region name in which you would like to store objects (for example `fr`).

`chunksize`: (optional) Specify the segment size for Dynamic Large Objects uploads (performed by WriteStream) to Swift. The default is 5 MB. You might experience better performance for larger chunk sizes depending on the speed of your connection to Swift.

`prefix`: (optional) Supply a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string, which is the container's root.

`secretkey`: (optional) The secret key used to generate temporary URLs.

`accesskey`: (optional) The access key to generate temporary URLs. It is used by HP Cloud Object Storage in addition to the `secretkey` parameter.
The features supported by the Swift server are queried by requesting the `/info` URL on the server. In case the administrator has disabled that feature, the configuration file can specify the following optional parameters:

`tempurlcontainerkey`: When set to `true`, use the container secret key to generate temporary URLs; otherwise, use the account secret key.

`tempurlmethods`: Array of HTTP methods that are supported by the TempURL middleware of the Swift server. Example:

    - tempurlmethods:
       - GET
       - PUT
       - HEAD
       - POST
       - DELETE

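As an illustrative sketch only, a Swift-backed storage section tying the parameters above together might look like the following. The auth URL, credentials, and container name are placeholders, and the commented lines show optional settings.

```yaml
storage:
  swift:
    authurl: https://storage.myprovider.com/v2.0   # placeholder: your auth endpoint
    username: MY_OPENSTACK_USER                    # placeholder
    password: MY_OPENSTACK_PASSWORD                # placeholder
    container: registry                            # created by the driver if it does not exist
    # region: fr                                   # optional
    # tenant: MY_TENANT                            # optional: or tenantid
    # domain: MY_DOMAIN                            # optional: or domainid (Identity v3)
    # insecureskipverify: false                    # optional: skip TLS verification when true
    # chunksize: 5242880                           # optional: segment size in bytes (default 5 MB)
    # prefix: /registry                            # optional: key prefix within the container
```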
distribution-2.3.0/docs/storagedrivers.md000066400000000000000000000072151265472114500205640ustar00rootroot00000000000000 # Docker Registry Storage Driver This document describes the registry storage driver model, implementation, and explains how to contribute new storage drivers. ## Provided Drivers This storage driver package comes bundled with several drivers: - [inmemory](storage-drivers/inmemory.md): A temporary storage driver using a local inmemory map. This exists solely for reference and testing. - [filesystem](storage-drivers/filesystem.md): A local storage driver configured to use a directory tree in the local filesystem. - [s3](storage-drivers/s3.md): A driver storing objects in an Amazon Simple Storage Solution (S3) bucket. - [azure](storage-drivers/azure.md): A driver storing objects in [Microsoft Azure Blob Storage](http://azure.microsoft.com/en-us/services/storage/). - [rados](storage-drivers/rados.md): A driver storing objects in a [Ceph Object Storage](http://ceph.com/docs/master/rados/) pool. - [swift](storage-drivers/swift.md): A driver storing objects in [Openstack Swift](http://docs.openstack.org/developer/swift/). - [oss](storage-drivers/oss.md): A driver storing objects in [Aliyun OSS](http://www.aliyun.com/product/oss). - [gcs](storage-drivers/gcs.md): A driver storing objects in a [Google Cloud Storage](https://cloud.google.com/storage/) bucket. ## Storage Driver API The storage driver API is designed to model a filesystem-like key/value storage in a manner abstract enough to support a range of drivers from the local filesystem to Amazon S3 or other distributed object storage systems. Storage drivers are required to implement the `storagedriver.StorageDriver` interface provided in `storagedriver.go`, which includes methods for reading, writing, and deleting content, as well as listing child objects of a specified prefix key. Storage drivers are intended to be written in Go, providing compile-time validation of the `storagedriver.StorageDriver` interface. ## Driver Selection and Configuration The preferred method of selecting a storage driver is using the `StorageDriverFactory` interface in the `storagedriver/factory` package. These factories provide a common interface for constructing storage drivers with a parameters map. The factory model is based off of the [Register](http://golang.org/pkg/database/sql/#Register) and [Open](http://golang.org/pkg/database/sql/#Open) methods in the builtin [database/sql](http://golang.org/pkg/database/sql) package. Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no such storage driver can be found, `factory.Create` will return an `InvalidStorageDriverError`. ## Driver Contribution ### Writing new storage drivers To create a valid storage driver, one must implement the `storagedriver.StorageDriver` interface and make sure to expose this driver via the factory system. #### Registering Storage drivers should call `factory.Register` with their driver name in an `init` method, allowing callers of `factory.New` to construct instances of this driver without requiring modification of imports throughout the codebase. ## Testing Storage driver test suites are provided in `storagedriver/testsuites/testsuites.go` and may be used for any storage driver written in Go. Tests can be registered using the `RegisterSuite` function, which run the same set of tests for any registered drivers. 
distribution-2.3.0/errors.go000066400000000000000000000061271265472114500161130ustar00rootroot00000000000000package distribution import ( "errors" "fmt" "strings" "github.com/docker/distribution/digest" ) // ErrManifestNotModified is returned when a conditional manifest GetByTag // returns nil due to the client indicating it has the latest version var ErrManifestNotModified = errors.New("manifest not modified") // ErrUnsupported is returned when an unimplemented or unsupported action is // performed var ErrUnsupported = errors.New("operation unsupported") // ErrTagUnknown is returned if the given tag is not known by the tag service type ErrTagUnknown struct { Tag string } func (err ErrTagUnknown) Error() string { return fmt.Sprintf("unknown tag=%s", err.Tag) } // ErrRepositoryUnknown is returned if the named repository is not known by // the registry. type ErrRepositoryUnknown struct { Name string } func (err ErrRepositoryUnknown) Error() string { return fmt.Sprintf("unknown repository name=%s", err.Name) } // ErrRepositoryNameInvalid should be used to denote an invalid repository // name. Reason may set, indicating the cause of invalidity. type ErrRepositoryNameInvalid struct { Name string Reason error } func (err ErrRepositoryNameInvalid) Error() string { return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) } // ErrManifestUnknown is returned if the manifest is not known by the // registry. type ErrManifestUnknown struct { Name string Tag string } func (err ErrManifestUnknown) Error() string { return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) } // ErrManifestUnknownRevision is returned when a manifest cannot be found by // revision within a repository. type ErrManifestUnknownRevision struct { Name string Revision digest.Digest } func (err ErrManifestUnknownRevision) Error() string { return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) } // ErrManifestUnverified is returned when the registry is unable to verify // the manifest. type ErrManifestUnverified struct{} func (ErrManifestUnverified) Error() string { return fmt.Sprintf("unverified manifest") } // ErrManifestVerification provides a type to collect errors encountered // during manifest verification. Currently, it accepts errors of all types, // but it may be narrowed to those involving manifest verification. type ErrManifestVerification []error func (errs ErrManifestVerification) Error() string { var parts []string for _, err := range errs { parts = append(parts, err.Error()) } return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) } // ErrManifestBlobUnknown returned when a referenced blob cannot be found. type ErrManifestBlobUnknown struct { Digest digest.Digest } func (err ErrManifestBlobUnknown) Error() string { return fmt.Sprintf("unknown blob %v on manifest", err.Digest) } // ErrManifestNameInvalid should be used to denote an invalid manifest // name. Reason may set, indicating the cause of invalidity. 
type ErrManifestNameInvalid struct { Name string Reason error } func (err ErrManifestNameInvalid) Error() string { return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) } distribution-2.3.0/health/000077500000000000000000000000001265472114500155075ustar00rootroot00000000000000distribution-2.3.0/health/api/000077500000000000000000000000001265472114500162605ustar00rootroot00000000000000distribution-2.3.0/health/api/api.go000066400000000000000000000015201265472114500173560ustar00rootroot00000000000000package api import ( "errors" "net/http" "github.com/docker/distribution/health" ) var ( updater = health.NewStatusUpdater() ) // DownHandler registers a manual_http_status that always returns an Error func DownHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "POST" { updater.Update(errors.New("Manual Check")) } else { w.WriteHeader(http.StatusNotFound) } } // UpHandler registers a manual_http_status that always returns nil func UpHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "POST" { updater.Update(nil) } else { w.WriteHeader(http.StatusNotFound) } } // init sets up the two endpoints to bring the service up and down func init() { health.Register("manual_http_status", updater) http.HandleFunc("/debug/health/down", DownHandler) http.HandleFunc("/debug/health/up", UpHandler) } distribution-2.3.0/health/api/api_test.go000066400000000000000000000042301265472114500204160ustar00rootroot00000000000000package api import ( "net/http" "net/http/httptest" "testing" "github.com/docker/distribution/health" ) // TestGETDownHandlerDoesNotChangeStatus ensures that calling the endpoint // /debug/health/down with METHOD GET returns a 404 func TestGETDownHandlerDoesNotChangeStatus(t *testing.T) { recorder := httptest.NewRecorder() req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/down", nil) if err != nil { t.Errorf("Failed to create request.") } DownHandler(recorder, req) if recorder.Code != 404 { t.Errorf("Did not get a 404.") } } // TestGETUpHandlerDoesNotChangeStatus ensures that calling the endpoint // /debug/health/down with METHOD GET returns a 404 func TestGETUpHandlerDoesNotChangeStatus(t *testing.T) { recorder := httptest.NewRecorder() req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health/up", nil) if err != nil { t.Errorf("Failed to create request.") } DownHandler(recorder, req) if recorder.Code != 404 { t.Errorf("Did not get a 404.") } } // TestPOSTDownHandlerChangeStatus ensures the endpoint /debug/health/down changes // the status code of the response to 503 // This test is order dependent, and should come before TestPOSTUpHandlerChangeStatus func TestPOSTDownHandlerChangeStatus(t *testing.T) { recorder := httptest.NewRecorder() req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/down", nil) if err != nil { t.Errorf("Failed to create request.") } DownHandler(recorder, req) if recorder.Code != 200 { t.Errorf("Did not get a 200.") } if len(health.CheckStatus()) != 1 { t.Errorf("DownHandler didn't add an error check.") } } // TestPOSTUpHandlerChangeStatus ensures the endpoint /debug/health/up changes // the status code of the response to 200 func TestPOSTUpHandlerChangeStatus(t *testing.T) { recorder := httptest.NewRecorder() req, err := http.NewRequest("POST", "https://fakeurl.com/debug/health/up", nil) if err != nil { t.Errorf("Failed to create request.") } UpHandler(recorder, req) if recorder.Code != 200 { t.Errorf("Did not get a 200.") } if len(health.CheckStatus()) != 0 { t.Errorf("UpHandler 
didn't remove the error check.") } } distribution-2.3.0/health/checks/000077500000000000000000000000001265472114500167475ustar00rootroot00000000000000distribution-2.3.0/health/checks/checks.go000066400000000000000000000030451265472114500205400ustar00rootroot00000000000000package checks import ( "errors" "net" "net/http" "os" "strconv" "time" "github.com/docker/distribution/health" ) // FileChecker checks the existence of a file and returns an error // if the file exists. func FileChecker(f string) health.Checker { return health.CheckFunc(func() error { if _, err := os.Stat(f); err == nil { return errors.New("file exists") } return nil }) } // HTTPChecker does a HEAD request and verifies that the HTTP status code // returned matches statusCode. func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker { return health.CheckFunc(func() error { client := http.Client{ Timeout: timeout, } req, err := http.NewRequest("HEAD", r, nil) if err != nil { return errors.New("error creating request: " + r) } for headerName, headerValues := range headers { for _, headerValue := range headerValues { req.Header.Add(headerName, headerValue) } } response, err := client.Do(req) if err != nil { return errors.New("error while checking: " + r) } if response.StatusCode != statusCode { return errors.New("downstream service returned unexpected status: " + strconv.Itoa(response.StatusCode)) } return nil }) } // TCPChecker attempts to open a TCP connection. func TCPChecker(addr string, timeout time.Duration) health.Checker { return health.CheckFunc(func() error { conn, err := net.DialTimeout("tcp", addr, timeout) if err != nil { return errors.New("connection to " + addr + " failed") } conn.Close() return nil }) } distribution-2.3.0/health/checks/checks_test.go000066400000000000000000000012421265472114500215740ustar00rootroot00000000000000package checks import ( "testing" ) func TestFileChecker(t *testing.T) { if err := FileChecker("/tmp").Check(); err == nil { t.Errorf("/tmp was expected as exists") } if err := FileChecker("NoSuchFileFromMoon").Check(); err != nil { t.Errorf("NoSuchFileFromMoon was expected as not exists, error:%v", err) } } func TestHTTPChecker(t *testing.T) { if err := HTTPChecker("https://www.google.cybertron", 200, 0, nil).Check(); err == nil { t.Errorf("Google on Cybertron was expected as not exists") } if err := HTTPChecker("https://www.google.pt", 200, 0, nil).Check(); err != nil { t.Errorf("Google at Portugal was expected as exists, error:%v", err) } } distribution-2.3.0/health/doc.go000066400000000000000000000124441265472114500166100ustar00rootroot00000000000000// Package health provides a generic health checking framework. // The health package works expvar style. By importing the package the debug // server is getting a "/debug/health" endpoint that returns the current // status of the application. // If there are no errors, "/debug/health" will return a HTTP 200 status, // together with an empty JSON reply "{}". If there are any checks // with errors, the JSON reply will include all the failed checks, and the // response will be have an HTTP 503 status. // // A Check can either be run synchronously, or asynchronously. We recommend // that most checks are registered as an asynchronous check, so a call to the // "/debug/health" endpoint always returns immediately. This pattern is // particularly useful for checks that verify upstream connectivity or // database status, since they might take a long time to return/timeout. 
// // Installing // // To install health, just import it in your application: // // import "github.com/docker/distribution/health" // // You can also (optionally) import "health/api" that will add two convenience // endpoints: "/debug/health/down" and "/debug/health/up". These endpoints add // "manual" checks that allow the service to quickly be brought in/out of // rotation. // // import _ "github.com/docker/distribution/registry/health/api" // // # curl localhost:5001/debug/health // {} // # curl -X POST localhost:5001/debug/health/down // # curl localhost:5001/debug/health // {"manual_http_status":"Manual Check"} // // After importing these packages to your main application, you can start // registering checks. // // Registering Checks // // The recommended way of registering checks is using a periodic Check. // PeriodicChecks run on a certain schedule and asynchronously update the // status of the check. This allows CheckStatus to return without blocking // on an expensive check. // // A trivial example of a check that runs every 5 seconds and shuts down our // server if the current minute is even, could be added as follows: // // func currentMinuteEvenCheck() error { // m := time.Now().Minute() // if m%2 == 0 { // return errors.New("Current minute is even!") // } // return nil // } // // health.RegisterPeriodicFunc("minute_even", currentMinuteEvenCheck, time.Second*5) // // Alternatively, you can also make use of "RegisterPeriodicThresholdFunc" to // implement the exact same check, but add a threshold of failures after which // the check will be unhealthy. This is particularly useful for flaky Checks, // ensuring some stability of the service when handling them. // // health.RegisterPeriodicThresholdFunc("minute_even", currentMinuteEvenCheck, time.Second*5, 4) // // The lowest-level way to interact with the health package is calling // "Register" directly. Register allows you to pass in an arbitrary string and // something that implements "Checker" and runs your check. If your method // returns an error with nil, it is considered a healthy check, otherwise it // will make the health check endpoint "/debug/health" start returning a 503 // and list the specific check that failed. // // Assuming you wish to register a method called "currentMinuteEvenCheck() // error" you could do that by doing: // // health.Register("even_minute", health.CheckFunc(currentMinuteEvenCheck)) // // CheckFunc is a convenience type that implements Checker. // // Another way of registering a check could be by using an anonymous function // and the convenience method RegisterFunc. An example that makes the status // endpoint always return an error: // // health.RegisterFunc("my_check", func() error { // return Errors.new("This is an error!") // })) // // Examples // // You could also use the health checker mechanism to ensure your application // only comes up if certain conditions are met, or to allow the developer to // take the service out of rotation immediately. An example that checks // database connectivity and immediately takes the server out of rotation on // err: // // updater = health.NewStatusUpdater() // health.RegisterFunc("database_check", func() error { // return updater.Check() // })) // // conn, err := Connect(...) // database call here // if err != nil { // updater.Update(errors.New("Error connecting to the database: " + err.Error())) // } // // You can also use the predefined Checkers that come included with the health // package. 
First, import the checks: // // import "github.com/docker/distribution/health/checks // // After that you can make use of any of the provided checks. An example of // using a `FileChecker` to take the application out of rotation if a certain // file exists can be done as follows: // // health.Register("fileChecker", health.PeriodicChecker(checks.FileChecker("/tmp/disable"), time.Second*5)) // // After registering the check, it is trivial to take an application out of // rotation from the console: // // # curl localhost:5001/debug/health // {} // # touch /tmp/disable // # curl localhost:5001/debug/health // {"fileChecker":"file exists"} // // You could also test the connectivity to a downstream service by using a // "HTTPChecker", but ensure that you only mark the test unhealthy if there // are a minimum of two failures in a row: // // health.Register("httpChecker", health.PeriodicThresholdChecker(checks.HTTPChecker("https://www.google.pt"), time.Second*5, 2)) package health distribution-2.3.0/health/health.go000066400000000000000000000213651265472114500173120ustar00rootroot00000000000000package health import ( "encoding/json" "fmt" "net/http" "sync" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" ) // A Registry is a collection of checks. Most applications will use the global // registry defined in DefaultRegistry. However, unit tests may need to create // separate registries to isolate themselves from other tests. type Registry struct { mu sync.RWMutex registeredChecks map[string]Checker } // NewRegistry creates a new registry. This isn't necessary for normal use of // the package, but may be useful for unit tests so individual tests have their // own set of checks. func NewRegistry() *Registry { return &Registry{ registeredChecks: make(map[string]Checker), } } // DefaultRegistry is the default registry where checks are registered. It is // the registry used by the HTTP handler. var DefaultRegistry *Registry // Checker is the interface for a Health Checker type Checker interface { // Check returns nil if the service is okay. Check() error } // CheckFunc is a convenience type to create functions that implement // the Checker interface type CheckFunc func() error // Check Implements the Checker interface to allow for any func() error method // to be passed as a Checker func (cf CheckFunc) Check() error { return cf() } // Updater implements a health check that is explicitly set. type Updater interface { Checker // Update updates the current status of the health check. Update(status error) } // updater implements Checker and Updater, providing an asynchronous Update // method. // This allows us to have a Checker that returns the Check() call immediately // not blocking on a potentially expensive check. type updater struct { mu sync.Mutex status error } // Check implements the Checker interface func (u *updater) Check() error { u.mu.Lock() defer u.mu.Unlock() return u.status } // Update implements the Updater interface, allowing asynchronous access to // the status of a Checker. func (u *updater) Update(status error) { u.mu.Lock() defer u.mu.Unlock() u.status = status } // NewStatusUpdater returns a new updater func NewStatusUpdater() Updater { return &updater{} } // thresholdUpdater implements Checker and Updater, providing an asynchronous Update // method. // This allows us to have a Checker that returns the Check() call immediately // not blocking on a potentially expensive check. 
type thresholdUpdater struct { mu sync.Mutex status error threshold int count int } // Check implements the Checker interface func (tu *thresholdUpdater) Check() error { tu.mu.Lock() defer tu.mu.Unlock() if tu.count >= tu.threshold { return tu.status } return nil } // thresholdUpdater implements the Updater interface, allowing asynchronous // access to the status of a Checker. func (tu *thresholdUpdater) Update(status error) { tu.mu.Lock() defer tu.mu.Unlock() if status == nil { tu.count = 0 } else if tu.count < tu.threshold { tu.count++ } tu.status = status } // NewThresholdStatusUpdater returns a new thresholdUpdater func NewThresholdStatusUpdater(t int) Updater { return &thresholdUpdater{threshold: t} } // PeriodicChecker wraps an updater to provide a periodic checker func PeriodicChecker(check Checker, period time.Duration) Checker { u := NewStatusUpdater() go func() { t := time.NewTicker(period) for { <-t.C u.Update(check.Check()) } }() return u } // PeriodicThresholdChecker wraps an updater to provide a periodic checker that // uses a threshold before it changes status func PeriodicThresholdChecker(check Checker, period time.Duration, threshold int) Checker { tu := NewThresholdStatusUpdater(threshold) go func() { t := time.NewTicker(period) for { <-t.C tu.Update(check.Check()) } }() return tu } // CheckStatus returns a map with all the current health check errors func (registry *Registry) CheckStatus() map[string]string { // TODO(stevvooe) this needs a proper type registry.mu.RLock() defer registry.mu.RUnlock() statusKeys := make(map[string]string) for k, v := range registry.registeredChecks { err := v.Check() if err != nil { statusKeys[k] = err.Error() } } return statusKeys } // CheckStatus returns a map with all the current health check errors from the // default registry. func CheckStatus() map[string]string { return DefaultRegistry.CheckStatus() } // Register associates the checker with the provided name. func (registry *Registry) Register(name string, check Checker) { if registry == nil { registry = DefaultRegistry } registry.mu.Lock() defer registry.mu.Unlock() _, ok := registry.registeredChecks[name] if ok { panic("Check already exists: " + name) } registry.registeredChecks[name] = check } // Register associates the checker with the provided name in the default // registry. func Register(name string, check Checker) { DefaultRegistry.Register(name, check) } // RegisterFunc allows the convenience of registering a checker directly from // an arbitrary func() error. func (registry *Registry) RegisterFunc(name string, check func() error) { registry.Register(name, CheckFunc(check)) } // RegisterFunc allows the convenience of registering a checker in the default // registry directly from an arbitrary func() error. func RegisterFunc(name string, check func() error) { DefaultRegistry.RegisterFunc(name, check) } // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker // from an arbitrary func() error. func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { registry.Register(name, PeriodicChecker(CheckFunc(check), period)) } // RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker // in the default registry from an arbitrary func() error. func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) { DefaultRegistry.RegisterPeriodicFunc(name, period, check) } // RegisterPeriodicThresholdFunc allows the convenience of registering a // PeriodicChecker from an arbitrary func() error. 
func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold)) } // RegisterPeriodicThresholdFunc allows the convenience of registering a // PeriodicChecker in the default registry from an arbitrary func() error. func RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) { DefaultRegistry.RegisterPeriodicThresholdFunc(name, period, threshold, check) } // StatusHandler returns a JSON blob with all the currently registered Health Checks // and their corresponding status. // Returns 503 if any Error status exists, 200 otherwise func StatusHandler(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { checks := CheckStatus() status := http.StatusOK // If there is an error, return 503 if len(checks) != 0 { status = http.StatusServiceUnavailable } statusResponse(w, r, status, checks) } else { http.NotFound(w, r) } } // Handler returns a handler that will return 503 response code if the health // checks have failed. If everything is okay with the health checks, the // handler will pass through to the provided handler. Use this handler to // disable a web application when the health checks fail. func Handler(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { checks := CheckStatus() if len(checks) != 0 { errcode.ServeJSON(w, errcode.ErrorCodeUnavailable. WithDetail("health check failed: please see /debug/health")) return } handler.ServeHTTP(w, r) // pass through }) } // statusResponse completes the request with a response describing the health // of the service. func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) { p, err := json.Marshal(checks) if err != nil { context.GetLogger(context.Background()).Errorf("error serializing health status: %v", err) p, err = json.Marshal(struct { ServerError string `json:"server_error"` }{ ServerError: "Could not parse error message", }) status = http.StatusInternalServerError if err != nil { context.GetLogger(context.Background()).Errorf("error serializing health status failure message: %v", err) return } } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Length", fmt.Sprint(len(p))) w.WriteHeader(status) if _, err := w.Write(p); err != nil { context.GetLogger(context.Background()).Errorf("error writing health status response body: %v", err) } } // Registers global /debug/health api endpoint, creates default registry func init() { DefaultRegistry = NewRegistry() http.HandleFunc("/debug/health", StatusHandler) } distribution-2.3.0/health/health_test.go000066400000000000000000000056331265472114500203510ustar00rootroot00000000000000package health import ( "errors" "fmt" "net/http" "net/http/httptest" "testing" ) // TestReturns200IfThereAreNoChecks ensures that the result code of the health // endpoint is 200 if there are not currently registered checks. 
func TestReturns200IfThereAreNoChecks(t *testing.T) { recorder := httptest.NewRecorder() req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) if err != nil { t.Errorf("Failed to create request.") } StatusHandler(recorder, req) if recorder.Code != 200 { t.Errorf("Did not get a 200.") } } // TestReturns500IfThereAreErrorChecks ensures that the result code of the // health endpoint is 500 if there are health checks with errors func TestReturns503IfThereAreErrorChecks(t *testing.T) { recorder := httptest.NewRecorder() req, err := http.NewRequest("GET", "https://fakeurl.com/debug/health", nil) if err != nil { t.Errorf("Failed to create request.") } // Create a manual error Register("some_check", CheckFunc(func() error { return errors.New("This Check did not succeed") })) StatusHandler(recorder, req) if recorder.Code != 503 { t.Errorf("Did not get a 503.") } } // TestHealthHandler ensures that our handler implementation correct protects // the web application when things aren't so healthy. func TestHealthHandler(t *testing.T) { // clear out existing checks. DefaultRegistry = NewRegistry() // protect an http server handler := http.Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) })) // wrap it in our health handler handler = Handler(handler) // use this swap check status updater := NewStatusUpdater() Register("test_check", updater) // now, create a test server server := httptest.NewServer(handler) checkUp := func(t *testing.T, message string) { resp, err := http.Get(server.URL) if err != nil { t.Fatalf("error getting success status: %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusNoContent { t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusNoContent) } // NOTE(stevvooe): we really don't care about the body -- the format is // not standardized or supported, yet. } checkDown := func(t *testing.T, message string) { resp, err := http.Get(server.URL) if err != nil { t.Fatalf("error getting down status: %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusServiceUnavailable { t.Fatalf("unexpected response code from server when %s: %d != %d", message, resp.StatusCode, http.StatusServiceUnavailable) } } // server should be up checkUp(t, "initial health check") // now, we fail the health check updater.Update(fmt.Errorf("the server is now out of commission")) checkDown(t, "server should be down") // should be down // bring server back up updater.Update(nil) checkUp(t, "when server is back up") // now we should be back up. } distribution-2.3.0/manifest/000077500000000000000000000000001265472114500160505ustar00rootroot00000000000000distribution-2.3.0/manifest/doc.go000066400000000000000000000000211265472114500171350ustar00rootroot00000000000000package manifest distribution-2.3.0/manifest/manifestlist/000077500000000000000000000000001265472114500205525ustar00rootroot00000000000000distribution-2.3.0/manifest/manifestlist/manifestlist.go000066400000000000000000000105731265472114500236110ustar00rootroot00000000000000package manifestlist import ( "encoding/json" "errors" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" ) // MediaTypeManifestList specifies the mediaType for manifest lists. 
const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. var SchemaVersion = manifest.Versioned{ SchemaVersion: 2, MediaType: MediaTypeManifestList, } func init() { manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { m := new(DeserializedManifestList) err := m.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err } err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } // PlatformSpec specifies a platform where a particular image manifest is // applicable. type PlatformSpec struct { // Architecture field specifies the CPU architecture, for example // `amd64` or `ppc64`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. OS string `json:"os"` // Variant is an optional field specifying a variant of the CPU, for // example `ppc64le` to specify a little-endian version of a PowerPC CPU. Variant string `json:"variant,omitempty"` // Features is an optional field specifuing an array of strings, each // listing a required CPU feature (for example `sse4` or `aes`). Features []string `json:"features,omitempty"` } // A ManifestDescriptor references a platform-specific manifest. type ManifestDescriptor struct { distribution.Descriptor // Platform specifies which platform the manifest pointed to by the // descriptor runs on. Platform PlatformSpec `json:"platform"` } // ManifestList references manifests for various platforms. type ManifestList struct { manifest.Versioned // Config references the image configuration as a blob. Manifests []ManifestDescriptor `json:"manifests"` } // References returnes the distribution descriptors for the referenced image // manifests. func (m ManifestList) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(m.Manifests)) for i := range m.Manifests { dependencies[i] = m.Manifests[i].Descriptor } return dependencies } // DeserializedManifestList wraps ManifestList with a copy of the original // JSON. type DeserializedManifestList struct { ManifestList // canonical is the canonical byte representation of the Manifest. canonical []byte } // FromDescriptors takes a slice of descriptors, and returns a // DeserializedManifestList which contains the resulting manifest list // and its JSON representation. func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) { m := ManifestList{ Versioned: SchemaVersion, } m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors)) copy(m.Manifests, descriptors) deserialized := DeserializedManifestList{ ManifestList: m, } var err error deserialized.canonical, err = json.MarshalIndent(&m, "", " ") return &deserialized, err } // UnmarshalJSON populates a new ManifestList struct from JSON data. 
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error { m.canonical = make([]byte, len(b), len(b)) // store manifest list in canonical copy(m.canonical, b) // Unmarshal canonical JSON into ManifestList object var manifestList ManifestList if err := json.Unmarshal(m.canonical, &manifestList); err != nil { return err } m.ManifestList = manifestList return nil } // MarshalJSON returns the contents of canonical. If canonical is empty, // marshals the inner contents. func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) { if len(m.canonical) > 0 { return m.canonical, nil } return nil, errors.New("JSON representation not initialized in DeserializedManifestList") } // Payload returns the raw content of the manifest list. The contents can be // used to calculate the content identifier. func (m DeserializedManifestList) Payload() (string, []byte, error) { return m.MediaType, m.canonical, nil } distribution-2.3.0/manifest/manifestlist/manifestlist_test.go000066400000000000000000000063371265472114500246530ustar00rootroot00000000000000package manifestlist import ( "bytes" "encoding/json" "reflect" "testing" "github.com/docker/distribution" ) var expectedManifestListSerialization = []byte(`{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", "manifests": [ { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 985, "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", "platform": { "architecture": "amd64", "os": "linux", "features": [ "sse4" ] } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 2392, "digest": "sha256:6346340964309634683409684360934680934608934608934608934068934608", "platform": { "architecture": "sun4m", "os": "sunos" } } ] }`) func TestManifestList(t *testing.T) { manifestDescriptors := []ManifestDescriptor{ { Descriptor: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 985, MediaType: "application/vnd.docker.distribution.manifest.v2+json", }, Platform: PlatformSpec{ Architecture: "amd64", OS: "linux", Features: []string{"sse4"}, }, }, { Descriptor: distribution.Descriptor{ Digest: "sha256:6346340964309634683409684360934680934608934608934608934068934608", Size: 2392, MediaType: "application/vnd.docker.distribution.manifest.v2+json", }, Platform: PlatformSpec{ Architecture: "sun4m", OS: "sunos", }, }, } deserialized, err := FromDescriptors(manifestDescriptors) if err != nil { t.Fatalf("error creating DeserializedManifestList: %v", err) } mediaType, canonical, err := deserialized.Payload() if mediaType != MediaTypeManifestList { t.Fatalf("unexpected media type: %s", mediaType) } // Check that the canonical field is the same as json.MarshalIndent // with these parameters. p, err := json.MarshalIndent(&deserialized.ManifestList, "", " ") if err != nil { t.Fatalf("error marshaling manifest list: %v", err) } if !bytes.Equal(p, canonical) { t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) } // Check that the canonical field has the expected value. 
if !bytes.Equal(expectedManifestListSerialization, canonical) { t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestListSerialization)) } var unmarshalled DeserializedManifestList if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { t.Fatalf("error unmarshaling manifest: %v", err) } if !reflect.DeepEqual(&unmarshalled, deserialized) { t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) } references := deserialized.References() if len(references) != 2 { t.Fatalf("unexpected number of references: %d", len(references)) } for i := range references { if !reflect.DeepEqual(references[i], manifestDescriptors[i].Descriptor) { t.Fatalf("unexpected value %d returned by References: %v", i, references[i]) } } } distribution-2.3.0/manifest/schema1/000077500000000000000000000000001265472114500173715ustar00rootroot00000000000000distribution-2.3.0/manifest/schema1/config_builder.go000066400000000000000000000204241265472114500226750ustar00rootroot00000000000000package schema1 import ( "crypto/sha512" "encoding/json" "errors" "fmt" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/libtrust" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" ) type diffID digest.Digest // gzippedEmptyTar is a gzip-compressed version of an empty tar file // (1024 NULL bytes) var gzippedEmptyTar = []byte{ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, } // digestSHA256GzippedEmptyTar is the canonical sha256 digest of // gzippedEmptyTar const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") // configManifestBuilder is a type for constructing manifests from an image // configuration and generic descriptors. type configManifestBuilder struct { // bs is a BlobService used to create empty layer tars in the // blob store if necessary. bs distribution.BlobService // pk is the libtrust private key used to sign the final manifest. pk libtrust.PrivateKey // configJSON is configuration supplied when the ManifestBuilder was // created. configJSON []byte // ref contains the name and optional tag provided to NewConfigManifestBuilder. ref reference.Named // descriptors is the set of descriptors referencing the layers. descriptors []distribution.Descriptor // emptyTarDigest is set to a valid digest if an empty tar has been // put in the blob store; otherwise it is empty. emptyTarDigest digest.Digest } // NewConfigManifestBuilder is used to build new manifests for the current // schema version from an image configuration and a set of descriptors. // It takes a BlobService so that it can add an empty tar to the blob store // if the resulting manifest needs empty layers. 
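// Illustrative sketch (not part of the original source): what the
// gzippedEmptyTar constant above encodes. An empty tar archive is just the
// 1024 NUL-byte end-of-archive marker, and the config builder keeps a
// pre-gzipped copy so it can upload it for "empty_layer" history entries.
// Note that re-compressing locally is not guaranteed to be byte-identical to
// the embedded constant (gzip headers carry a timestamp), so the fixed digest
// digestSHA256GzippedEmptyTar applies only to the exact bytes shipped with the
// package.
package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	// Produce an empty tar: closing the writer emits the 1024 zero bytes.
	var tarBuf bytes.Buffer
	tw := tar.NewWriter(&tarBuf)
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("empty tar size:", tarBuf.Len()) // 1024

	// Gzip it, as the schema1 config builder stores it in the blob store.
	var gzBuf bytes.Buffer
	gw := gzip.NewWriter(&gzBuf)
	if _, err := gw.Write(tarBuf.Bytes()); err != nil {
		panic(err)
	}
	if err := gw.Close(); err != nil {
		panic(err)
	}

	fmt.Println("uncompressed digest:", digest.FromBytes(tarBuf.Bytes()))
	fmt.Println("gzipped digest:     ", digest.FromBytes(gzBuf.Bytes()))
}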
func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { return &configManifestBuilder{ bs: bs, pk: pk, configJSON: configJSON, ref: ref, } } // Build produces a final manifest from the given references func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { type imageRootFS struct { Type string `json:"type"` DiffIDs []diffID `json:"diff_ids,omitempty"` BaseLayer string `json:"base_layer,omitempty"` } type imageHistory struct { Created time.Time `json:"created"` Author string `json:"author,omitempty"` CreatedBy string `json:"created_by,omitempty"` Comment string `json:"comment,omitempty"` EmptyLayer bool `json:"empty_layer,omitempty"` } type imageConfig struct { RootFS *imageRootFS `json:"rootfs,omitempty"` History []imageHistory `json:"history,omitempty"` Architecture string `json:"architecture,omitempty"` } var img imageConfig if err := json.Unmarshal(mb.configJSON, &img); err != nil { return nil, err } if len(img.History) == 0 { return nil, errors.New("empty history when trying to create schema1 manifest") } if len(img.RootFS.DiffIDs) != len(mb.descriptors) { return nil, errors.New("number of descriptors and number of layers in rootfs must match") } // Generate IDs for each layer // For non-top-level layers, create fake V1Compatibility strings that // fit the format and don't collide with anything else, but don't // result in runnable images on their own. type v1Compatibility struct { ID string `json:"id"` Parent string `json:"parent,omitempty"` Comment string `json:"comment,omitempty"` Created time.Time `json:"created"` ContainerConfig struct { Cmd []string } `json:"container_config,omitempty"` ThrowAway bool `json:"throwaway,omitempty"` } fsLayerList := make([]FSLayer, len(img.History)) history := make([]History, len(img.History)) parent := "" layerCounter := 0 for i, h := range img.History[:len(img.History)-1] { var blobsum digest.Digest if h.EmptyLayer { if blobsum, err = mb.emptyTar(ctx); err != nil { return nil, err } } else { if len(img.RootFS.DiffIDs) <= layerCounter { return nil, errors.New("too many non-empty layers in History section") } blobsum = mb.descriptors[layerCounter].Digest layerCounter++ } v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() if i == 0 && img.RootFS.BaseLayer != "" { // windows-only baselayer setup baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) parent = fmt.Sprintf("%x", baseID[:32]) } v1Compatibility := v1Compatibility{ ID: v1ID, Parent: parent, Comment: h.Comment, Created: h.Created, } v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} if h.EmptyLayer { v1Compatibility.ThrowAway = true } jsonBytes, err := json.Marshal(&v1Compatibility) if err != nil { return nil, err } reversedIndex := len(img.History) - i - 1 history[reversedIndex].V1Compatibility = string(jsonBytes) fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} parent = v1ID } latestHistory := img.History[len(img.History)-1] var blobsum digest.Digest if latestHistory.EmptyLayer { if blobsum, err = mb.emptyTar(ctx); err != nil { return nil, err } } else { if len(img.RootFS.DiffIDs) <= layerCounter { return nil, errors.New("too many non-empty layers in History section") } blobsum = mb.descriptors[layerCounter].Digest } fsLayerList[0] = FSLayer{BlobSum: blobsum} dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) // Top-level v1compatibility string should be a modified 
version of the // image config. transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) if err != nil { return nil, err } history[0].V1Compatibility = string(transformedConfig) tag := "" if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { tag = tagged.Tag() } mfst := Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: mb.ref.Name(), Tag: tag, Architecture: img.Architecture, FSLayers: fsLayerList, History: history, } return Sign(&mfst, mb.pk) } // emptyTar pushes a compressed empty tar to the blob store if one doesn't // already exist, and returns its blobsum. func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { if mb.emptyTarDigest != "" { // Already put an empty tar return mb.emptyTarDigest, nil } descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) switch err { case nil: mb.emptyTarDigest = descriptor.Digest return descriptor.Digest, nil case distribution.ErrBlobUnknown: // nop default: return "", err } // Add gzipped empty tar to the blob store descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) if err != nil { return "", err } mb.emptyTarDigest = descriptor.Digest return descriptor.Digest, nil } // AppendReference adds a reference to the current ManifestBuilder func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { // todo: verification here? mb.descriptors = append(mb.descriptors, d.Descriptor()) return nil } // References returns the current references added to this builder func (mb *configManifestBuilder) References() []distribution.Descriptor { return mb.descriptors } // MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { // Top-level v1compatibility string should be a modified version of the // image config. 
var configAsMap map[string]*json.RawMessage if err := json.Unmarshal(configJSON, &configAsMap); err != nil { return nil, err } // Delete fields that didn't exist in old manifest delete(configAsMap, "rootfs") delete(configAsMap, "history") configAsMap["id"] = rawJSON(v1ID) if parentV1ID != "" { configAsMap["parent"] = rawJSON(parentV1ID) } if throwaway { configAsMap["throwaway"] = rawJSON(true) } return json.Marshal(configAsMap) } func rawJSON(value interface{}) *json.RawMessage { jsonval, err := json.Marshal(value) if err != nil { return nil } return (*json.RawMessage)(&jsonval) } distribution-2.3.0/manifest/schema1/config_builder_test.go000066400000000000000000000257131265472114500237420ustar00rootroot00000000000000package schema1 import ( "bytes" "compress/gzip" "io" "reflect" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" ) type mockBlobService struct { descriptors map[digest.Digest]distribution.Descriptor } func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if descriptor, ok := bs.descriptors[dgst]; ok { return descriptor, nil } return distribution.Descriptor{}, distribution.ErrBlobUnknown } func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { panic("not implemented") } func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { panic("not implemented") } func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { d := distribution.Descriptor{ Digest: digest.FromBytes(p), Size: int64(len(p)), MediaType: mediaType, } bs.descriptors[d.Digest] = d return d, nil } func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { panic("not implemented") } func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } func TestEmptyTar(t *testing.T) { // Confirm that gzippedEmptyTar expands to 1024 NULL bytes. var decompressed [2048]byte gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar)) if err != nil { t.Fatalf("NewReader returned error: %v", err) } n, err := gzipReader.Read(decompressed[:]) if n != 1024 { t.Fatalf("read returned %d bytes; expected 1024", n) } n, err = gzipReader.Read(decompressed[1024:]) if n != 0 { t.Fatalf("read returned %d bytes; expected 0", n) } if err != io.EOF { t.Fatal("read did not return io.EOF") } gzipReader.Close() for _, b := range decompressed[:1024] { if b != 0 { t.Fatal("nonzero byte in decompressed tar") } } // Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar. 
dgst := digest.FromBytes(gzippedEmptyTar) if dgst != digestSHA256GzippedEmptyTar { t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst) } } func TestConfigBuilder(t *testing.T) { imgJSON := `{ "architecture": "amd64", "config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/bin/sh", "-c", "echo hi" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "derived=true", "asdf=true" ], "Hostname": "23304fc829f9", "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", "Labels": {}, "OnBuild": [], "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", "container_config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/bin/sh", "-c", "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "derived=true", "asdf=true" ], "Hostname": "23304fc829f9", "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", "Labels": {}, "OnBuild": [], "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "created": "2015-11-04T23:06:32.365666163Z", "docker_version": "1.9.0-dev", "history": [ { "created": "2015-10-31T22:22:54.690851953Z", "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" }, { "created": "2015-10-31T22:22:55.613815829Z", "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" }, { "created": "2015-11-04T23:06:30.934316144Z", "created_by": "/bin/sh -c #(nop) ENV derived=true", "empty_layer": true }, { "created": "2015-11-04T23:06:31.192097572Z", "created_by": "/bin/sh -c #(nop) ENV asdf=true", "empty_layer": true }, { "created": "2015-11-04T23:06:32.083868454Z", "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" }, { "created": "2015-11-04T23:06:32.365666163Z", "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", "empty_layer": true } ], "os": "linux", "rootfs": { "diff_ids": [ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" ], "type": "layers" } }` descriptors := []distribution.Descriptor{ {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, {Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("could not generate key for testing: %v", err) } bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} ref, err := reference.ParseNamed("testrepo:testtag") if err != nil { t.Fatalf("could not parse reference: %v", err) } builder := NewConfigManifestBuilder(bs, pk, ref, []byte(imgJSON)) for _, d := range descriptors { if err := builder.AppendReference(d); err != nil { t.Fatalf("AppendReference returned error: %v", err) } } signed, err := builder.Build(context.Background()) if err != nil { t.Fatalf("Build returned error: %v", err) } // Check that 
the gzipped empty layer tar was put in the blob store _, err = bs.Stat(context.Background(), digestSHA256GzippedEmptyTar) if err != nil { t.Fatal("gzipped empty tar was not put in the blob store") } manifest := signed.(*SignedManifest).Manifest if manifest.Versioned.SchemaVersion != 1 { t.Fatal("SchemaVersion != 1") } if manifest.Name != "testrepo" { t.Fatal("incorrect name in manifest") } if manifest.Tag != "testtag" { t.Fatal("incorrect tag in manifest") } if manifest.Architecture != "amd64" { t.Fatal("incorrect arch in manifest") } expectedFSLayers := []FSLayer{ {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, } if len(manifest.FSLayers) != len(expectedFSLayers) { t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers)) } if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) { t.Fatal("wrong FSLayers list") } expectedV1Compatibility := []string{ `{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"0850bfdeb7b060b1004a09099846c2f023a3f2ecbf33f56b4774384b00ce0323","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`, `{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`, `{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`, `{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c 
#(nop) ENV derived=true"]},"throwaway":true}`, `{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`, `{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`, } if len(manifest.History) != len(expectedV1Compatibility) { t.Fatalf("wrong number of history entries: %d", len(manifest.History)) } for i := range expectedV1Compatibility { if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] { t.Errorf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility) } } } distribution-2.3.0/manifest/schema1/manifest.go000066400000000000000000000127551265472114500215400ustar00rootroot00000000000000package schema1 import ( "encoding/json" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/libtrust" ) const ( // MediaTypeManifest specifies the mediaType for the current version. Note // that for schema version 1, the the media is optionally "application/json". MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" // MediaTypeSignedManifest specifies the mediatype for current SignedManifest version MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" // MediaTypeManifestLayer specifies the media type for manifest layers MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" ) var ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. SchemaVersion = manifest.Versioned{ SchemaVersion: 1, } ) func init() { schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { sm := new(SignedManifest) err := sm.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } desc := distribution.Descriptor{ Digest: digest.FromBytes(sm.Canonical), Size: int64(len(sm.Canonical)), MediaType: MediaTypeSignedManifest, } return sm, desc, err } err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } err = distribution.RegisterManifestSchema("", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } err = distribution.RegisterManifestSchema("application/json", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } // FSLayer is a container struct for BlobSums defined in an image manifest type FSLayer struct { // BlobSum is the tarsum of the referenced filesystem image layer BlobSum digest.Digest `json:"blobSum"` } // History stores unstructured v1 compatibility information type History struct { // V1Compatibility is the raw v1 compatibility information V1Compatibility string `json:"v1Compatibility"` } // Manifest provides the base accessible fields for working with V2 image // format in the registry. 
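// Illustrative sketch (not part of the original source): how the schema1
// config builder above derives the fake v1 layer IDs that appear in the
// expectedV1Compatibility strings. Each lower layer's ID is the hex SHA-256 of
// "<blobsum hex> <parent id>", and the top-level ID additionally mixes in the
// raw image configuration JSON, so the chain is fully deterministic. The
// blobsums and config below are placeholders.
package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	blobsums := []digest.Digest{
		"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
		"sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa",
	}
	configJSON := []byte(`{"architecture":"amd64"}`) // placeholder config

	parent := ""
	for _, blobsum := range blobsums[:len(blobsums)-1] {
		v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
		fmt.Printf("layer id: %s (parent %q)\n", v1ID, parent)
		parent = v1ID
	}

	// The top-level ID also covers the image config, matching the dgst
	// computed in (*configManifestBuilder).Build before MakeV1ConfigFromConfig.
	top := blobsums[len(blobsums)-1]
	topID := digest.FromBytes([]byte(top.Hex() + " " + parent + " " + string(configJSON))).Hex()
	fmt.Println("top-level id:", topID)
}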
type Manifest struct { manifest.Versioned // Name is the name of the image's repository Name string `json:"name"` // Tag is the tag of the image specified by this manifest Tag string `json:"tag"` // Architecture is the host architecture on which this image is intended to // run Architecture string `json:"architecture"` // FSLayers is a list of filesystem layer blobSums contained in this image FSLayers []FSLayer `json:"fsLayers"` // History is a list of unstructured historical data for v1 compatibility History []History `json:"history"` } // SignedManifest provides an envelope for a signed image manifest, including // the format sensitive raw bytes. type SignedManifest struct { Manifest // Canonical is the canonical byte representation of the ImageManifest, // without any attached signatures. The manifest byte // representation cannot change or it will have to be re-signed. Canonical []byte `json:"-"` // all contains the byte representation of the Manifest including signatures // and is retuend by Payload() all []byte } // UnmarshalJSON populates a new SignedManifest struct from JSON data. func (sm *SignedManifest) UnmarshalJSON(b []byte) error { sm.all = make([]byte, len(b), len(b)) // store manifest and signatures in all copy(sm.all, b) jsig, err := libtrust.ParsePrettySignature(b, "signatures") if err != nil { return err } // Resolve the payload in the manifest. bytes, err := jsig.Payload() if err != nil { return err } // sm.Canonical stores the canonical manifest JSON sm.Canonical = make([]byte, len(bytes), len(bytes)) copy(sm.Canonical, bytes) // Unmarshal canonical JSON into Manifest object var manifest Manifest if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { return err } sm.Manifest = manifest return nil } // References returnes the descriptors of this manifests references func (sm SignedManifest) References() []distribution.Descriptor { dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) for i, fsLayer := range sm.FSLayers { dependencies[i] = distribution.Descriptor{ MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", Digest: fsLayer.BlobSum, } } return dependencies } // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner // contents. Applications requiring a marshaled signed manifest should simply // use Raw directly, since the the content produced by json.Marshal will be // compacted and will fail signature checks. func (sm *SignedManifest) MarshalJSON() ([]byte, error) { if len(sm.all) > 0 { return sm.all, nil } // If the raw data is not available, just dump the inner content. return json.Marshal(&sm.Manifest) } // Payload returns the signed content of the signed manifest. func (sm SignedManifest) Payload() (string, []byte, error) { return MediaTypeSignedManifest, sm.all, nil } // Signatures returns the signatures as provided by // (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws // signatures. func (sm *SignedManifest) Signatures() ([][]byte, error) { jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { return nil, err } // Resolve the payload in the manifest. 
return jsig.Signatures() } distribution-2.3.0/manifest/schema1/manifest_test.go000066400000000000000000000052361265472114500225730ustar00rootroot00000000000000package schema1 import ( "bytes" "encoding/json" "reflect" "testing" "github.com/docker/libtrust" ) type testEnv struct { name, tag string invalidSigned *SignedManifest signed *SignedManifest pk libtrust.PrivateKey } func TestManifestMarshaling(t *testing.T) { env := genEnv(t) // Check that the all field is the same as json.MarshalIndent with these // parameters. p, err := json.MarshalIndent(env.signed, "", " ") if err != nil { t.Fatalf("error marshaling manifest: %v", err) } if !bytes.Equal(p, env.signed.all) { t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.all), string(p)) } } func TestManifestUnmarshaling(t *testing.T) { env := genEnv(t) var signed SignedManifest if err := json.Unmarshal(env.signed.all, &signed); err != nil { t.Fatalf("error unmarshaling signed manifest: %v", err) } if !reflect.DeepEqual(&signed, env.signed) { t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) } } func TestManifestVerification(t *testing.T) { env := genEnv(t) publicKeys, err := Verify(env.signed) if err != nil { t.Fatalf("error verifying manifest: %v", err) } if len(publicKeys) == 0 { t.Fatalf("no public keys found in signature") } var found bool publicKey := env.pk.PublicKey() // ensure that one of the extracted public keys matches the private key. for _, candidate := range publicKeys { if candidate.KeyID() == publicKey.KeyID() { found = true break } } if !found { t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys) } // Check that an invalid manifest fails verification _, err = Verify(env.invalidSigned) if err != nil { t.Fatalf("Invalid manifest should not pass Verify()") } } func genEnv(t *testing.T) *testEnv { pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("error generating test key: %v", err) } name, tag := "foo/bar", "test" invalid := Manifest{ Versioned: SchemaVersion, Name: name, Tag: tag, FSLayers: []FSLayer{ { BlobSum: "asdf", }, { BlobSum: "qwer", }, }, } valid := Manifest{ Versioned: SchemaVersion, Name: name, Tag: tag, FSLayers: []FSLayer{ { BlobSum: "asdf", }, }, History: []History{ { V1Compatibility: "", }, }, } sm, err := Sign(&valid, pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } invalidSigned, err := Sign(&invalid, pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } return &testEnv{ name: name, tag: tag, invalidSigned: invalidSigned, signed: sm, pk: pk, } } distribution-2.3.0/manifest/schema1/reference_builder.go000066400000000000000000000054061265472114500233710ustar00rootroot00000000000000package schema1 import ( "fmt" "errors" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" ) // referenceManifestBuilder is a type for constructing manifests from schema1 // dependencies. type referenceManifestBuilder struct { Manifest pk libtrust.PrivateKey } // NewReferenceManifestBuilder is used to build new manifests for the current // schema version using schema1 dependencies. 
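// Illustrative sketch (not part of the original source): building and signing
// a minimal schema1 manifest with the reference builder defined below, along
// the same lines as TestReferenceBuilder. The repository name, tag, layer
// digest, and v1Compatibility payload are placeholders.
package main

import (
	"fmt"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/libtrust"
)

func main() {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	named, err := reference.ParseNamed("example/repo")
	if err != nil {
		panic(err)
	}
	tagged, err := reference.WithTag(named, "latest")
	if err != nil {
		panic(err)
	}

	b := schema1.NewReferenceManifestBuilder(pk, tagged, "amd64")

	// Each reference pairs a layer blobsum with its v1Compatibility history.
	// References are added base-to-head; AppendReference prepends internally
	// so the head layer ends up first in FSLayers, as schema1 expects.
	err = b.AppendReference(schema1.Reference{
		Digest:  "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
		Size:    1,
		History: schema1.History{V1Compatibility: `{"id":"placeholder"}`},
	})
	if err != nil {
		panic(err)
	}

	m, err := b.Build(context.Background())
	if err != nil {
		panic(err)
	}
	mt, payload, _ := m.Payload()
	fmt.Println(mt, len(payload), "bytes of signed manifest")
}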
func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { tag := "" if tagged, isTagged := ref.(reference.Tagged); isTagged { tag = tagged.Tag() } return &referenceManifestBuilder{ Manifest: Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: ref.Name(), Tag: tag, Architecture: architecture, }, pk: pk, } } func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { m := mb.Manifest if len(m.FSLayers) == 0 { return nil, errors.New("cannot build manifest with zero layers or history") } m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) m.History = make([]History, len(mb.Manifest.History)) copy(m.FSLayers, mb.Manifest.FSLayers) copy(m.History, mb.Manifest.History) return Sign(&m, mb.pk) } // AppendReference adds a reference to the current ManifestBuilder func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { r, ok := d.(Reference) if !ok { return fmt.Errorf("Unable to add non-reference type to v1 builder") } // Entries need to be prepended mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) return nil } // References returns the current references added to this builder func (mb *referenceManifestBuilder) References() []distribution.Descriptor { refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) for i := range mb.Manifest.FSLayers { layerDigest := mb.Manifest.FSLayers[i].BlobSum history := mb.Manifest.History[i] ref := Reference{layerDigest, 0, history} refs[i] = ref.Descriptor() } return refs } // Reference describes a manifest v2, schema version 1 dependency. // An FSLayer associated with a history entry. type Reference struct { Digest digest.Digest Size int64 // if we know it, set it for the descriptor. 
History History } // Descriptor describes a reference func (r Reference) Descriptor() distribution.Descriptor { return distribution.Descriptor{ MediaType: MediaTypeManifestLayer, Digest: r.Digest, Size: r.Size, } } distribution-2.3.0/manifest/schema1/reference_builder_test.go000066400000000000000000000050151265472114500244240ustar00rootroot00000000000000package schema1 import ( "testing" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/reference" "github.com/docker/libtrust" ) func makeSignedManifest(t *testing.T, pk libtrust.PrivateKey, refs []Reference) *SignedManifest { u := &Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: "foo/bar", Tag: "latest", Architecture: "amd64", } for i := len(refs) - 1; i >= 0; i-- { u.FSLayers = append(u.FSLayers, FSLayer{ BlobSum: refs[i].Digest, }) u.History = append(u.History, History{ V1Compatibility: refs[i].History.V1Compatibility, }) } signedManifest, err := Sign(u, pk) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } return signedManifest } func TestReferenceBuilder(t *testing.T) { pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating private key: %v", err) } r1 := Reference{ Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: 1, History: History{V1Compatibility: "{\"a\" : 1 }"}, } r2 := Reference{ Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", Size: 2, History: History{V1Compatibility: "{\"\a\" : 2 }"}, } handCrafted := makeSignedManifest(t, pk, []Reference{r1, r2}) ref, err := reference.ParseNamed(handCrafted.Manifest.Name) if err != nil { t.Fatalf("could not parse reference: %v", err) } ref, err = reference.WithTag(ref, handCrafted.Manifest.Tag) if err != nil { t.Fatalf("could not add tag: %v", err) } b := NewReferenceManifestBuilder(pk, ref, handCrafted.Manifest.Architecture) _, err = b.Build(context.Background()) if err == nil { t.Fatal("Expected error building zero length manifest") } err = b.AppendReference(r1) if err != nil { t.Fatal(err) } err = b.AppendReference(r2) if err != nil { t.Fatal(err) } refs := b.References() if len(refs) != 2 { t.Fatalf("Unexpected reference count : %d != %d", 2, len(refs)) } // Ensure ordering if refs[0].Digest != r2.Digest { t.Fatalf("Unexpected reference : %v", refs[0]) } m, err := b.Build(context.Background()) if err != nil { t.Fatal(err) } built, ok := m.(*SignedManifest) if !ok { t.Fatalf("unexpected type from Build() : %T", built) } d1 := digest.FromBytes(built.Canonical) d2 := digest.FromBytes(handCrafted.Canonical) if d1 != d2 { t.Errorf("mismatching canonical JSON") } } distribution-2.3.0/manifest/schema1/sign.go000066400000000000000000000026451265472114500206670ustar00rootroot00000000000000package schema1 import ( "crypto/x509" "encoding/json" "github.com/docker/libtrust" ) // Sign signs the manifest with the provided private key, returning a // SignedManifest. This typically won't be used within the registry, except // for testing. 
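// Illustrative sketch (not part of the original source): the sign/verify round
// trip offered by Sign and Verify below, using a throwaway libtrust key. The
// manifest contents are placeholders.
package main

import (
	"fmt"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func main() {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	m := schema1.Manifest{
		Versioned:    schema1.SchemaVersion,
		Name:         "example/repo",
		Tag:          "latest",
		Architecture: "amd64",
		FSLayers:     []schema1.FSLayer{{BlobSum: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}},
		History:      []schema1.History{{V1Compatibility: "{}"}},
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		panic(err)
	}

	// Verify returns the public keys recovered from the JWS signatures; one of
	// them should correspond to the signing key.
	keys, err := schema1.Verify(sm)
	if err != nil {
		panic(err)
	}
	for _, k := range keys {
		fmt.Println("signed by key:", k.KeyID(), "matches signer:", k.KeyID() == pk.PublicKey().KeyID())
	}
}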
func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { p, err := json.MarshalIndent(m, "", " ") if err != nil { return nil, err } js, err := libtrust.NewJSONSignature(p) if err != nil { return nil, err } if err := js.Sign(pk); err != nil { return nil, err } pretty, err := js.PrettySignature("signatures") if err != nil { return nil, err } return &SignedManifest{ Manifest: *m, all: pretty, Canonical: p, }, nil } // SignWithChain signs the manifest with the given private key and x509 chain. // The public key of the first element in the chain must be the public key // corresponding with the sign key. func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { p, err := json.MarshalIndent(m, "", " ") if err != nil { return nil, err } js, err := libtrust.NewJSONSignature(p) if err != nil { return nil, err } if err := js.SignWithChain(key, chain); err != nil { return nil, err } pretty, err := js.PrettySignature("signatures") if err != nil { return nil, err } return &SignedManifest{ Manifest: *m, all: pretty, Canonical: p, }, nil } distribution-2.3.0/manifest/schema1/verify.go000066400000000000000000000015541265472114500212310ustar00rootroot00000000000000package schema1 import ( "crypto/x509" "github.com/Sirupsen/logrus" "github.com/docker/libtrust" ) // Verify verifies the signature of the signed manifest returning the public // keys used during signing. func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { js, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") return nil, err } return js.Verify() } // VerifyChains verifies the signature of the signed manifest against the // certificate pool returning the list of verified chains. Signatures without // an x509 chain are not checked. func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { js, err := libtrust.ParsePrettySignature(sm.all, "signatures") if err != nil { return nil, err } return js.VerifyChains(ca) } distribution-2.3.0/manifest/schema2/000077500000000000000000000000001265472114500173725ustar00rootroot00000000000000distribution-2.3.0/manifest/schema2/builder.go000066400000000000000000000036231265472114500213530ustar00rootroot00000000000000package schema2 import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) // builder is a type for constructing manifests. type builder struct { // bs is a BlobService used to publish the configuration blob. bs distribution.BlobService // configJSON references configJSON []byte // layers is a list of layer descriptors that gets built by successive // calls to AppendReference. layers []distribution.Descriptor } // NewManifestBuilder is used to build new manifests for the current schema // version. It takes a BlobService so it can publish the configuration blob // as part of the Build process. func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { mb := &builder{ bs: bs, configJSON: make([]byte, len(configJSON)), } copy(mb.configJSON, configJSON) return mb } // Build produces a final manifest from the given references. 
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ Versioned: SchemaVersion, Layers: make([]distribution.Descriptor, len(mb.layers)), } copy(m.Layers, mb.layers) configDigest := digest.FromBytes(mb.configJSON) var err error m.Config, err = mb.bs.Stat(ctx, configDigest) switch err { case nil: return FromStruct(m) case distribution.ErrBlobUnknown: // nop default: return nil, err } // Add config to the blob store m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) if err != nil { return nil, err } return FromStruct(m) } // AppendReference adds a reference to the current ManifestBuilder. func (mb *builder) AppendReference(d distribution.Describable) error { mb.layers = append(mb.layers, d.Descriptor()) return nil } // References returns the current references added to this builder. func (mb *builder) References() []distribution.Descriptor { return mb.layers } distribution-2.3.0/manifest/schema2/builder_test.go000066400000000000000000000142771265472114500224210ustar00rootroot00000000000000package schema2 import ( "reflect" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) type mockBlobService struct { descriptors map[digest.Digest]distribution.Descriptor } func (bs *mockBlobService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if descriptor, ok := bs.descriptors[dgst]; ok { return descriptor, nil } return distribution.Descriptor{}, distribution.ErrBlobUnknown } func (bs *mockBlobService) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { panic("not implemented") } func (bs *mockBlobService) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { panic("not implemented") } func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { d := distribution.Descriptor{ Digest: digest.FromBytes(p), Size: int64(len(p)), MediaType: mediaType, } bs.descriptors[d.Digest] = d return d, nil } func (bs *mockBlobService) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { panic("not implemented") } func (bs *mockBlobService) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } func TestBuilder(t *testing.T) { imgJSON := []byte(`{ "architecture": "amd64", "config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/bin/sh", "-c", "echo hi" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "derived=true", "asdf=true" ], "Hostname": "23304fc829f9", "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", "Labels": {}, "OnBuild": [], "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": null, "WorkingDir": "" }, "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001", "container_config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, "Cmd": [ "/bin/sh", "-c", "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "derived=true", "asdf=true" ], "Hostname": "23304fc829f9", "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246", "Labels": {}, "OnBuild": [], "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": 
null, "WorkingDir": "" }, "created": "2015-11-04T23:06:32.365666163Z", "docker_version": "1.9.0-dev", "history": [ { "created": "2015-10-31T22:22:54.690851953Z", "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" }, { "created": "2015-10-31T22:22:55.613815829Z", "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" }, { "created": "2015-11-04T23:06:30.934316144Z", "created_by": "/bin/sh -c #(nop) ENV derived=true", "empty_layer": true }, { "created": "2015-11-04T23:06:31.192097572Z", "created_by": "/bin/sh -c #(nop) ENV asdf=true", "empty_layer": true }, { "created": "2015-11-04T23:06:32.083868454Z", "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024" }, { "created": "2015-11-04T23:06:32.365666163Z", "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]", "empty_layer": true } ], "os": "linux", "rootfs": { "diff_ids": [ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef", "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49" ], "type": "layers" } }`) configDigest := digest.FromBytes(imgJSON) descriptors := []distribution.Descriptor{ { Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), Size: 5312, MediaType: MediaTypeLayer, }, { Digest: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), Size: 235231, MediaType: MediaTypeLayer, }, { Digest: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), Size: 639152, MediaType: MediaTypeLayer, }, } bs := &mockBlobService{descriptors: make(map[digest.Digest]distribution.Descriptor)} builder := NewManifestBuilder(bs, imgJSON) for _, d := range descriptors { if err := builder.AppendReference(d); err != nil { t.Fatalf("AppendReference returned error: %v", err) } } built, err := builder.Build(context.Background()) if err != nil { t.Fatalf("Build returned error: %v", err) } // Check that the config was put in the blob store _, err = bs.Stat(context.Background(), configDigest) if err != nil { t.Fatal("config was not put in the blob store") } manifest := built.(*DeserializedManifest).Manifest if manifest.Versioned.SchemaVersion != 2 { t.Fatal("SchemaVersion != 2") } target := manifest.Target() if target.Digest != configDigest { t.Fatalf("unexpected digest in target: %s", target.Digest.String()) } if target.MediaType != MediaTypeConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } if target.Size != 3153 { t.Fatalf("unexpected size in target: %d", target.Size) } references := manifest.References() if !reflect.DeepEqual(references, descriptors) { t.Fatal("References() does not match the descriptors added") } } distribution-2.3.0/manifest/schema2/manifest.go000066400000000000000000000070171265472114500215340ustar00rootroot00000000000000package schema2 import ( "encoding/json" "errors" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" ) const ( // MediaTypeManifest specifies the mediaType for the current version. MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" // MediaTypeConfig specifies the mediaType for the image configuration. MediaTypeConfig = "application/vnd.docker.container.image.v1+json" // MediaTypeLayer is the mediaType used for layers referenced by the // manifest. 
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" ) var ( // SchemaVersion provides a pre-initialized version structure for this // packages version of the manifest. SchemaVersion = manifest.Versioned{ SchemaVersion: 2, MediaType: MediaTypeManifest, } ) func init() { schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { m := new(DeserializedManifest) err := m.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } dgst := digest.FromBytes(b) return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err } err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } } // Manifest defines a schema2 manifest. type Manifest struct { manifest.Versioned // Config references the image configuration as a blob. Config distribution.Descriptor `json:"config"` // Layers lists descriptors for the layers referenced by the // configuration. Layers []distribution.Descriptor `json:"layers"` } // References returnes the descriptors of this manifests references. func (m Manifest) References() []distribution.Descriptor { return m.Layers } // Target returns the target of this signed manifest. func (m Manifest) Target() distribution.Descriptor { return m.Config } // DeserializedManifest wraps Manifest with a copy of the original JSON. // It satisfies the distribution.Manifest interface. type DeserializedManifest struct { Manifest // canonical is the canonical byte representation of the Manifest. canonical []byte } // FromStruct takes a Manifest structure, marshals it to JSON, and returns a // DeserializedManifest which contains the manifest and its JSON representation. func FromStruct(m Manifest) (*DeserializedManifest, error) { var deserialized DeserializedManifest deserialized.Manifest = m var err error deserialized.canonical, err = json.MarshalIndent(&m, "", " ") return &deserialized, err } // UnmarshalJSON populates a new Manifest struct from JSON data. func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { m.canonical = make([]byte, len(b), len(b)) // store manifest in canonical copy(m.canonical, b) // Unmarshal canonical JSON into Manifest object var manifest Manifest if err := json.Unmarshal(m.canonical, &manifest); err != nil { return err } m.Manifest = manifest return nil } // MarshalJSON returns the contents of canonical. If canonical is empty, // marshals the inner contents. func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { if len(m.canonical) > 0 { return m.canonical, nil } return nil, errors.New("JSON representation not initialized in DeserializedManifest") } // Payload returns the raw content of the manifest. The contents can be used to // calculate the content identifier. 
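// Illustrative sketch (not part of the original source): building a schema2
// manifest directly with FromStruct (bypassing the blob-store-backed builder)
// and reading back its target, references, and canonical payload. The digests
// and sizes are placeholders.
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
)

func main() {
	m := schema2.Manifest{
		Versioned: schema2.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeConfig,
			Size:      985,
			Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
		},
		Layers: []distribution.Descriptor{
			{
				MediaType: schema2.MediaTypeLayer,
				Size:      153263,
				Digest:    "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b",
			},
		},
	}

	dm, err := schema2.FromStruct(m)
	if err != nil {
		panic(err)
	}

	mt, canonical, _ := dm.Payload()
	fmt.Println("media type:", mt)                    // schema2.MediaTypeManifest
	fmt.Println("config target:", dm.Target().Digest) // the config descriptor
	fmt.Println("layers referenced:", len(dm.References()))
	fmt.Println("canonical JSON bytes:", len(canonical))
}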
func (m DeserializedManifest) Payload() (string, []byte, error) { return m.MediaType, m.canonical, nil } distribution-2.3.0/manifest/schema2/manifest_test.go000066400000000000000000000063061265472114500225730ustar00rootroot00000000000000package schema2 import ( "bytes" "encoding/json" "reflect" "testing" "github.com/docker/distribution" ) var expectedManifestSerialization = []byte(`{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": 985, "digest": "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 153263, "digest": "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" } ] }`) func TestManifest(t *testing.T) { manifest := Manifest{ Versioned: SchemaVersion, Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 985, MediaType: MediaTypeConfig, }, Layers: []distribution.Descriptor{ { Digest: "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b", Size: 153263, MediaType: MediaTypeLayer, }, }, } deserialized, err := FromStruct(manifest) if err != nil { t.Fatalf("error creating DeserializedManifest: %v", err) } mediaType, canonical, err := deserialized.Payload() if mediaType != MediaTypeManifest { t.Fatalf("unexpected media type: %s", mediaType) } // Check that the canonical field is the same as json.MarshalIndent // with these parameters. p, err := json.MarshalIndent(&manifest, "", " ") if err != nil { t.Fatalf("error marshaling manifest: %v", err) } if !bytes.Equal(p, canonical) { t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(p)) } // Check that canonical field matches expected value. if !bytes.Equal(expectedManifestSerialization, canonical) { t.Fatalf("manifest bytes not equal: %q != %q", string(canonical), string(expectedManifestSerialization)) } var unmarshalled DeserializedManifest if err := json.Unmarshal(deserialized.canonical, &unmarshalled); err != nil { t.Fatalf("error unmarshaling manifest: %v", err) } if !reflect.DeepEqual(&unmarshalled, deserialized) { t.Fatalf("manifests are different after unmarshaling: %v != %v", unmarshalled, *deserialized) } target := deserialized.Target() if target.Digest != "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b" { t.Fatalf("unexpected digest in target: %s", target.Digest.String()) } if target.MediaType != MediaTypeConfig { t.Fatalf("unexpected media type in target: %s", target.MediaType) } if target.Size != 985 { t.Fatalf("unexpected size in target: %d", target.Size) } references := deserialized.References() if len(references) != 1 { t.Fatalf("unexpected number of references: %d", len(references)) } if references[0].Digest != "sha256:62d8908bee94c202b2d35224a221aaa2058318bfa9879fa541efaecba272331b" { t.Fatalf("unexpected digest in reference: %s", references[0].Digest.String()) } if references[0].MediaType != MediaTypeLayer { t.Fatalf("unexpected media type in reference: %s", references[0].MediaType) } if references[0].Size != 153263 { t.Fatalf("unexpected size in reference: %d", references[0].Size) } } distribution-2.3.0/manifest/versioned.go000066400000000000000000000006561265472114500204040ustar00rootroot00000000000000package manifest // Versioned provides a struct with the manifest schemaVersion and . 
Incoming // content with unknown schema version can be decoded against this struct to // check the version. type Versioned struct { // SchemaVersion is the image manifest schema that this image follows SchemaVersion int `json:"schemaVersion"` // MediaType is the media type of this schema. MediaType string `json:"mediaType,omitempty"` } distribution-2.3.0/manifests.go000066400000000000000000000100441265472114500165610ustar00rootroot00000000000000package distribution import ( "fmt" "mime" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) // Manifest represents a registry object specifying a set of // references and an optional target type Manifest interface { // References returns a list of objects which make up this manifest. // The references are strictly ordered from base to head. A reference // is anything which can be represented by a distribution.Descriptor References() []Descriptor // Payload provides the serialized format of the manifest, in addition to // the mediatype. Payload() (mediatype string, payload []byte, err error) } // ManifestBuilder creates a manifest allowing one to include dependencies. // Instances can be obtained from a version-specific manifest package. Manifest // specific data is passed into the function which creates the builder. type ManifestBuilder interface { // Build creates the manifest from his builder. Build(ctx context.Context) (Manifest, error) // References returns a list of objects which have been added to this // builder. The dependencies are returned in the order they were added, // which should be from base to head. References() []Descriptor // AppendReference includes the given object in the manifest after any // existing dependencies. If the add fails, such as when adding an // unsupported dependency, an error may be returned. AppendReference(dependency Describable) error } // ManifestService describes operations on image manifests. type ManifestService interface { // Exists returns true if the manifest exists. Exists(ctx context.Context, dgst digest.Digest) (bool, error) // Get retrieves the manifest specified by the given digest Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) // Put creates or updates the given manifest returning the manifest digest Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) // Delete removes the manifest specified by the given digest. Deleting // a manifest that doesn't exist will return ErrManifestNotFound Delete(ctx context.Context, dgst digest.Digest) error // Enumerate fills 'manifests' with the manifests in this service up // to the size of 'manifests' and returns 'n' for the number of entries // which were filled. 'last' contains an offset in the manifest set // and can be used to resume iteration. //Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error) } // Describable is an interface for descriptors type Describable interface { Descriptor() Descriptor } // ManifestMediaTypes returns the supported media types for manifests. 
func ManifestMediaTypes() (mediaTypes []string) { for t := range mappings { if t != "" { mediaTypes = append(mediaTypes, t) } } return } // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) var mappings = make(map[string]UnmarshalFunc, 0) // UnmarshalManifest looks up manifest unmarshall functions based on // MediaType func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. var mediatype string if ctHeader != "" { var err error mediatype, _, err = mime.ParseMediaType(ctHeader) if err != nil { return nil, Descriptor{}, err } } unmarshalFunc, ok := mappings[mediatype] if !ok { unmarshalFunc, ok = mappings[""] if !ok { return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) } } return unmarshalFunc(p) } // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This // should be called from specific func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { if _, ok := mappings[mediatype]; ok { return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) } mappings[mediatype] = u return nil } distribution-2.3.0/notifications/000077500000000000000000000000001265472114500171135ustar00rootroot00000000000000distribution-2.3.0/notifications/bridge.go000066400000000000000000000113221265472114500206750ustar00rootroot00000000000000package notifications import ( "net/http" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/uuid" ) type bridge struct { ub URLBuilder actor ActorRecord source SourceRecord request RequestRecord sink Sink } var _ Listener = &bridge{} // URLBuilder defines a subset of url builder to be used by the event listener. type URLBuilder interface { BuildManifestURL(name reference.Named) (string, error) BuildBlobURL(ref reference.Canonical) (string, error) } // NewBridge returns a notification listener that writes records to sink, // using the actor and source. Any urls populated in the events created by // this bridge will be created using the URLBuilder. // TODO(stevvooe): Update this to simply take a context.Context object. func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink Sink) Listener { return &bridge{ ub: ub, actor: actor, source: source, request: request, sink: sink, } } // NewRequestRecord builds a RequestRecord for use in NewBridge from an // http.Request, associating it with a request id. 
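// Illustrative sketch (not part of the original source): the media-type
// registry maintained in manifests.go above. Importing a manifest package runs
// its init(), which calls RegisterManifestSchema; ManifestMediaTypes then
// reports what UnmarshalManifest can decode, and UnmarshalManifest dispatches
// on the Content-Type value. The round trip below uses an otherwise empty
// schema2 manifest purely as an example input.
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"

	// Imported only for their RegisterManifestSchema side effects.
	_ "github.com/docker/distribution/manifest/manifestlist"
	_ "github.com/docker/distribution/manifest/schema1"
)

func main() {
	fmt.Println("registered media types:", distribution.ManifestMediaTypes())

	// Round-trip a schema2 manifest through UnmarshalManifest.
	dm, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion})
	if err != nil {
		panic(err)
	}
	mt, payload, _ := dm.Payload()

	m, desc, err := distribution.UnmarshalManifest(mt, payload)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T with digest %s and size %d\n", m, desc.Digest, desc.Size)
}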
func NewRequestRecord(id string, r *http.Request) RequestRecord { return RequestRecord{ ID: id, Addr: context.RemoteAddr(r), Host: r.Host, Method: r.Method, UserAgent: r.UserAgent(), } } func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest) error { return b.createManifestEventAndWrite(EventActionPush, repo, sm) } func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest) error { return b.createManifestEventAndWrite(EventActionPull, repo, sm) } func (b *bridge) ManifestDeleted(repo reference.Named, sm distribution.Manifest) error { return b.createManifestEventAndWrite(EventActionDelete, repo, sm) } func (b *bridge) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { return b.createBlobEventAndWrite(EventActionPush, repo, desc) } func (b *bridge) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { return b.createBlobEventAndWrite(EventActionPull, repo, desc) } func (b *bridge) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { event, err := b.createBlobEvent(EventActionMount, repo, desc) if err != nil { return err } event.Target.FromRepository = fromRepo.Name() return b.sink.Write(*event) } func (b *bridge) BlobDeleted(repo reference.Named, desc distribution.Descriptor) error { return b.createBlobEventAndWrite(EventActionDelete, repo, desc) } func (b *bridge) createManifestEventAndWrite(action string, repo reference.Named, sm distribution.Manifest) error { manifestEvent, err := b.createManifestEvent(action, repo, sm) if err != nil { return err } return b.sink.Write(*manifestEvent) } func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) { event := b.createEvent(action) event.Target.Repository = repo.Name() mt, p, err := sm.Payload() if err != nil { return nil, err } // Ensure we have the canonical manifest descriptor here _, desc, err := distribution.UnmarshalManifest(mt, p) if err != nil { return nil, err } event.Target.MediaType = mt event.Target.Length = desc.Size event.Target.Size = desc.Size event.Target.Digest = desc.Digest ref, err := reference.WithDigest(repo, event.Target.Digest) if err != nil { return nil, err } event.Target.URL, err = b.ub.BuildManifestURL(ref) if err != nil { return nil, err } return event, nil } func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc distribution.Descriptor) error { event, err := b.createBlobEvent(action, repo, desc) if err != nil { return err } return b.sink.Write(*event) } func (b *bridge) createBlobEvent(action string, repo reference.Named, desc distribution.Descriptor) (*Event, error) { event := b.createEvent(action) event.Target.Descriptor = desc event.Target.Length = desc.Size event.Target.Repository = repo.Name() ref, err := reference.WithDigest(repo, desc.Digest) if err != nil { return nil, err } event.Target.URL, err = b.ub.BuildBlobURL(ref) if err != nil { return nil, err } return event, nil } // createEvent creates an event with actor and source populated. func (b *bridge) createEvent(action string) *Event { event := createEvent(action) event.Source = b.source event.Actor = b.actor event.Request = b.request return event } // createEvent returns a new event, timestamped, with the specified action. 
func createEvent(action string) *Event { return &Event{ ID: uuid.Generate().String(), Timestamp: time.Now(), Action: action, } } distribution-2.3.0/notifications/bridge_test.go000066400000000000000000000074311265472114500217420ustar00rootroot00000000000000package notifications import ( "testing" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/uuid" "github.com/docker/libtrust" ) var ( // common environment for expected manifest events. repo = "test/repo" source = SourceRecord{ Addr: "remote.test", InstanceID: uuid.Generate().String(), } ub = mustUB(v2.NewURLBuilderFromString("http://test.example.com/")) actor = ActorRecord{ Name: "test", } request = RequestRecord{} m = schema1.Manifest{ Name: repo, Tag: "latest", } sm *schema1.SignedManifest payload []byte dgst digest.Digest ) func TestEventBridgeManifestPulled(t *testing.T) { l := createTestEnv(t, testSinkFn(func(events ...Event) error { checkCommonManifest(t, EventActionPull, events...) return nil })) repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestPulled(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } } func TestEventBridgeManifestPushed(t *testing.T) { l := createTestEnv(t, testSinkFn(func(events ...Event) error { checkCommonManifest(t, EventActionPush, events...) return nil })) repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestPushed(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } } func TestEventBridgeManifestDeleted(t *testing.T) { l := createTestEnv(t, testSinkFn(func(events ...Event) error { checkCommonManifest(t, EventActionDelete, events...) return nil })) repoRef, _ := reference.ParseNamed(repo) if err := l.ManifestDeleted(repoRef, sm); err != nil { t.Fatalf("unexpected error notifying manifest pull: %v", err) } } func createTestEnv(t *testing.T, fn testSinkFn) Listener { pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("error generating private key: %v", err) } sm, err = schema1.Sign(&m, pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } payload = sm.Canonical dgst = digest.FromBytes(payload) return NewBridge(ub, source, actor, request, fn) } func checkCommonManifest(t *testing.T, action string, events ...Event) { checkCommon(t, events...) 
event := events[0] if event.Action != action { t.Fatalf("unexpected event action: %q != %q", event.Action, action) } repoRef, _ := reference.ParseNamed(repo) ref, _ := reference.WithDigest(repoRef, dgst) u, err := ub.BuildManifestURL(ref) if err != nil { t.Fatalf("error building expected url: %v", err) } if event.Target.URL != u { t.Fatalf("incorrect url passed: \n%q != \n%q", event.Target.URL, u) } } func checkCommon(t *testing.T, events ...Event) { if len(events) != 1 { t.Fatalf("unexpected number of events: %v != 1", len(events)) } event := events[0] if event.Source != source { t.Fatalf("source not equal: %#v != %#v", event.Source, source) } if event.Request != request { t.Fatalf("request not equal: %#v != %#v", event.Request, request) } if event.Actor != actor { t.Fatalf("actor not equal: %#v != %#v", event.Actor, actor) } if event.Target.Digest != dgst { t.Fatalf("unexpected digest on event target: %q != %q", event.Target.Digest, dgst) } if event.Target.Length != int64(len(payload)) { t.Fatalf("unexpected target length: %v != %v", event.Target.Length, len(payload)) } if event.Target.Repository != repo { t.Fatalf("unexpected repository: %q != %q", event.Target.Repository, repo) } } type testSinkFn func(events ...Event) error func (tsf testSinkFn) Write(events ...Event) error { return tsf(events...) } func (tsf testSinkFn) Close() error { return nil } func mustUB(ub *v2.URLBuilder, err error) *v2.URLBuilder { if err != nil { panic(err) } return ub } distribution-2.3.0/notifications/endpoint.go000066400000000000000000000040451265472114500212650ustar00rootroot00000000000000package notifications import ( "net/http" "time" ) // EndpointConfig covers the optional configuration parameters for an active // endpoint. type EndpointConfig struct { Headers http.Header Timeout time.Duration Threshold int Backoff time.Duration } // defaults set any zero-valued fields to a reasonable default. func (ec *EndpointConfig) defaults() { if ec.Timeout <= 0 { ec.Timeout = time.Second } if ec.Threshold <= 0 { ec.Threshold = 10 } if ec.Backoff <= 0 { ec.Backoff = time.Second } } // Endpoint is a reliable, queued, thread-safe sink that notifies external http // services when events are written. Writes are non-blocking and always // succeed for callers but events may be queued internally. type Endpoint struct { Sink url string name string EndpointConfig metrics *safeMetrics } // NewEndpoint returns a running endpoint, ready to receive events. func NewEndpoint(name, url string, config EndpointConfig) *Endpoint { var endpoint Endpoint endpoint.name = name endpoint.url = url endpoint.EndpointConfig = config endpoint.defaults() endpoint.metrics = newSafeMetrics() // Configures the inmemory queue, retry, http pipeline. endpoint.Sink = newHTTPSink( endpoint.url, endpoint.Timeout, endpoint.Headers, endpoint.metrics.httpStatusListener()) endpoint.Sink = newRetryingSink(endpoint.Sink, endpoint.Threshold, endpoint.Backoff) endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener()) register(&endpoint) return &endpoint } // Name returns the name of the endpoint, generally used for debugging. func (e *Endpoint) Name() string { return e.name } // URL returns the url of the endpoint. func (e *Endpoint) URL() string { return e.url } // ReadMetrics populates em with metrics from the endpoint. func (e *Endpoint) ReadMetrics(em *EndpointMetrics) { e.metrics.Lock() defer e.metrics.Unlock() *em = e.metrics.EndpointMetrics // The map still needs to be copied in a threadsafe manner.
em.Statuses = make(map[string]int) for k, v := range e.metrics.Statuses { em.Statuses[k] = v } } distribution-2.3.0/notifications/event.go000066400000000000000000000127401265472114500205670ustar00rootroot00000000000000package notifications import ( "fmt" "time" "github.com/docker/distribution" ) // EventAction constants used in action field of Event. const ( EventActionPull = "pull" EventActionPush = "push" EventActionMount = "mount" EventActionDelete = "delete" ) const ( // EventsMediaType is the mediatype for the json event envelope. If the // Event, ActorRecord, SourceRecord or Envelope structs change, the version // number should be incremented. EventsMediaType = "application/vnd.docker.distribution.events.v1+json" // LayerMediaType is the media type for image rootfs diffs (aka "layers") // used by Docker. We don't expect this to change for quite a while. layerMediaType = "application/vnd.docker.container.image.rootfs.diff+x-gtar" ) // Envelope defines the fields of a json event envelope message that can hold // one or more events. type Envelope struct { // Events make up the contents of the envelope. Events present in a single // envelope are not necessarily related. Events []Event `json:"events,omitempty"` } // TODO(stevvooe): The event type should be separate from the json format. It // should be defined as an interface. Leaving as is for now since we don't // need that at this time. If we make this change, the struct below would be // called "EventRecord". // Event provides the fields required to describe a registry event. type Event struct { // ID provides a unique identifier for the event. ID string `json:"id,omitempty"` // Timestamp is the time at which the event occurred. Timestamp time.Time `json:"timestamp,omitempty"` // Action indicates what action encompasses the provided event. Action string `json:"action,omitempty"` // Target uniquely describes the target of the event. Target struct { // TODO(stevvooe): Use http.DetectContentType for layers, maybe. distribution.Descriptor // Length in bytes of content. Same as Size field in Descriptor. // Provided for backwards compatibility. Length int64 `json:"length,omitempty"` // Repository identifies the named repository. Repository string `json:"repository,omitempty"` // FromRepository identifies the named repository which a blob was mounted // from if appropriate. FromRepository string `json:"fromRepository,omitempty"` // URL provides a direct link to the content. URL string `json:"url,omitempty"` } `json:"target,omitempty"` // Request covers the request that generated the event. Request RequestRecord `json:"request,omitempty"` // Actor specifies the agent that initiated the event. For most // situations, this could be from the authorizaton context of the request. Actor ActorRecord `json:"actor,omitempty"` // Source identifies the registry node that generated the event. Put // differently, while the actor "initiates" the event, the source // "generates" it. Source SourceRecord `json:"source,omitempty"` } // ActorRecord specifies the agent that initiated the event. For most // situations, this could be from the authorizaton context of the request. // Data in this record can refer to both the initiating client and the // generating request. type ActorRecord struct { // Name corresponds to the subject or username associated with the // request context that generated the event. Name string `json:"name,omitempty"` // TODO(stevvooe): Look into setting a session cookie to get this // without docker daemon. 
// SessionID // TODO(stevvooe): Push the "Docker-Command" header to replace cookie and // get the actual command. // Command } // RequestRecord covers the request that generated the event. type RequestRecord struct { // ID uniquely identifies the request that initiated the event. ID string `json:"id"` // Addr contains the ip or hostname and possibly port of the client // connection that initiated the event. This is the RemoteAddr from // the standard http request. Addr string `json:"addr,omitempty"` // Host is the externally accessible host name of the registry instance, // as specified by the http host header on incoming requests. Host string `json:"host,omitempty"` // Method has the request method that generated the event. Method string `json:"method"` // UserAgent contains the user agent header of the request. UserAgent string `json:"useragent"` } // SourceRecord identifies the registry node that generated the event. Put // differently, while the actor "initiates" the event, the source "generates" // it. type SourceRecord struct { // Addr contains the ip or hostname and the port of the registry node // that generated the event. Generally, this will be resolved by // os.Hostname() along with the running port. Addr string `json:"addr,omitempty"` // InstanceID identifies a running instance of an application. Changes // after each restart. InstanceID string `json:"instanceID,omitempty"` } var ( // ErrSinkClosed is returned if a write is issued to a sink that has been // closed. If encountered, the error should be considered terminal and // retries will not be successful. ErrSinkClosed = fmt.Errorf("sink: closed") ) // Sink accepts and sends events. type Sink interface { // Write writes one or more events to the sink. If no error is returned, // the caller will assume that all events have been committed and will not // try to send them again. If an error is received, the caller may retry // sending the event. The caller should cede the slice of memory to the // sink and not modify it after calling this method. Write(events ...Event) error // Close the sink, possibly waiting for pending events to flush. Close() error } distribution-2.3.0/notifications/event_test.go000066400000000000000000000116671265472114500216350ustar00rootroot00000000000000package notifications import ( "encoding/json" "strings" "testing" "time" "github.com/docker/distribution/manifest/schema1" ) // TestEventJSONFormat provides silly test to detect if the event format or // envelope has changed. If this code fails, the revision of the protocol may // need to be incremented. 
func TestEventEnvelopeJSONFormat(t *testing.T) { var expected = strings.TrimSpace(` { "events": [ { "id": "asdf-asdf-asdf-asdf-0", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.distribution.manifest.v1+prettyjws", "size": 1, "digest": "sha256:0123456789abcdef0", "length": 1, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } }, { "id": "asdf-asdf-asdf-asdf-1", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", "size": 2, "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", "length": 2, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } }, { "id": "asdf-asdf-asdf-asdf-2", "timestamp": "2006-01-02T15:04:05Z", "action": "push", "target": { "mediaType": "application/vnd.docker.container.image.rootfs.diff+x-gtar", "size": 3, "digest": "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6", "length": 3, "repository": "library/test", "url": "http://example.com/v2/library/test/manifests/latest" }, "request": { "id": "asdfasdf", "addr": "client.local", "host": "registrycluster.local", "method": "PUT", "useragent": "test/0.1" }, "actor": { "name": "test-actor" }, "source": { "addr": "hostname.local:port" } } ] } `) tm, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) if err != nil { t.Fatalf("error creating time: %v", err) } var prototype Event prototype.Action = EventActionPush prototype.Timestamp = tm prototype.Actor.Name = "test-actor" prototype.Request.ID = "asdfasdf" prototype.Request.Addr = "client.local" prototype.Request.Host = "registrycluster.local" prototype.Request.Method = "PUT" prototype.Request.UserAgent = "test/0.1" prototype.Source.Addr = "hostname.local:port" var manifestPush Event manifestPush = prototype manifestPush.ID = "asdf-asdf-asdf-asdf-0" manifestPush.Target.Digest = "sha256:0123456789abcdef0" manifestPush.Target.Length = 1 manifestPush.Target.Size = 1 manifestPush.Target.MediaType = schema1.MediaTypeSignedManifest manifestPush.Target.Repository = "library/test" manifestPush.Target.URL = "http://example.com/v2/library/test/manifests/latest" var layerPush0 Event layerPush0 = prototype layerPush0.ID = "asdf-asdf-asdf-asdf-1" layerPush0.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5" layerPush0.Target.Length = 2 layerPush0.Target.Size = 2 layerPush0.Target.MediaType = layerMediaType layerPush0.Target.Repository = "library/test" layerPush0.Target.URL = "http://example.com/v2/library/test/manifests/latest" var layerPush1 Event layerPush1 = prototype layerPush1.ID = "asdf-asdf-asdf-asdf-2" layerPush1.Target.Digest = "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d6" layerPush1.Target.Length = 3 layerPush1.Target.Size = 3 layerPush1.Target.MediaType = layerMediaType layerPush1.Target.Repository = "library/test" layerPush1.Target.URL = "http://example.com/v2/library/test/manifests/latest" var envelope 
Envelope envelope.Events = append(envelope.Events, manifestPush, layerPush0, layerPush1) p, err := json.MarshalIndent(envelope, "", " ") if err != nil { t.Fatalf("unexpected error marshaling envelope: %v", err) } if string(p) != expected { t.Fatalf("format has changed\n%s\n != \n%s", string(p), expected) } } distribution-2.3.0/notifications/http.go000066400000000000000000000073361265472114500204320ustar00rootroot00000000000000package notifications import ( "bytes" "encoding/json" "fmt" "net/http" "sync" "time" ) // httpSink implements a single-flight, http notification endpoint. This is // very lightweight in that it only makes an attempt at an http request. // Reliability should be provided by the caller. type httpSink struct { url string mu sync.Mutex closed bool client *http.Client listeners []httpStatusListener // TODO(stevvooe): Allow one to configure the media type accepted by this // sink and choose the serialization based on that. } // newHTTPSink returns an unreliable, single-flight http sink. Wrap in other // sinks for increased reliability. func newHTTPSink(u string, timeout time.Duration, headers http.Header, listeners ...httpStatusListener) *httpSink { return &httpSink{ url: u, listeners: listeners, client: &http.Client{ Transport: &headerRoundTripper{ Transport: http.DefaultTransport.(*http.Transport), headers: headers, }, Timeout: timeout, }, } } // httpStatusListener is called on various outcomes of sending notifications. type httpStatusListener interface { success(status int, events ...Event) failure(status int, events ...Event) err(err error, events ...Event) } // Accept makes an attempt to notify the endpoint, returning an error if it // fails. It is the caller's responsibility to retry on error. The events are // accepted or rejected as a group. func (hs *httpSink) Write(events ...Event) error { hs.mu.Lock() defer hs.mu.Unlock() defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections() if hs.closed { return ErrSinkClosed } envelope := Envelope{ Events: events, } // TODO(stevvooe): It is not ideal to keep re-encoding the request body on // retry but we are going to do it to keep the code simple. It is likely // we could change the event struct to manage its own buffer. p, err := json.MarshalIndent(envelope, "", " ") if err != nil { for _, listener := range hs.listeners { listener.err(err, events...) } return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err) } body := bytes.NewReader(p) resp, err := hs.client.Post(hs.url, EventsMediaType, body) if err != nil { for _, listener := range hs.listeners { listener.err(err, events...) } return fmt.Errorf("%v: error posting: %v", hs, err) } defer resp.Body.Close() // The notifier will treat any 2xx or 3xx response as accepted by the // endpoint. switch { case resp.StatusCode >= 200 && resp.StatusCode < 400: for _, listener := range hs.listeners { listener.success(resp.StatusCode, events...) } // TODO(stevvooe): This is a little accepting: we may want to support // unsupported media type responses with retries using the correct // media type. There may also be cases that will never work. return nil default: for _, listener := range hs.listeners { listener.failure(resp.StatusCode, events...) 
} return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status) } } // Close the endpoint func (hs *httpSink) Close() error { hs.mu.Lock() defer hs.mu.Unlock() if hs.closed { return fmt.Errorf("httpsink: already closed") } hs.closed = true return nil } func (hs *httpSink) String() string { return fmt.Sprintf("httpSink{%s}", hs.url) } type headerRoundTripper struct { *http.Transport // must be transport to support CancelRequest headers http.Header } func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { var nreq http.Request nreq = *req nreq.Header = make(http.Header) merge := func(headers http.Header) { for k, v := range headers { nreq.Header[k] = append(nreq.Header[k], v...) } } merge(req.Header) merge(hrt.headers) return hrt.Transport.RoundTrip(&nreq) } distribution-2.3.0/notifications/http_test.go000066400000000000000000000076161265472114500214720ustar00rootroot00000000000000package notifications import ( "encoding/json" "fmt" "mime" "net/http" "net/http/httptest" "reflect" "strconv" "testing" "github.com/docker/distribution/manifest/schema1" ) // TestHTTPSink mocks out an http endpoint and notifies it under a couple of // conditions, ensuring correct behavior. func TestHTTPSink(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() if r.Method != "POST" { w.WriteHeader(http.StatusMethodNotAllowed) t.Fatalf("unexpected request method: %v", r.Method) return } // Extract the content type and make sure it matches contentType := r.Header.Get("Content-Type") mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { w.WriteHeader(http.StatusBadRequest) t.Fatalf("error parsing media type: %v, contenttype=%q", err, contentType) return } if mediaType != EventsMediaType { w.WriteHeader(http.StatusUnsupportedMediaType) t.Fatalf("incorrect media type: %q != %q", mediaType, EventsMediaType) return } var envelope Envelope dec := json.NewDecoder(r.Body) if err := dec.Decode(&envelope); err != nil { w.WriteHeader(http.StatusBadRequest) t.Fatalf("error decoding request body: %v", err) return } // Let caller choose the status status, err := strconv.Atoi(r.FormValue("status")) if err != nil { t.Logf("error parsing status: %v", err) // May just be empty, set status to 200 status = http.StatusOK } w.WriteHeader(status) })) metrics := newSafeMetrics() sink := newHTTPSink(server.URL, 0, nil, &endpointMetricsHTTPStatusListener{safeMetrics: metrics}) var expectedMetrics EndpointMetrics expectedMetrics.Statuses = make(map[string]int) for _, tc := range []struct { events []Event // events to send url string failure bool // true if there should be a failure. statusCode int // if not set, no status code should be incremented. }{ { statusCode: http.StatusOK, events: []Event{ createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest)}, }, { statusCode: http.StatusOK, events: []Event{ createTestEvent("push", "library/test", schema1.MediaTypeSignedManifest), createTestEvent("push", "library/test", layerMediaType), createTestEvent("push", "library/test", layerMediaType), }, }, { statusCode: http.StatusTemporaryRedirect, }, { statusCode: http.StatusBadRequest, failure: true, }, { // Case where connection never goes through. 
url: "http://shoudlntresolve/", failure: true, }, } { if tc.failure { expectedMetrics.Failures += len(tc.events) } else { expectedMetrics.Successes += len(tc.events) } if tc.statusCode > 0 { expectedMetrics.Statuses[fmt.Sprintf("%d %s", tc.statusCode, http.StatusText(tc.statusCode))] += len(tc.events) } url := tc.url if url == "" { url = server.URL + "/" } // setup endpoint to respond with expected status code. url += fmt.Sprintf("?status=%v", tc.statusCode) sink.url = url t.Logf("testcase: %v, fail=%v", url, tc.failure) // Try a simple event emission. err := sink.Write(tc.events...) if !tc.failure { if err != nil { t.Fatalf("unexpected error send event: %v", err) } } else { if err == nil { t.Fatalf("the endpoint should have rejected the request") } } if !reflect.DeepEqual(metrics.EndpointMetrics, expectedMetrics) { t.Fatalf("metrics not as expected: %#v != %#v", metrics.EndpointMetrics, expectedMetrics) } } if err := sink.Close(); err != nil { t.Fatalf("unexpected error closing http sink: %v", err) } // double close returns error if err := sink.Close(); err == nil { t.Fatalf("second close should have returned error: %v", err) } } func createTestEvent(action, repo, typ string) Event { event := createEvent(action) event.Target.MediaType = typ event.Target.Repository = repo return *event } distribution-2.3.0/notifications/listener.go000066400000000000000000000151071265472114500212730ustar00rootroot00000000000000package notifications import ( "net/http" "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" ) // ManifestListener describes a set of methods for listening to events related to manifests. type ManifestListener interface { ManifestPushed(repo reference.Named, sm distribution.Manifest) error ManifestPulled(repo reference.Named, sm distribution.Manifest) error // TODO(stevvooe): Please note that delete support is still a little shaky // and we'll need to propagate these in the future. ManifestDeleted(repo reference.Named, sm distribution.Manifest) error } // BlobListener describes a listener that can respond to layer related events. type BlobListener interface { BlobPushed(repo reference.Named, desc distribution.Descriptor) error BlobPulled(repo reference.Named, desc distribution.Descriptor) error BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error // TODO(stevvooe): Please note that delete support is still a little shaky // and we'll need to propagate these in the future. BlobDeleted(repo reference.Named, desc distribution.Descriptor) error } // Listener combines all repository events into a single interface. type Listener interface { ManifestListener BlobListener } type repositoryListener struct { distribution.Repository listener Listener } // Listen dispatches events on the repository to the listener. func Listen(repo distribution.Repository, listener Listener) distribution.Repository { return &repositoryListener{ Repository: repo, listener: listener, } } func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { manifests, err := rl.Repository.Manifests(ctx, options...) 
if err != nil { return nil, err } return &manifestServiceListener{ ManifestService: manifests, parent: rl, }, nil } func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore { return &blobServiceListener{ BlobStore: rl.Repository.Blobs(ctx), parent: rl, } } type manifestServiceListener struct { distribution.ManifestService parent *repositoryListener } func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { sm, err := msl.ManifestService.Get(ctx, dgst) if err == nil { if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Name(), sm); err != nil { logrus.Errorf("error dispatching manifest pull to listener: %v", err) } } return sm, err } func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { dgst, err := msl.ManifestService.Put(ctx, sm, options...) if err == nil { if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Name(), sm); err != nil { logrus.Errorf("error dispatching manifest push to listener: %v", err) } } return dgst, err } type blobServiceListener struct { distribution.BlobStore parent *repositoryListener } var _ distribution.BlobStore = &blobServiceListener{} func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { p, err := bsl.BlobStore.Get(ctx, dgst) if err == nil { if desc, err := bsl.Stat(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } } return p, err } func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { rc, err := bsl.BlobStore.Open(ctx, dgst) if err == nil { if desc, err := bsl.Stat(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } } return rc, err } func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst) if err == nil { if desc, err := bsl.Stat(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err) } else { if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Name(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } } return err } func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { desc, err := bsl.BlobStore.Put(ctx, mediaType, p) if err == nil { if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Name(), desc); err != nil { context.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err) } } return desc, err } func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { wr, err := bsl.BlobStore.Create(ctx, options...) 
switch err := err.(type) { case distribution.ErrBlobMounted: if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Name(), err.Descriptor, err.From); err != nil { context.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err) } return nil, err } return bsl.decorateWriter(wr), err } func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { wr, err := bsl.BlobStore.Resume(ctx, id) return bsl.decorateWriter(wr), err } func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter { return &blobWriterListener{ BlobWriter: wr, parent: bsl, } } type blobWriterListener struct { distribution.BlobWriter parent *blobServiceListener } func (bwl *blobWriterListener) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { committed, err := bwl.BlobWriter.Commit(ctx, desc) if err == nil { if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Name(), committed); err != nil { context.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err) } } return committed, err } distribution-2.3.0/notifications/listener_test.go000066400000000000000000000116101265472114500223250ustar00rootroot00000000000000package notifications import ( "io" "reflect" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" ) func TestListener(t *testing.T) { ctx := context.Background() registry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } tl := &testListener{ ops: make(map[string]int), } repoRef, _ := reference.ParseNamed("foo/bar") repository, err := registry.Repository(ctx, repoRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } repository = Listen(repository, tl) // Now take the registry through a number of operations checkExerciseRepository(t, repository) expectedOps := map[string]int{ "manifest:push": 1, "manifest:pull": 1, // "manifest:delete": 0, // deletes not supported for now "layer:push": 2, "layer:pull": 2, // "layer:delete": 0, // deletes not supported for now } if !reflect.DeepEqual(tl.ops, expectedOps) { t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps) } } type testListener struct { ops map[string]int } func (tl *testListener) ManifestPushed(repo reference.Named, m distribution.Manifest) error { tl.ops["manifest:push"]++ return nil } func (tl *testListener) ManifestPulled(repo reference.Named, m distribution.Manifest) error { tl.ops["manifest:pull"]++ return nil } func (tl *testListener) ManifestDeleted(repo reference.Named, m distribution.Manifest) error { tl.ops["manifest:delete"]++ return nil } func (tl *testListener) BlobPushed(repo reference.Named, desc distribution.Descriptor) error { tl.ops["layer:push"]++ return nil } func (tl *testListener) BlobPulled(repo reference.Named, desc distribution.Descriptor) error { tl.ops["layer:pull"]++ return nil } 
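// NOTE: the following is an illustrative, editor-added sketch rather than part
// of the original package. It shows one plausible way the pieces defined in
// this package could be wired together: an HTTP Endpoint sink, a Broadcaster
// fanning events out to sinks, a bridge that turns repository operations into
// Event records, and Listen decorating a repository so its manifest and blob
// operations emit notifications. The endpoint name and URL and the actor/source
// values below are placeholder assumptions, not values taken from this repository.
func wireNotificationsExample(repo distribution.Repository, ub URLBuilder) distribution.Repository {
	// Reliable, queued HTTP sink; zero-valued EndpointConfig fields fall back to
	// the defaults applied in endpoint.go (1s timeout, threshold 10, 1s backoff).
	endpoint := NewEndpoint("example-endpoint", "http://localhost:8080/events", EndpointConfig{})

	// A broadcaster fans incoming event blocks out to one or more sinks.
	sink := NewBroadcaster(endpoint)

	// The bridge stamps each Event with source, actor and request metadata and
	// writes it to the sink.
	listener := NewBridge(ub, SourceRecord{Addr: "registry.example.local:5000"}, ActorRecord{Name: "example-user"}, RequestRecord{}, sink)

	// Listen returns a repository whose pushes, pulls and mounts dispatch
	// notifications to the listener.
	return Listen(repo, listener)
}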
func (tl *testListener) BlobMounted(repo reference.Named, desc distribution.Descriptor, fromRepo reference.Named) error { tl.ops["layer:mount"]++ return nil } func (tl *testListener) BlobDeleted(repo reference.Named, desc distribution.Descriptor) error { tl.ops["layer:delete"]++ return nil } // checkExerciseRegistry takes the registry through all of its operations, // carrying out generic checks. func checkExerciseRepository(t *testing.T, repository distribution.Repository) { // TODO(stevvooe): This would be a nice testutil function. Basically, it // takes the registry through a common set of operations. This could be // used to make cross-cutting updates by changing internals that affect // update counts. Basically, it would make writing tests a lot easier. ctx := context.Background() tag := "thetag" // todo: change this to use Builder m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: repository.Name().Name(), Tag: tag, } blobs := repository.Blobs(ctx) for i := 0; i < 2; i++ { rs, ds, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating test layer: %v", err) } dgst := digest.Digest(ds) wr, err := blobs.Create(ctx) if err != nil { t.Fatalf("error creating layer upload: %v", err) } // Use the resumes, as well! wr, err = blobs.Resume(ctx, wr.ID()) if err != nil { t.Fatalf("error resuming layer upload: %v", err) } io.Copy(wr, rs) if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) m.History = append(m.History, schema1.History{ V1Compatibility: "", }) // Then fetch the blobs if rc, err := blobs.Open(ctx, dgst); err != nil { t.Fatalf("error fetching layer: %v", err) } else { defer rc.Close() } } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating key: %v", err) } sm, err := schema1.Sign(&m, pk) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } manifests, err := repository.Manifests(ctx) if err != nil { t.Fatal(err.Error()) } var digestPut digest.Digest if digestPut, err = manifests.Put(ctx, sm); err != nil { t.Fatalf("unexpected error putting the manifest: %v", err) } dgst := digest.FromBytes(sm.Canonical) if dgst != digestPut { t.Fatalf("mismatching digest from payload and put") } _, err = manifests.Get(ctx, dgst) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } } distribution-2.3.0/notifications/metrics.go000066400000000000000000000100451265472114500211100ustar00rootroot00000000000000package notifications import ( "expvar" "fmt" "net/http" "sync" ) // EndpointMetrics track various actions taken by the endpoint, typically by // number of events. The goal of this to export it via expvar but we may find // some other future solution to be better. type EndpointMetrics struct { Pending int // events pending in queue Events int // total events incoming Successes int // total events written successfully Failures int // total events failed Errors int // total events errored Statuses map[string]int // status code histogram, per call event } // safeMetrics guards the metrics implementation with a lock and provides a // safe update function. type safeMetrics struct { EndpointMetrics sync.Mutex // protects statuses map } // newSafeMetrics returns safeMetrics with map allocated. 
func newSafeMetrics() *safeMetrics { var sm safeMetrics sm.Statuses = make(map[string]int) return &sm } // httpStatusListener returns the listener for the http sink that updates the // relevant counters. func (sm *safeMetrics) httpStatusListener() httpStatusListener { return &endpointMetricsHTTPStatusListener{ safeMetrics: sm, } } // eventQueueListener returns a listener that maintains queue related counters. func (sm *safeMetrics) eventQueueListener() eventQueueListener { return &endpointMetricsEventQueueListener{ safeMetrics: sm, } } // endpointMetricsHTTPStatusListener increments counters related to http sinks // for the relevant events. type endpointMetricsHTTPStatusListener struct { *safeMetrics } var _ httpStatusListener = &endpointMetricsHTTPStatusListener{} func (emsl *endpointMetricsHTTPStatusListener) success(status int, events ...Event) { emsl.safeMetrics.Lock() defer emsl.safeMetrics.Unlock() emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) emsl.Successes += len(events) } func (emsl *endpointMetricsHTTPStatusListener) failure(status int, events ...Event) { emsl.safeMetrics.Lock() defer emsl.safeMetrics.Unlock() emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))] += len(events) emsl.Failures += len(events) } func (emsl *endpointMetricsHTTPStatusListener) err(err error, events ...Event) { emsl.safeMetrics.Lock() defer emsl.safeMetrics.Unlock() emsl.Errors += len(events) } // endpointMetricsEventQueueListener maintains the incoming events counter and // the queue's pending count. type endpointMetricsEventQueueListener struct { *safeMetrics } func (eqc *endpointMetricsEventQueueListener) ingress(events ...Event) { eqc.Lock() defer eqc.Unlock() eqc.Events += len(events) eqc.Pending += len(events) } func (eqc *endpointMetricsEventQueueListener) egress(events ...Event) { eqc.Lock() defer eqc.Unlock() eqc.Pending -= len(events) } // endpoints is a global registry of endpoints used to report metrics to expvar var endpoints struct { registered []*Endpoint mu sync.Mutex } // register places the endpoint into expvar so that stats are tracked. func register(e *Endpoint) { endpoints.mu.Lock() defer endpoints.mu.Unlock() endpoints.registered = append(endpoints.registered, e) } func init() { // NOTE(stevvooe): Setup registry metrics structure to report to expvar. // Ideally, we do more metrics through logging but we need some nice // realtime metrics for queue state for now. registry := expvar.Get("registry") if registry == nil { registry = expvar.NewMap("registry") } var notifications expvar.Map notifications.Init() notifications.Set("endpoints", expvar.Func(func() interface{} { endpoints.mu.Lock() defer endpoints.mu.Unlock() var names []interface{} for _, v := range endpoints.registered { var epjson struct { Name string `json:"name"` URL string `json:"url"` EndpointConfig Metrics EndpointMetrics } epjson.Name = v.Name() epjson.URL = v.URL() epjson.EndpointConfig = v.EndpointConfig v.ReadMetrics(&epjson.Metrics) names = append(names, epjson) } return names })) registry.(*expvar.Map).Set("notifications", &notifications) } distribution-2.3.0/notifications/sinks.go000066400000000000000000000205631265472114500205770ustar00rootroot00000000000000package notifications import ( "container/list" "fmt" "sync" "time" "github.com/Sirupsen/logrus" ) // NOTE(stevvooe): This file contains definitions for several utility sinks.
// Typically, the broadcaster is the only sink that should be required // externally, but others are suitable for export if the need arises. Albeit, // the tight integration with endpoint metrics should be removed. // Broadcaster sends events to multiple, reliable Sinks. The goal of this // component is to dispatch events to configured endpoints. Reliability can be // provided by wrapping incoming sinks. type Broadcaster struct { sinks []Sink events chan []Event closed chan chan struct{} } // NewBroadcaster ... // Add appends one or more sinks to the list of sinks. The broadcaster // behavior will be affected by the properties of the sink. Generally, the // sink should accept all messages and deal with reliability on its own. Use // of EventQueue and RetryingSink should be used here. func NewBroadcaster(sinks ...Sink) *Broadcaster { b := Broadcaster{ sinks: sinks, events: make(chan []Event), closed: make(chan chan struct{}), } // Start the broadcaster go b.run() return &b } // Write accepts a block of events to be dispatched to all sinks. This method // will never fail and should never block (hopefully!). The caller cedes the // slice memory to the broadcaster and should not modify it after calling // write. func (b *Broadcaster) Write(events ...Event) error { select { case b.events <- events: case <-b.closed: return ErrSinkClosed } return nil } // Close the broadcaster, ensuring that all messages are flushed to the // underlying sink before returning. func (b *Broadcaster) Close() error { logrus.Infof("broadcaster: closing") select { case <-b.closed: // already closed return fmt.Errorf("broadcaster: already closed") default: // do a little chan handoff dance to synchronize closing closed := make(chan struct{}) b.closed <- closed close(b.closed) <-closed return nil } } // run is the main broadcast loop, started when the broadcaster is created. // Under normal conditions, it waits for events on the event channel. After // Close is called, this goroutine will exit. func (b *Broadcaster) run() { for { select { case block := <-b.events: for _, sink := range b.sinks { if err := sink.Write(block...); err != nil { logrus.Errorf("broadcaster: error writing events to %v, these events will be lost: %v", sink, err) } } case closing := <-b.closed: // close all the underlying sinks for _, sink := range b.sinks { if err := sink.Close(); err != nil { logrus.Errorf("broadcaster: error closing sink %v: %v", sink, err) } } closing <- struct{}{} logrus.Debugf("broadcaster: closed") return } } } // eventQueue accepts all messages into a queue for asynchronous consumption // by a sink. It is unbounded and thread safe but the sink must be reliable or // events will be dropped. type eventQueue struct { sink Sink events *list.List listeners []eventQueueListener cond *sync.Cond mu sync.Mutex closed bool } // eventQueueListener is called when various events happen on the queue. type eventQueueListener interface { ingress(events ...Event) egress(events ...Event) } // newEventQueue returns a queue to the provided sink. If the updater is non- // nil, it will be called to update pending metrics on ingress and egress. func newEventQueue(sink Sink, listeners ...eventQueueListener) *eventQueue { eq := eventQueue{ sink: sink, events: list.New(), listeners: listeners, } eq.cond = sync.NewCond(&eq.mu) go eq.run() return &eq } // Write accepts the events into the queue, only failing if the queue has // beend closed. 
func (eq *eventQueue) Write(events ...Event) error { eq.mu.Lock() defer eq.mu.Unlock() if eq.closed { return ErrSinkClosed } for _, listener := range eq.listeners { listener.ingress(events...) } eq.events.PushBack(events) eq.cond.Signal() // signal waiters return nil } // Close shuts down the event queue, flushing any pending events before closing the underlying sink. func (eq *eventQueue) Close() error { eq.mu.Lock() defer eq.mu.Unlock() if eq.closed { return fmt.Errorf("eventqueue: already closed") } // set closed flag eq.closed = true eq.cond.Signal() // signal flushes queue eq.cond.Wait() // wait for signal from last flush return eq.sink.Close() } // run is the main goroutine to flush events to the target sink. func (eq *eventQueue) run() { for { block := eq.next() if block == nil { return // nil block means event queue is closed. } if err := eq.sink.Write(block...); err != nil { logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err) } for _, listener := range eq.listeners { listener.egress(block...) } } } // next encompasses the critical section of the run loop. When the queue is // empty, it will block on the condition. If new data arrives, it will wake // and return a block. When closed, a nil slice will be returned. func (eq *eventQueue) next() []Event { eq.mu.Lock() defer eq.mu.Unlock() for eq.events.Len() < 1 { if eq.closed { eq.cond.Broadcast() return nil } eq.cond.Wait() } front := eq.events.Front() block := front.Value.([]Event) eq.events.Remove(front) return block } // retryingSink retries the write until success or an ErrSinkClosed is // returned. The underlying sink must have a nonzero probability (p > 0) of succeeding or the sink will // block. Internally, it uses circuit breaker heuristics to manage retries and reset. // Concurrent calls to a retrying sink are serialized through the sink, // meaning that if one is in-flight, another will not proceed. type retryingSink struct { mu sync.Mutex sink Sink closed bool // circuit breaker heuristics failures struct { threshold int recent int last time.Time backoff time.Duration // time after which we retry after failure. } } type retryingSinkListener interface { active(events ...Event) retry(events ...Event) } // TODO(stevvooe): We are using circuit break here, which actually doesn't // make a whole lot of sense for this use case, since we always retry. Move // this to use bounded exponential backoff. // newRetryingSink returns a sink that will retry writes to a sink, backing // off on failure. Parameters threshold and backoff adjust the behavior of the // circuit breaker. func newRetryingSink(sink Sink, threshold int, backoff time.Duration) *retryingSink { rs := &retryingSink{ sink: sink, } rs.failures.threshold = threshold rs.failures.backoff = backoff return rs } // Write attempts to flush the events to the downstream sink until it succeeds // or the sink is closed. func (rs *retryingSink) Write(events ...Event) error { rs.mu.Lock() defer rs.mu.Unlock() retry: if rs.closed { return ErrSinkClosed } if !rs.proceed() { logrus.Warnf("%v encountered too many errors, backing off", rs.sink) rs.wait(rs.failures.backoff) goto retry } if err := rs.write(events...); err != nil { if err == ErrSinkClosed { // terminal! return err } logrus.Errorf("retryingsink: error writing events: %v, retrying", err) goto retry } return nil } // Close closes the retrying sink and the underlying sink.
func (rs *retryingSink) Close() error { rs.mu.Lock() defer rs.mu.Unlock() if rs.closed { return fmt.Errorf("retryingsink: already closed") } rs.closed = true return rs.sink.Close() } // write provides a helper that dispatches failure and success properly. Used // by write as the single-flight write call. func (rs *retryingSink) write(events ...Event) error { if err := rs.sink.Write(events...); err != nil { rs.failure() return err } rs.reset() return nil } // wait backoff time against the sink, unlocking so others can proceed. Should // only be called by methods that currently have the mutex. func (rs *retryingSink) wait(backoff time.Duration) { rs.mu.Unlock() defer rs.mu.Lock() // backoff here time.Sleep(backoff) } // reset marks a successful call. func (rs *retryingSink) reset() { rs.failures.recent = 0 rs.failures.last = time.Time{} } // failure records a failure. func (rs *retryingSink) failure() { rs.failures.recent++ rs.failures.last = time.Now().UTC() } // proceed returns true if the call should proceed based on circuit breaker // heuristics. func (rs *retryingSink) proceed() bool { return rs.failures.recent < rs.failures.threshold || time.Now().UTC().After(rs.failures.last.Add(rs.failures.backoff)) } distribution-2.3.0/notifications/sinks_test.go000066400000000000000000000106311265472114500216310ustar00rootroot00000000000000package notifications import ( "fmt" "math/rand" "sync" "time" "github.com/Sirupsen/logrus" "testing" ) func TestBroadcaster(t *testing.T) { const nEvents = 1000 var sinks []Sink for i := 0; i < 10; i++ { sinks = append(sinks, &testSink{}) } b := NewBroadcaster(sinks...) var block []Event var wg sync.WaitGroup for i := 1; i <= nEvents; i++ { block = append(block, createTestEvent("push", "library/test", "blob")) if i%10 == 0 && i > 0 { wg.Add(1) go func(block ...Event) { if err := b.Write(block...); err != nil { t.Fatalf("error writing block of length %d: %v", len(block), err) } wg.Done() }(block...) block = nil } } wg.Wait() // Wait until writes complete checkClose(t, b) // Iterate through the sinks and check that they all have the expected length. for _, sink := range sinks { ts := sink.(*testSink) ts.mu.Lock() defer ts.mu.Unlock() if len(ts.events) != nEvents { t.Fatalf("not all events ended up in testsink: len(testSink) == %d, not %d", len(ts.events), nEvents) } if !ts.closed { t.Fatalf("sink should have been closed") } } } func TestEventQueue(t *testing.T) { const nevents = 1000 var ts testSink metrics := newSafeMetrics() eq := newEventQueue( // delayed sync simulates destination slower than channel comms &delayedSink{ Sink: &ts, delay: time.Millisecond * 1, }, metrics.eventQueueListener()) var wg sync.WaitGroup var block []Event for i := 1; i <= nevents; i++ { block = append(block, createTestEvent("push", "library/test", "blob")) if i%10 == 0 && i > 0 { wg.Add(1) go func(block ...Event) { if err := eq.Write(block...); err != nil { t.Fatalf("error writing event block: %v", err) } wg.Done() }(block...) 
block = nil } } wg.Wait() checkClose(t, eq) ts.mu.Lock() defer ts.mu.Unlock() metrics.Lock() defer metrics.Unlock() if len(ts.events) != nevents { t.Fatalf("events did not make it to the sink: %d != %d", len(ts.events), 1000) } if !ts.closed { t.Fatalf("sink should have been closed") } if metrics.Events != nevents { t.Fatalf("unexpected ingress count: %d != %d", metrics.Events, nevents) } if metrics.Pending != 0 { t.Fatalf("unexpected egress count: %d != %d", metrics.Pending, 0) } } func TestRetryingSink(t *testing.T) { // Make a sync that fails most of the time, ensuring that all the events // make it through. var ts testSink flaky := &flakySink{ rate: 1.0, // start out always failing. Sink: &ts, } s := newRetryingSink(flaky, 3, 10*time.Millisecond) var wg sync.WaitGroup var block []Event for i := 1; i <= 100; i++ { block = append(block, createTestEvent("push", "library/test", "blob")) // Above 50, set the failure rate lower if i > 50 { s.mu.Lock() flaky.rate = 0.90 s.mu.Unlock() } if i%10 == 0 && i > 0 { wg.Add(1) go func(block ...Event) { defer wg.Done() if err := s.Write(block...); err != nil { t.Fatalf("error writing event block: %v", err) } }(block...) block = nil } } wg.Wait() checkClose(t, s) ts.mu.Lock() defer ts.mu.Unlock() if len(ts.events) != 100 { t.Fatalf("events not propagated: %d != %d", len(ts.events), 100) } } type testSink struct { events []Event mu sync.Mutex closed bool } func (ts *testSink) Write(events ...Event) error { ts.mu.Lock() defer ts.mu.Unlock() ts.events = append(ts.events, events...) return nil } func (ts *testSink) Close() error { ts.mu.Lock() defer ts.mu.Unlock() ts.closed = true logrus.Infof("closing testSink") return nil } type delayedSink struct { Sink delay time.Duration } func (ds *delayedSink) Write(events ...Event) error { time.Sleep(ds.delay) return ds.Sink.Write(events...) } type flakySink struct { Sink rate float64 } func (fs *flakySink) Write(events ...Event) error { if rand.Float64() < fs.rate { return fmt.Errorf("error writing %d events", len(events)) } return fs.Sink.Write(events...) } func checkClose(t *testing.T, sink Sink) { if err := sink.Close(); err != nil { t.Fatalf("unexpected error closing: %v", err) } // second close should not crash but should return an error. 
if err := sink.Close(); err == nil { t.Fatalf("no error on double close") } // Write after closed should be an error if err := sink.Write([]Event{}...); err == nil { t.Fatalf("write after closed did not have an error") } else if err != ErrSinkClosed { t.Fatalf("error should be ErrSinkClosed") } } distribution-2.3.0/project/000077500000000000000000000000001265472114500157105ustar00rootroot00000000000000distribution-2.3.0/project/dev-image/000077500000000000000000000000001265472114500175465ustar00rootroot00000000000000distribution-2.3.0/project/dev-image/Dockerfile000066400000000000000000000011271265472114500215410ustar00rootroot00000000000000FROM ubuntu:14.04 ENV GOLANG_VERSION 1.4rc1 ENV GOPATH /var/cache/drone ENV GOROOT /usr/local/go ENV PATH $PATH:$GOROOT/bin:$GOPATH/bin ENV LANG C ENV LC_ALL C RUN apt-get update && apt-get install -y \ wget ca-certificates git mercurial bzr \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* RUN wget https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz --quiet && \ tar -C /usr/local -xzf go$GOLANG_VERSION.linux-amd64.tar.gz && \ rm go${GOLANG_VERSION}.linux-amd64.tar.gz RUN go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint distribution-2.3.0/project/hooks/000077500000000000000000000000001265472114500170335ustar00rootroot00000000000000distribution-2.3.0/project/hooks/README.md000066400000000000000000000011751265472114500203160ustar00rootroot00000000000000Git Hooks ========= To enforce valid and properly-formatted code, there is CI in place which runs `gofmt`, `golint`, and `go vet` against code in the repository. As an aid to prevent committing invalid code in the first place, a git pre-commit hook has been added to the repository, found in [pre-commit](./pre-commit). As it is impossible to automatically add linked hooks to a git repository, this hook should be linked into your `.git/hooks/pre-commit`, which can be done by running the `configure-hooks.sh` script in this directory. This script is the preferred method of configuring hooks, as it will be updated as more are added.distribution-2.3.0/project/hooks/configure-hooks.sh000077500000000000000000000006351265472114500225000ustar00rootroot00000000000000#!/bin/sh cd $(dirname $0) REPO_ROOT=$(git rev-parse --show-toplevel) RESOLVE_REPO_ROOT_STATUS=$? if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then echo -e "Unable to resolve repository root. Error:\n$REPO_ROOT" > /dev/stderr exit $RESOLVE_REPO_ROOT_STATUS fi set -e set -x # Just in case the directory doesn't exist mkdir -p $REPO_ROOT/.git/hooks ln -f -s $(pwd)/pre-commit $REPO_ROOT/.git/hooks/pre-commitdistribution-2.3.0/project/hooks/pre-commit000077500000000000000000000015701265472114500210400ustar00rootroot00000000000000#!/bin/sh REPO_ROOT=$(git rev-parse --show-toplevel) RESOLVE_REPO_ROOT_STATUS=$? if [ "$RESOLVE_REPO_ROOT_STATUS" -ne "0" ]; then printf "Unable to resolve repository root. Error:\n%s\n" "$RESOLVE_REPO_ROOT_STATUS" > /dev/stderr exit $RESOLVE_REPO_ROOT_STATUS fi cd $REPO_ROOT GOFMT_ERRORS=$(gofmt -s -l . 2>&1) if [ -n "$GOFMT_ERRORS" ]; then printf 'gofmt failed for the following files:\n%s\n\nPlease run "gofmt -s -l ." in the root of your repository before committing\n' "$GOFMT_ERRORS" > /dev/stderr exit 1 fi GOLINT_ERRORS=$(golint ./... 2>&1) if [ -n "$GOLINT_ERRORS" ]; then printf "golint failed with the following errors:\n%s\n" "$GOLINT_ERRORS" > /dev/stderr exit 1 fi GOVET_ERRORS=$(go vet ./... 2>&1) GOVET_STATUS=$? 
if [ "$GOVET_STATUS" -ne "0" ]; then printf "govet failed with the following errors:\n%s\n" "$GOVET_ERRORS" > /dev/stderr exit $GOVET_STATUS fi distribution-2.3.0/reference/000077500000000000000000000000001265472114500162005ustar00rootroot00000000000000distribution-2.3.0/reference/reference.go000066400000000000000000000205571265472114500204760ustar00rootroot00000000000000// Package reference provides a general type to represent any way of referencing images within the registry. // Its main purpose is to abstract tags and digests (content-addressable hash). // // Grammar // // reference := repository [ ":" tag ] [ "@" digest ] // name := [hostname '/'] component ['/' component]* // hostname := hostcomponent ['.' hostcomponent]* [':' port-number] // hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ // port-number := /[0-9]+/ // component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex // digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value package reference import ( "errors" "fmt" "github.com/docker/distribution/digest" ) const ( // NameTotalLengthMax is the maximum total number of characters in a repository name. NameTotalLengthMax = 255 ) var ( // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. ErrReferenceInvalidFormat = errors.New("invalid reference format") // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. ErrTagInvalidFormat = errors.New("invalid tag format") // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. ErrDigestInvalidFormat = errors.New("invalid digest format") // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) ) // Reference is an opaque object reference identifier that may include // modifiers such as a hostname, name, tag, and digest. type Reference interface { // String returns the full reference String() string } // Field provides a wrapper type for resolving correct reference types when // working with encoding. type Field struct { reference Reference } // AsField wraps a reference in a Field for encoding. func AsField(reference Reference) Field { return Field{reference} } // Reference unwraps the reference type from the field to // return the Reference object. This object should be // of the appropriate type to further check for different // reference types. func (f Field) Reference() Reference { return f.reference } // MarshalText serializes the field to byte text which // is the string of the reference. func (f Field) MarshalText() (p []byte, err error) { return []byte(f.reference.String()), nil } // UnmarshalText parses text bytes by invoking the // reference parser to ensure the appropriately // typed reference object is wrapped by field. 
func (f *Field) UnmarshalText(p []byte) error { r, err := Parse(string(p)) if err != nil { return err } f.reference = r return nil } // Named is an object with a full name type Named interface { Reference Name() string } // Tagged is an object which has a tag type Tagged interface { Reference Tag() string } // NamedTagged is an object including a name and tag. type NamedTagged interface { Named Tag() string } // Digested is an object which has a digest // in which it can be referenced by type Digested interface { Reference Digest() digest.Digest } // Canonical reference is an object with a fully unique // name including a name with hostname and digest type Canonical interface { Named Digest() digest.Digest } // SplitHostname splits a named reference into a // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as name func SplitHostname(named Named) (string, string) { name := named.Name() match := anchoredNameRegexp.FindStringSubmatch(name) if match == nil || len(match) != 3 { return "", name } return match[1], match[2] } // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. // NOTE: Parse will not handle short digests. func Parse(s string) (Reference, error) { matches := ReferenceRegexp.FindStringSubmatch(s) if matches == nil { if s == "" { return nil, ErrNameEmpty } // TODO(dmcgowan): Provide more specific and helpful error return nil, ErrReferenceInvalidFormat } if len(matches[1]) > NameTotalLengthMax { return nil, ErrNameTooLong } ref := reference{ name: matches[1], tag: matches[2], } if matches[3] != "" { var err error ref.digest, err = digest.ParseDigest(matches[3]) if err != nil { return nil, err } } r := getBestReferenceType(ref) if r == nil { return nil, ErrNameEmpty } return r, nil } // ParseNamed parses s and returns a syntactically valid reference implementing // the Named interface. The reference must have a name, otherwise an error is // returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests. func ParseNamed(s string) (Named, error) { ref, err := Parse(s) if err != nil { return nil, err } named, isNamed := ref.(Named) if !isNamed { return nil, fmt.Errorf("reference %s has no name", ref.String()) } return named, nil } // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } if !anchoredNameRegexp.MatchString(name) { return nil, ErrReferenceInvalidFormat } return repository(name), nil } // WithTag combines the name from "name" and the tag from "tag" to form a // reference incorporating both the name and the tag. func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } return taggedReference{ name: name.Name(), tag: tag, }, nil } // WithDigest combines the name from "name" and the digest from "digest" to form // a reference incorporating both the name and the digest. 
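//
// A hedged usage sketch combining WithName, WithTag and WithDigest (the
// repository name, tag and digest below are invented example values):
//
//	named, _ := WithName("example.com:5000/myrepo")
//	tagged, _ := WithTag(named, "v1")
//	// tagged.String() == "example.com:5000/myrepo:v1"
//	canonical, _ := WithDigest(named, "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
//	// canonical.String() == "example.com:5000/myrepo@sha256:ffff..."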
func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } return canonicalReference{ name: name.Name(), digest: digest, }, nil } func getBestReferenceType(ref reference) Reference { if ref.name == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) } return nil } if ref.tag == "" { if ref.digest != "" { return canonicalReference{ name: ref.name, digest: ref.digest, } } return repository(ref.name) } if ref.digest == "" { return taggedReference{ name: ref.name, tag: ref.tag, } } return ref } type reference struct { name string tag string digest digest.Digest } func (r reference) String() string { return r.name + ":" + r.tag + "@" + r.digest.String() } func (r reference) Name() string { return r.name } func (r reference) Tag() string { return r.tag } func (r reference) Digest() digest.Digest { return r.digest } type repository string func (r repository) String() string { return string(r) } func (r repository) Name() string { return string(r) } type digestReference digest.Digest func (d digestReference) String() string { return d.String() } func (d digestReference) Digest() digest.Digest { return digest.Digest(d) } type taggedReference struct { name string tag string } func (t taggedReference) String() string { return t.name + ":" + t.tag } func (t taggedReference) Name() string { return t.name } func (t taggedReference) Tag() string { return t.tag } type canonicalReference struct { name string digest digest.Digest } func (c canonicalReference) String() string { return c.name + "@" + c.digest.String() } func (c canonicalReference) Name() string { return c.name } func (c canonicalReference) Digest() digest.Digest { return c.digest } distribution-2.3.0/reference/reference_test.go000066400000000000000000000326711265472114500215350ustar00rootroot00000000000000package reference import ( "encoding/json" "strconv" "strings" "testing" "github.com/docker/distribution/digest" ) func TestReferenceParse(t *testing.T) { // referenceTestcases is a unified set of testcases for // testing the parsing of references referenceTestcases := []struct { // input is the repository name or name component testcase input string // err is the error expected from Parse, or nil err error // repository is the string representation for the reference repository string // hostname is the hostname expected in the reference hostname string // tag is the tag for the reference tag string // digest is the digest for the reference (enforces digest reference) digest string }{ { input: "test_com", repository: "test_com", }, { input: "test.com:tag", repository: "test.com", tag: "tag", }, { input: "test.com:5000", repository: "test.com", tag: "5000", }, { input: "test.com/repo:tag", hostname: "test.com", repository: "test.com/repo", tag: "tag", }, { input: "test:5000/repo", hostname: "test:5000", repository: "test:5000/repo", }, { input: "test:5000/repo:tag", hostname: "test:5000", repository: "test:5000/repo", tag: "tag", }, { input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", hostname: "test:5000", repository: "test:5000/repo", digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", hostname: "test:5000", repository: "test:5000/repo", tag: "tag", digest: 
"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo", hostname: "test:5000", repository: "test:5000/repo", }, { input: "", err: ErrNameEmpty, }, { input: ":justtag", err: ErrReferenceInvalidFormat, }, { input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, { input: "repo@sha256:ffffffffffffffffffffffffffffffffff", err: digest.ErrDigestInvalidLength, }, { input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: digest.ErrDigestUnsupported, }, { input: strings.Repeat("a/", 128) + "a:tag", err: ErrNameTooLong, }, { input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", hostname: "a", repository: strings.Repeat("a/", 127) + "a", tag: "tag-puts-this-over-max", }, { input: "aa/asdf$$^/aa", err: ErrReferenceInvalidFormat, }, { input: "sub-dom1.foo.com/bar/baz/quux", hostname: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", }, { input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", hostname: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", tag: "some-long-tag", }, { input: "b.gcr.io/test.example.com/my-app:test.example.com", hostname: "b.gcr.io", repository: "b.gcr.io/test.example.com/my-app", tag: "test.example.com", }, { input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode hostname: "xn--n3h.com", repository: "xn--n3h.com/myimage", tag: "xn--n3h.com", }, { input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode hostname: "xn--7o8h.com", repository: "xn--7o8h.com/myimage", tag: "xn--7o8h.com", digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "foo_bar.com:8080", repository: "foo_bar.com", tag: "8080", }, { input: "foo/foo_bar.com:8080", hostname: "foo", repository: "foo/foo_bar.com", tag: "8080", }, } for _, testcase := range referenceTestcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
t.Fail() } repo, err := Parse(testcase.input) if testcase.err != nil { if err == nil { failf("missing expected error: %v", testcase.err) } else if testcase.err != err { failf("mismatched error: got %v, expected %v", err, testcase.err) } continue } else if err != nil { failf("unexpected parse error: %v", err) continue } if repo.String() != testcase.input { failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) } if named, ok := repo.(Named); ok { if named.Name() != testcase.repository { failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) } hostname, _ := SplitHostname(named) if hostname != testcase.hostname { failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) } } else if testcase.repository != "" || testcase.hostname != "" { failf("expected named type, got %T", repo) } tagged, ok := repo.(Tagged) if testcase.tag != "" { if ok { if tagged.Tag() != testcase.tag { failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) } } else { failf("expected tagged type, got %T", repo) } } else if ok { failf("unexpected tagged type") } digested, ok := repo.(Digested) if testcase.digest != "" { if ok { if digested.Digest().String() != testcase.digest { failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) } } else { failf("expected digested type, got %T", repo) } } else if ok { failf("unexpected digested type") } } } // TestWithNameFailure tests cases where WithName should fail. Cases where it // should succeed are covered by TestSplitHostname, below. func TestWithNameFailure(t *testing.T) { testcases := []struct { input string err error }{ { input: "", err: ErrNameEmpty, }, { input: ":justtag", err: ErrReferenceInvalidFormat, }, { input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, { input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, { input: strings.Repeat("a/", 128) + "a:tag", err: ErrNameTooLong, }, { input: "aa/asdf$$^/aa", err: ErrReferenceInvalidFormat, }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() } _, err := WithName(testcase.input) if err == nil { failf("no error parsing name. expected: %s", testcase.err) } } } func TestSplitHostname(t *testing.T) { testcases := []struct { input string hostname string name string }{ { input: "test.com/foo", hostname: "test.com", name: "foo", }, { input: "test_com/foo", hostname: "", name: "test_com/foo", }, { input: "test:8080/foo", hostname: "test:8080", name: "foo", }, { input: "test.com:8080/foo", hostname: "test.com:8080", name: "foo", }, { input: "test-com:8080/foo", hostname: "test-com:8080", name: "foo", }, { input: "xn--n3h.com:18080/foo", hostname: "xn--n3h.com:18080", name: "foo", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
t.Fail() } named, err := WithName(testcase.input) if err != nil { failf("error parsing name: %s", err) } hostname, name := SplitHostname(named) if hostname != testcase.hostname { failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) } if name != testcase.name { failf("unexpected name: got %q, expected %q", name, testcase.name) } } } type serializationType struct { Description string Field Field } func TestSerialization(t *testing.T) { testcases := []struct { description string input string name string tag string digest string err error }{ { description: "empty value", err: ErrNameEmpty, }, { description: "just a name", input: "example.com:8000/named", name: "example.com:8000/named", }, { description: "name with a tag", input: "example.com:8000/named:tagged", name: "example.com:8000/named", tag: "tagged", }, { description: "name with digest", input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", name: "other.com/named", digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() } m := map[string]string{ "Description": testcase.description, "Field": testcase.input, } b, err := json.Marshal(m) if err != nil { failf("error marshalling: %v", err) } t := serializationType{} if err := json.Unmarshal(b, &t); err != nil { if testcase.err == nil { failf("error unmarshalling: %v", err) } if err != testcase.err { failf("wrong error, expected %v, got %v", testcase.err, err) } continue } else if testcase.err != nil { failf("expected error unmarshalling: %v", testcase.err) } if t.Description != testcase.description { failf("wrong description, expected %q, got %q", testcase.description, t.Description) } ref := t.Field.Reference() if named, ok := ref.(Named); ok { if named.Name() != testcase.name { failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) } } else if testcase.name != "" { failf("expected named type, got %T", ref) } tagged, ok := ref.(Tagged) if testcase.tag != "" { if ok { if tagged.Tag() != testcase.tag { failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) } } else { failf("expected tagged type, got %T", ref) } } else if ok { failf("unexpected tagged type") } digested, ok := ref.(Digested) if testcase.digest != "" { if ok { if digested.Digest().String() != testcase.digest { failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) } } else { failf("expected digested type, got %T", ref) } } else if ok { failf("unexpected digested type") } t = serializationType{ Description: testcase.description, Field: AsField(ref), } b2, err := json.Marshal(t) if err != nil { failf("error marshing serialization type: %v", err) } if string(b) != string(b2) { failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) } // Ensure t.Field is not implementing "Reference" directly, getting // around the Reference type system var fieldInterface interface{} = t.Field if _, ok := fieldInterface.(Reference); ok { failf("field should not implement Reference interface") } } } func TestWithTag(t *testing.T) { testcases := []struct { name string tag string combined string }{ { name: "test.com/foo", tag: "tag", combined: "test.com/foo:tag", }, { name: "foo", tag: "tag2", combined: "foo:tag2", }, { name: "test.com:8000/foo", tag: "tag4", combined: "test.com:8000/foo:tag4", }, { 
name: "test.com:8000/foo", tag: "TAG5", combined: "test.com:8000/foo:TAG5", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.name)+": "+format, v...) t.Fail() } named, err := WithName(testcase.name) if err != nil { failf("error parsing name: %s", err) } tagged, err := WithTag(named, testcase.tag) if err != nil { failf("WithTag failed: %s", err) } if tagged.String() != testcase.combined { failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) } } } func TestWithDigest(t *testing.T) { testcases := []struct { name string digest digest.Digest combined string }{ { name: "test.com/foo", digest: "sha256:1234567890098765432112345667890098765", combined: "test.com/foo@sha256:1234567890098765432112345667890098765", }, { name: "foo", digest: "sha256:1234567890098765432112345667890098765", combined: "foo@sha256:1234567890098765432112345667890098765", }, { name: "test.com:8000/foo", digest: "sha256:1234567890098765432112345667890098765", combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.name)+": "+format, v...) t.Fail() } named, err := WithName(testcase.name) if err != nil { failf("error parsing name: %s", err) } digested, err := WithDigest(named, testcase.digest) if err != nil { failf("WithDigest failed: %s", err) } if digested.String() != testcase.combined { failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) } } } distribution-2.3.0/reference/regexp.go000066400000000000000000000105071265472114500200240ustar00rootroot00000000000000package reference import "regexp" var ( // alphaNumericRegexp defines the alpha numeric atom, typically a // component of names. This only allows lower case characters and digits. alphaNumericRegexp = match(`[a-z0-9]+`) // separatorRegexp defines the separators allowed to be embedded in name // components. This allow one period, one or two underscore and multiple // dashes. separatorRegexp = match(`(?:[._]|__|[-]*)`) // nameComponentRegexp restricts registry path component names to start // with at least one letter or number, with following parts able to be // separated by one period, one or two underscore and multiple dashes. nameComponentRegexp = expression( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) // hostnameComponentRegexp restricts the registry hostname component of a // repository name to start with a component as defined by hostnameRegexp // and followed by an optional port. hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) // hostnameRegexp defines the structure of potential hostname components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. hostnameRegexp = expression( hostnameComponentRegexp, optional(repeated(literal(`.`), hostnameComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. TagRegexp = match(`[\w][\w.-]{0,127}`) // anchoredTagRegexp matches valid tag names, anchored at the start and // end of the matched string. anchoredTagRegexp = anchored(TagRegexp) // DigestRegexp matches valid digests. 
DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) // anchoredDigestRegexp matches valid digests, anchored at the start and // end of the matched string. anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the hostname and name part omitting // the seperating forward slash from either. NameRegexp = expression( optional(hostnameRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the // hostname and trailing components. anchoredNameRegexp = anchored( optional(capture(hostnameRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) // ReferenceRegexp is the full supported format of a reference. The regexp // is anchored and has capturing groups for name, tag, and digest // components. ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) ) // match compiles the string to a regular expression. var match = regexp.MustCompile // literal compiles s into a literal regular expression, escaping any regexp // reserved characters. func literal(s string) *regexp.Regexp { re := match(regexp.QuoteMeta(s)) if _, complete := re.LiteralPrefix(); !complete { panic("must be a literal") } return re } // expression defines a full expression, where each regular expression must // follow the previous. func expression(res ...*regexp.Regexp) *regexp.Regexp { var s string for _, re := range res { s += re.String() } return match(s) } // optional wraps the expression in a non-capturing group and makes the // production optional. func optional(res ...*regexp.Regexp) *regexp.Regexp { return match(group(expression(res...)).String() + `?`) } // repeated wraps the regexp in a non-capturing group to get one or more // matches. func repeated(res ...*regexp.Regexp) *regexp.Regexp { return match(group(expression(res...)).String() + `+`) } // group wraps the regexp in a non-capturing group. func group(res ...*regexp.Regexp) *regexp.Regexp { return match(`(?:` + expression(res...).String() + `)`) } // capture wraps the expression in a capturing group. func capture(res ...*regexp.Regexp) *regexp.Regexp { return match(`(` + expression(res...).String() + `)`) } // anchored anchors the regular expression by adding start and end delimiters. 
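//
// As a hedged illustration of how the anchored, capturing expressions behave
// (the reference string is an invented example):
//
//	m := ReferenceRegexp.FindStringSubmatch("example.com:5000/myrepo:v1")
//	// m[1] == "example.com:5000/myrepo" (name)
//	// m[2] == "v1"                      (tag)
//	// m[3] == ""                        (no digest present)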
func anchored(res ...*regexp.Regexp) *regexp.Regexp { return match(`^` + expression(res...).String() + `$`) } distribution-2.3.0/reference/regexp_test.go000066400000000000000000000226061265472114500210660ustar00rootroot00000000000000package reference import ( "regexp" "strings" "testing" ) type regexpMatch struct { input string match bool subs []string } func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { matches := r.FindStringSubmatch(m.input) if m.match && matches != nil { if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { t.Fatalf("Bad match result %#v for %q", matches, m.input) } if len(matches) < (len(m.subs) + 1) { t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) } for i := range m.subs { if m.subs[i] != matches[i+1] { t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) } } } else if m.match { t.Errorf("Expected match for %q", m.input) } else if matches != nil { t.Errorf("Unexpected match for %q", m.input) } } func TestHostRegexp(t *testing.T) { hostcases := []regexpMatch{ { input: "test.com", match: true, }, { input: "test.com:10304", match: true, }, { input: "test.com:http", match: false, }, { input: "localhost", match: true, }, { input: "localhost:8080", match: true, }, { input: "a", match: true, }, { input: "a.b", match: true, }, { input: "ab.cd.com", match: true, }, { input: "a-b.com", match: true, }, { input: "-ab.com", match: false, }, { input: "ab-.com", match: false, }, { input: "ab.c-om", match: true, }, { input: "ab.-com", match: false, }, { input: "ab.com-", match: false, }, { input: "0101.com", match: true, // TODO(dmcgowan): valid if this should be allowed }, { input: "001a.com", match: true, }, { input: "b.gbc.io:443", match: true, }, { input: "b.gbc.io", match: true, }, { input: "xn--n3h.com", // ☃.com in punycode match: true, }, } r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`) for i := range hostcases { checkRegexp(t, r, hostcases[i]) } } func TestFullNameRegexp(t *testing.T) { if anchoredNameRegexp.NumSubexp() != 2 { t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) } testcases := []regexpMatch{ { input: "", match: false, }, { input: "short", match: true, subs: []string{"", "short"}, }, { input: "simple/name", match: true, subs: []string{"simple", "name"}, }, { input: "library/ubuntu", match: true, subs: []string{"library", "ubuntu"}, }, { input: "docker/stevvooe/app", match: true, subs: []string{"docker", "stevvooe/app"}, }, { input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", match: true, subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, }, { input: "aa/aa/bb/bb/bb", match: true, subs: []string{"aa", "aa/bb/bb/bb"}, }, { input: "a/a/a/a", match: true, subs: []string{"a", "a/a/a"}, }, { input: "a/a/a/a/", match: false, }, { input: "a//a/a", match: false, }, { input: "a", match: true, subs: []string{"", "a"}, }, { input: "a/aa", match: true, subs: []string{"a", "aa"}, }, { input: "a/aa/a", match: true, subs: []string{"a", "aa/a"}, }, { input: "foo.com", match: true, subs: []string{"", "foo.com"}, }, { input: "foo.com/", match: false, }, { input: "foo.com:8080/bar", match: true, subs: []string{"foo.com:8080", "bar"}, }, { input: "foo.com:http/bar", match: false, }, { input: "foo.com/bar", match: true, subs: []string{"foo.com", "bar"}, }, { input: "foo.com/bar/baz", match: true, subs: []string{"foo.com", "bar/baz"}, }, { input: 
"localhost:8080/bar", match: true, subs: []string{"localhost:8080", "bar"}, }, { input: "sub-dom1.foo.com/bar/baz/quux", match: true, subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, }, { input: "blog.foo.com/bar/baz", match: true, subs: []string{"blog.foo.com", "bar/baz"}, }, { input: "a^a", match: false, }, { input: "aa/asdf$$^/aa", match: false, }, { input: "asdf$$^/aa", match: false, }, { input: "aa-a/a", match: true, subs: []string{"aa-a", "a"}, }, { input: strings.Repeat("a/", 128) + "a", match: true, subs: []string{"a", strings.Repeat("a/", 127) + "a"}, }, { input: "a-/a/a/a", match: false, }, { input: "foo.com/a-/a/a", match: false, }, { input: "-foo/bar", match: false, }, { input: "foo/bar-", match: false, }, { input: "foo-/bar", match: false, }, { input: "foo/-bar", match: false, }, { input: "_foo/bar", match: false, }, { input: "foo_bar", match: true, subs: []string{"", "foo_bar"}, }, { input: "foo_bar.com", match: true, subs: []string{"", "foo_bar.com"}, }, { input: "foo_bar.com:8080", match: false, }, { input: "foo_bar.com:8080/app", match: false, }, { input: "foo.com/foo_bar", match: true, subs: []string{"foo.com", "foo_bar"}, }, { input: "____/____", match: false, }, { input: "_docker/_docker", match: false, }, { input: "docker_/docker_", match: false, }, { input: "b.gcr.io/test.example.com/my-app", match: true, subs: []string{"b.gcr.io", "test.example.com/my-app"}, }, { input: "xn--n3h.com/myimage", // ☃.com in punycode match: true, subs: []string{"xn--n3h.com", "myimage"}, }, { input: "xn--7o8h.com/myimage", // 🐳.com in punycode match: true, subs: []string{"xn--7o8h.com", "myimage"}, }, { input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode match: true, subs: []string{"example.com", "xn--7o8h.com/myimage"}, }, { input: "example.com/some_separator__underscore/myimage", match: true, subs: []string{"example.com", "some_separator__underscore/myimage"}, }, { input: "example.com/__underscore/myimage", match: false, }, { input: "example.com/..dots/myimage", match: false, }, { input: "example.com/.dots/myimage", match: false, }, { input: "example.com/nodouble..dots/myimage", match: false, }, { input: "example.com/nodouble..dots/myimage", match: false, }, { input: "docker./docker", match: false, }, { input: ".docker/docker", match: false, }, { input: "docker-/docker", match: false, }, { input: "-docker/docker", match: false, }, { input: "do..cker/docker", match: false, }, { input: "do__cker:8080/docker", match: false, }, { input: "do__cker/docker", match: true, subs: []string{"", "do__cker/docker"}, }, { input: "b.gcr.io/test.example.com/my-app", match: true, subs: []string{"b.gcr.io", "test.example.com/my-app"}, }, { input: "registry.io/foo/project--id.module--name.ver---sion--name", match: true, subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, }, } for i := range testcases { checkRegexp(t, anchoredNameRegexp, testcases[i]) } } func TestReferenceRegexp(t *testing.T) { if ReferenceRegexp.NumSubexp() != 3 { t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", ReferenceRegexp, ReferenceRegexp.NumSubexp()) } testcases := []regexpMatch{ { input: "registry.com:8080/myapp:tag", match: true, subs: []string{"registry.com:8080/myapp", "tag", ""}, }, { input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: 
"registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "registry.com:8080/myapp@sha256:badbadbadbad", match: false, }, { input: "registry.com:8080/myapp:invalid~tag", match: false, }, { input: "bad_hostname.com:8080/myapp:tag", match: false, }, { input:// localhost treated as name, missing tag with 8080 as tag "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: false, }, { // localhost will be treated as an image name without a host input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "registry.com:8080/myapp@bad", match: false, }, { input: "registry.com:8080/myapp@2bad", match: false, // TODO(dmcgowan): Support this as valid }, } for i := range testcases { checkRegexp(t, ReferenceRegexp, testcases[i]) } } distribution-2.3.0/registry.go000066400000000000000000000051561265472114500164500ustar00rootroot00000000000000package distribution import ( "github.com/docker/distribution/context" "github.com/docker/distribution/reference" ) // Scope defines the set of items that match a namespace. type Scope interface { // Contains returns true if the name belongs to the namespace. Contains(name string) bool } type fullScope struct{} func (f fullScope) Contains(string) bool { return true } // GlobalScope represents the full namespace scope which contains // all other scopes. var GlobalScope = Scope(fullScope{}) // Namespace represents a collection of repositories, addressable by name. // Generally, a namespace is backed by a set of one or more services, // providing facilities such as registry access, trust, and indexing. type Namespace interface { // Scope describes the names that can be used with this Namespace. The // global namespace will have a scope that matches all names. The scope // effectively provides an identity for the namespace. Scope() Scope // Repository should return a reference to the named repository. The // registry may or may not have the repository but should always return a // reference. Repository(ctx context.Context, name reference.Named) (Repository, error) // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries // which were filled. 'last' contains an offset in the catalog, and 'err' will be // set to io.EOF if there are no more entries to obtain. Repositories(ctx context.Context, repos []string, last string) (n int, err error) } // ManifestServiceOption is a function argument for Manifest Service methods type ManifestServiceOption interface { Apply(ManifestService) error } // Repository is a named collection of manifests and layers. 
type Repository interface { // Name returns the name of the repository. Name() reference.Named // Manifests returns a reference to this repository's manifest service. // with the supplied options applied. Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) // Blobs returns a reference to this repository's blob service. Blobs(ctx context.Context) BlobStore // TODO(stevvooe): The above BlobStore return can probably be relaxed to // be a BlobService for use with clients. This will allow such // implementations to avoid implementing ServeBlob. // Tags returns a reference to this repositories tag service Tags(ctx context.Context) TagService } // TODO(stevvooe): Must add close methods to all these. May want to change the // way instances are created to better reflect internal dependency // relationships. distribution-2.3.0/registry/000077500000000000000000000000001265472114500161125ustar00rootroot00000000000000distribution-2.3.0/registry/api/000077500000000000000000000000001265472114500166635ustar00rootroot00000000000000distribution-2.3.0/registry/api/errcode/000077500000000000000000000000001265472114500203065ustar00rootroot00000000000000distribution-2.3.0/registry/api/errcode/errors.go000066400000000000000000000150411265472114500221520ustar00rootroot00000000000000package errcode import ( "encoding/json" "fmt" "strings" ) // ErrorCoder is the base interface for ErrorCode and Error allowing // users of each to just call ErrorCode to get the real ID of each type ErrorCoder interface { ErrorCode() ErrorCode } // ErrorCode represents the error type. The errors are serialized via strings // and the integer format may change and should *never* be exported. type ErrorCode int var _ error = ErrorCode(0) // ErrorCode just returns itself func (ec ErrorCode) ErrorCode() ErrorCode { return ec } // Error returns the ID/Value func (ec ErrorCode) Error() string { // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) } // Descriptor returns the descriptor for the error code. func (ec ErrorCode) Descriptor() ErrorDescriptor { d, ok := errorCodeToDescriptors[ec] if !ok { return ErrorCodeUnknown.Descriptor() } return d } // String returns the canonical identifier for this error code. func (ec ErrorCode) String() string { return ec.Descriptor().Value } // Message returned the human-readable error message for this error code. func (ec ErrorCode) Message() string { return ec.Descriptor().Message } // MarshalText encodes the receiver into UTF-8-encoded text and returns the // result. func (ec ErrorCode) MarshalText() (text []byte, err error) { return []byte(ec.String()), nil } // UnmarshalText decodes the form generated by MarshalText. func (ec *ErrorCode) UnmarshalText(text []byte) error { desc, ok := idToDescriptors[string(text)] if !ok { desc = ErrorCodeUnknown.Descriptor() } *ec = desc.Code return nil } // WithMessage creates a new Error struct based on the passed-in info and // overrides the Message property. 
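//
// A hedged usage sketch (ErrorCodeUnknown stands in for any registered code):
//
//	err := ErrorCodeUnknown.WithMessage("something more specific happened")
//	// err.Code == ErrorCodeUnknown
//	// err.Message == "something more specific happened"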
func (ec ErrorCode) WithMessage(message string) Error { return Error{ Code: ec, Message: message, } } // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { return Error{ Code: ec, Message: ec.Message(), }.WithDetail(detail) } // WithArgs creates a new Error struct and sets the Args slice func (ec ErrorCode) WithArgs(args ...interface{}) Error { return Error{ Code: ec, Message: ec.Message(), }.WithArgs(args...) } // Error provides a wrapper around ErrorCode with extra Details provided. type Error struct { Code ErrorCode `json:"code"` Message string `json:"message"` Detail interface{} `json:"detail,omitempty"` // TODO(duglin): See if we need an "args" property so we can do the // variable substitution right before showing the message to the user } var _ error = Error{} // ErrorCode returns the ID/Value of this Error func (e Error) ErrorCode() ErrorCode { return e.Code } // Error returns a human readable representation of the error. func (e Error) Error() string { return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) } // WithDetail will return a new Error, based on the current one, but with // some Detail info added func (e Error) WithDetail(detail interface{}) Error { return Error{ Code: e.Code, Message: e.Message, Detail: detail, } } // WithArgs uses the passed-in list of interface{} as the substitution // variables in the Error's Message string, but returns a new Error func (e Error) WithArgs(args ...interface{}) Error { return Error{ Code: e.Code, Message: fmt.Sprintf(e.Code.Message(), args...), Detail: e.Detail, } } // ErrorDescriptor provides relevant information about a given error code. type ErrorDescriptor struct { // Code is the error code that this descriptor describes. Code ErrorCode // Value provides a unique, string key, often captilized with // underscores, to identify the error code. This value is used as the // keyed value when serializing api errors. Value string // Message is a short, human readable decription of the error condition // included in API responses. Message string // Description provides a complete account of the errors purpose, suitable // for use in documentation. Description string // HTTPStatusCode provides the http status code that is associated with // this error condition. HTTPStatusCode int } // ParseErrorCode returns the value by the string error code. // `ErrorCodeUnknown` will be returned if the error is not known. func ParseErrorCode(value string) ErrorCode { ed, ok := idToDescriptors[value] if ok { return ed.Code } return ErrorCodeUnknown } // Errors provides the envelope for multiple errors and a few sugar methods // for use within the application. type Errors []error var _ error = Errors{} func (errs Errors) Error() string { switch len(errs) { case 0: return "" case 1: return errs[0].Error() default: msg := "errors:\n" for _, err := range errs { msg += err.Error() + "\n" } return msg } } // Len returns the current number of errors. 
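//
// A hedged sketch of how an Errors slice is typically accumulated and
// serialized (the detail value is invented for the example):
//
//	var errs Errors
//	errs = append(errs, ErrorCodeUnknown, ErrorCodeUnknown.WithDetail("more info"))
//	p, _ := json.Marshal(errs) // {"errors":[...]} envelope via MarshalJSON
//	// errs.Len() == 2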
func (errs Errors) Len() int { return len(errs) } // MarshalJSON converts slice of error, ErrorCode or Error into a // slice of Error - then serializes func (errs Errors) MarshalJSON() ([]byte, error) { var tmpErrs struct { Errors []Error `json:"errors,omitempty"` } for _, daErr := range errs { var err Error switch daErr.(type) { case ErrorCode: err = daErr.(ErrorCode).WithDetail(nil) case Error: err = daErr.(Error) default: err = ErrorCodeUnknown.WithDetail(daErr) } // If the Error struct was setup and they forgot to set the // Message field (meaning its "") then grab it from the ErrCode msg := err.Message if msg == "" { msg = err.Code.Message() } tmpErrs.Errors = append(tmpErrs.Errors, Error{ Code: err.Code, Message: msg, Detail: err.Detail, }) } return json.Marshal(tmpErrs) } // UnmarshalJSON deserializes []Error and then converts it into slice of // Error or ErrorCode func (errs *Errors) UnmarshalJSON(data []byte) error { var tmpErrs struct { Errors []Error } if err := json.Unmarshal(data, &tmpErrs); err != nil { return err } var newErrs Errors for _, daErr := range tmpErrs.Errors { // If Message is empty or exactly matches the Code's message string // then just use the Code, no need for a full Error struct if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { // Error's w/o details get converted to ErrorCode newErrs = append(newErrs, daErr.Code) } else { // Error's w/ details are untouched newErrs = append(newErrs, Error{ Code: daErr.Code, Message: daErr.Message, Detail: daErr.Detail, }) } } *errs = newErrs return nil } distribution-2.3.0/registry/api/errcode/errors_test.go000066400000000000000000000132061265472114500232120ustar00rootroot00000000000000package errcode import ( "encoding/json" "net/http" "reflect" "strings" "testing" ) // TestErrorsManagement does a quick check of the Errors type to ensure that // members are properly pushed and marshaled. var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{ Value: "TEST1", Message: "test error 1", Description: `Just a test message #1.`, HTTPStatusCode: http.StatusInternalServerError, }) var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{ Value: "TEST2", Message: "test error 2", Description: `Just a test message #2.`, HTTPStatusCode: http.StatusNotFound, }) var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{ Value: "TEST3", Message: "Sorry %q isn't valid", Description: `Just a test message #3.`, HTTPStatusCode: http.StatusNotFound, }) // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { if len(errorCodeToDescriptors) == 0 { t.Fatal("errors aren't loaded!") } for ec, desc := range errorCodeToDescriptors { if ec != desc.Code { t.Fatalf("error code in descriptor isn't correct, %q != %q", ec, desc.Code) } if idToDescriptors[desc.Value].Code != ec { t.Fatalf("error code in idToDesc isn't correct, %q != %q", idToDescriptors[desc.Value].Code, ec) } if ec.Message() != desc.Message { t.Fatalf("ec.Message doesn't mtach desc.Message: %q != %q", ec.Message(), desc.Message) } // Test (de)serializing the ErrorCode p, err := json.Marshal(ec) if err != nil { t.Fatalf("couldn't marshal ec %v: %v", ec, err) } if len(p) <= 0 { t.Fatalf("expected content in marshaled before for error code %v", ec) } // First, unmarshal to interface and ensure we have a string. 
var ecUnspecified interface{} if err := json.Unmarshal(p, &ecUnspecified); err != nil { t.Fatalf("error unmarshaling error code %v: %v", ec, err) } if _, ok := ecUnspecified.(string); !ok { t.Fatalf("expected a string for error code %v on unmarshal got a %T", ec, ecUnspecified) } // Now, unmarshal with the error code type and ensure they are equal var ecUnmarshaled ErrorCode if err := json.Unmarshal(p, &ecUnmarshaled); err != nil { t.Fatalf("error unmarshaling error code %v: %v", ec, err) } if ecUnmarshaled != ec { t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) } expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) if ec.Error() != expectedErrorString { t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) } } } func TestErrorsManagement(t *testing.T) { var errs Errors errs = append(errs, ErrorCodeTest1) errs = append(errs, ErrorCodeTest2.WithDetail( map[string]interface{}{"digest": "sometestblobsumdoesntmatter"})) errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE")) errs = append(errs, ErrorCodeTest3.WithArgs("BOOGIE").WithDetail("data")) p, err := json.Marshal(errs) if err != nil { t.Fatalf("error marashaling errors: %v", err) } expectedJSON := `{"errors":[` + `{"code":"TEST1","message":"test error 1"},` + `{"code":"TEST2","message":"test error 2","detail":{"digest":"sometestblobsumdoesntmatter"}},` + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid"},` + `{"code":"TEST3","message":"Sorry \"BOOGIE\" isn't valid","detail":"data"}` + `]}` if string(p) != expectedJSON { t.Fatalf("unexpected json:\ngot:\n%q\n\nexpected:\n%q", string(p), expectedJSON) } // Now test the reverse var unmarshaled Errors if err := json.Unmarshal(p, &unmarshaled); err != nil { t.Fatalf("unexpected error unmarshaling error envelope: %v", err) } if !reflect.DeepEqual(unmarshaled, errs) { t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) } // Test the arg substitution stuff e1 := unmarshaled[3].(Error) exp1 := `Sorry "BOOGIE" isn't valid` if e1.Message != exp1 { t.Fatalf("Wrong msg, got:\n%q\n\nexpected:\n%q", e1.Message, exp1) } exp1 = "test3: " + exp1 if e1.Error() != exp1 { t.Fatalf("Error() didn't return the right string, got:%s\nexpected:%s", e1.Error(), exp1) } // Test again with a single value this time errs = Errors{ErrorCodeUnknown} expectedJSON = "{\"errors\":[{\"code\":\"UNKNOWN\",\"message\":\"unknown error\"}]}" p, err = json.Marshal(errs) if err != nil { t.Fatalf("error marashaling errors: %v", err) } if string(p) != expectedJSON { t.Fatalf("unexpected json: %q != %q", string(p), expectedJSON) } // Now test the reverse unmarshaled = nil if err := json.Unmarshal(p, &unmarshaled); err != nil { t.Fatalf("unexpected error unmarshaling error envelope: %v", err) } if !reflect.DeepEqual(unmarshaled, errs) { t.Fatalf("errors not equal after round trip:\nunmarshaled:\n%#v\n\nerrs:\n%#v", unmarshaled, errs) } // Verify that calling WithArgs() more than once does the right thing. // Meaning creates a new Error and uses the ErrorCode Message e1 = ErrorCodeTest3.WithArgs("test1") e2 := e1.WithArgs("test2") if &e1 == &e2 { t.Fatalf("args: e2 and e1 should not be the same, but they are") } if e2.Message != `Sorry "test2" isn't valid` { t.Fatalf("e2 had wrong message: %q", e2.Message) } // Verify that calling WithDetail() more than once does the right thing. 
// Meaning creates a new Error and overwrites the old detail field e1 = ErrorCodeTest3.WithDetail("stuff1") e2 = e1.WithDetail("stuff2") if &e1 == &e2 { t.Fatalf("detail: e2 and e1 should not be the same, but they are") } if e2.Detail != `stuff2` { t.Fatalf("e2 had wrong detail: %q", e2.Detail) } } distribution-2.3.0/registry/api/errcode/handler.go000066400000000000000000000017441265472114500222600ustar00rootroot00000000000000package errcode import ( "encoding/json" "net/http" ) // ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err // and sets the content-type header to 'application/json'. It will handle // ErrorCoder and Errors, and if necessary will create an envelope. func ServeJSON(w http.ResponseWriter, err error) error { w.Header().Set("Content-Type", "application/json; charset=utf-8") var sc int switch errs := err.(type) { case Errors: if len(errs) < 1 { break } if err, ok := errs[0].(ErrorCoder); ok { sc = err.ErrorCode().Descriptor().HTTPStatusCode } case ErrorCoder: sc = errs.ErrorCode().Descriptor().HTTPStatusCode err = Errors{err} // create an envelope. default: // We just have an unhandled error type, so just place in an envelope // and move along. err = Errors{err} } if sc == 0 { sc = http.StatusInternalServerError } w.WriteHeader(sc) if err := json.NewEncoder(w).Encode(err); err != nil { return err } return nil } distribution-2.3.0/registry/api/errcode/register.go000066400000000000000000000077101265472114500224660ustar00rootroot00000000000000package errcode import ( "fmt" "net/http" "sort" "sync" ) var ( errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} idToDescriptors = map[string]ErrorDescriptor{} groupToDescriptors = map[string][]ErrorDescriptor{} ) var ( // ErrorCodeUnknown is a generic error that can be used as a last // resort if there is no situation-specific error message that can be used ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ Value: "UNKNOWN", Message: "unknown error", Description: `Generic error returned when the error does not have an API classification.`, HTTPStatusCode: http.StatusInternalServerError, }) // ErrorCodeUnsupported is returned when an operation is not supported. ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ Value: "UNSUPPORTED", Message: "The operation is unsupported.", Description: `The operation was unsupported due to a missing implementation or invalid set of parameters.`, HTTPStatusCode: http.StatusMethodNotAllowed, }) // ErrorCodeUnauthorized is returned if a request requires // authentication. ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ Value: "UNAUTHORIZED", Message: "authentication required", Description: `The access controller was unable to authenticate the client. Often this will be accompanied by a Www-Authenticate HTTP response header indicating how to authenticate.`, HTTPStatusCode: http.StatusUnauthorized, }) // ErrorCodeDenied is returned if a client does not have sufficient // permission to perform an action. ErrorCodeDenied = Register("errcode", ErrorDescriptor{ Value: "DENIED", Message: "requested access to the resource is denied", Description: `The access controller denied access for the operation on a resource.`, HTTPStatusCode: http.StatusForbidden, }) // ErrorCodeUnavailable provides a common error to report unavialability // of a service or endpoint. 
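//
// New codes are added with Register and, once registered, can be returned to
// API clients with ServeJSON. A hedged sketch (the group name and descriptor
// values below are invented for the example):
//
//	var ErrorCodeExample = Register("myapp.errors", ErrorDescriptor{
//		Value:          "EXAMPLE",
//		Message:        "an example error",
//		Description:    "Registered only to illustrate the mechanism.",
//		HTTPStatusCode: http.StatusBadRequest,
//	})
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		_ = ServeJSON(w, ErrorCodeExample.WithDetail("illustrative detail"))
//	}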
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ Value: "UNAVAILABLE", Message: "service unavailable", Description: "Returned when a service is not available", HTTPStatusCode: http.StatusServiceUnavailable, }) ) var nextCode = 1000 var registerLock sync.Mutex // Register will make the passed-in error known to the environment and // return a new ErrorCode func Register(group string, descriptor ErrorDescriptor) ErrorCode { registerLock.Lock() defer registerLock.Unlock() descriptor.Code = ErrorCode(nextCode) if _, ok := idToDescriptors[descriptor.Value]; ok { panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) } if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) } groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) errorCodeToDescriptors[descriptor.Code] = descriptor idToDescriptors[descriptor.Value] = descriptor nextCode++ return descriptor.Code } type byValue []ErrorDescriptor func (a byValue) Len() int { return len(a) } func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } // GetGroupNames returns the list of Error group names that are registered func GetGroupNames() []string { keys := []string{} for k := range groupToDescriptors { keys = append(keys, k) } sort.Strings(keys) return keys } // GetErrorCodeGroup returns the named group of error descriptors func GetErrorCodeGroup(name string) []ErrorDescriptor { desc := groupToDescriptors[name] sort.Sort(byValue(desc)) return desc } // GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are // registered, irrespective of what group they're in func GetErrorAllDescriptors() []ErrorDescriptor { result := []ErrorDescriptor{} for _, group := range GetGroupNames() { result = append(result, GetErrorCodeGroup(group)...) } sort.Sort(byValue(result)) return result } distribution-2.3.0/registry/api/v2/000077500000000000000000000000001265472114500172125ustar00rootroot00000000000000distribution-2.3.0/registry/api/v2/descriptors.go000066400000000000000000001440701265472114500221100ustar00rootroot00000000000000package v2 import ( "net/http" "regexp" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" ) var ( nameParameterDescriptor = ParameterDescriptor{ Name: "name", Type: "string", Format: reference.NameRegexp.String(), Required: true, Description: `Name of the target repository.`, } referenceParameterDescriptor = ParameterDescriptor{ Name: "reference", Type: "string", Format: reference.TagRegexp.String(), Required: true, Description: `Tag or digest of the target manifest.`, } uuidParameterDescriptor = ParameterDescriptor{ Name: "uuid", Type: "opaque", Required: true, Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", } digestPathParameter = ParameterDescriptor{ Name: "digest", Type: "path", Required: true, Format: digest.DigestRegexp.String(), Description: `Digest of desired blob.`, } hostHeader = ParameterDescriptor{ Name: "Host", Type: "string", Description: "Standard HTTP Host Header. 
Should be set to the registry host.", Format: "", Examples: []string{"registry-1.docker.io"}, } authHeader = ParameterDescriptor{ Name: "Authorization", Type: "string", Description: "An RFC7235 compliant authorization header.", Format: " ", Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, } authChallengeHeader = ParameterDescriptor{ Name: "WWW-Authenticate", Type: "string", Description: "An RFC7235 compliant authentication challenge header.", Format: ` realm="", ..."`, Examples: []string{ `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, }, } contentLengthZeroHeader = ParameterDescriptor{ Name: "Content-Length", Description: "The `Content-Length` header must be zero and the body must be empty.", Type: "integer", Format: "0", } dockerUploadUUIDHeader = ParameterDescriptor{ Name: "Docker-Upload-UUID", Description: "Identifies the docker upload uuid for the current request.", Type: "uuid", Format: "", } digestHeader = ParameterDescriptor{ Name: "Docker-Content-Digest", Description: "Digest of the targeted content for the request.", Type: "digest", Format: "", } linkHeader = ParameterDescriptor{ Name: "Link", Type: "link", Description: "RFC5988 compliant rel='next' with URL to next result set, if available", Format: `<?n=&last=>; rel="next"`, } paginationParameters = []ParameterDescriptor{ { Name: "n", Type: "integer", Description: "Limit the number of entries in each response. It not present, all entries will be returned.", Format: "", Required: false, }, { Name: "last", Type: "string", Description: "Result set will include values lexically after last.", Format: "", Required: false, }, } unauthorizedResponseDescriptor = ResponseDescriptor{ Name: "Authentication Required", StatusCode: http.StatusUnauthorized, Description: "The client is not authenticated.", Headers: []ParameterDescriptor{ authChallengeHeader, { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnauthorized, }, } repositoryNotFoundResponseDescriptor = ResponseDescriptor{ Name: "No Such Repository Error", StatusCode: http.StatusNotFound, Description: "The repository is not known to the registry.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, }, } deniedResponseDescriptor = ResponseDescriptor{ Name: "Access Denied", StatusCode: http.StatusForbidden, Description: "The client does not have required access to the repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeDenied, }, } ) const ( manifestBody = `{ "name": , "tag": , "fsLayers": [ { "blobSum": "" }, ... ] ], "history": , "signature": }` errorsBody = `{ "errors:" [ { "code": , "message": "", "detail": ... }, ... ] }` unauthorizedErrorsBody = `{ "errors:" [ { "code": "UNAUTHORIZED", "message": "access to the requested resource is not authorized", "detail": ... }, ... 
] }` ) // APIDescriptor exports descriptions of the layout of the v2 registry API. var APIDescriptor = struct { // RouteDescriptors provides a list of the routes available in the API. RouteDescriptors []RouteDescriptor }{ RouteDescriptors: routeDescriptors, } // RouteDescriptor describes a route specified by name. type RouteDescriptor struct { // Name is the name of the route, as specified in RouteNameXXX exports. // These names a should be considered a unique reference for a route. If // the route is registered with gorilla, this is the name that will be // used. Name string // Path is a gorilla/mux-compatible regexp that can be used to match the // route. For any incoming method and path, only one route descriptor // should match. Path string // Entity should be a short, human-readalbe description of the object // targeted by the endpoint. Entity string // Description should provide an accurate overview of the functionality // provided by the route. Description string // Methods should describe the various HTTP methods that may be used on // this route, including request and response formats. Methods []MethodDescriptor } // MethodDescriptor provides a description of the requests that may be // conducted with the target method. type MethodDescriptor struct { // Method is an HTTP method, such as GET, PUT or POST. Method string // Description should provide an overview of the functionality provided by // the covered method, suitable for use in documentation. Use of markdown // here is encouraged. Description string // Requests is a slice of request descriptors enumerating how this // endpoint may be used. Requests []RequestDescriptor } // RequestDescriptor covers a particular set of headers and parameters that // can be carried out with the parent method. Its most helpful to have one // RequestDescriptor per API use case. type RequestDescriptor struct { // Name provides a short identifier for the request, usable as a title or // to provide quick context for the particalar request. Name string // Description should cover the requests purpose, covering any details for // this particular use case. Description string // Headers describes headers that must be used with the HTTP request. Headers []ParameterDescriptor // PathParameters enumerate the parameterized path components for the // given request, as defined in the route's regular expression. PathParameters []ParameterDescriptor // QueryParameters provides a list of query parameters for the given // request. QueryParameters []ParameterDescriptor // Body describes the format of the request body. Body BodyDescriptor // Successes enumerates the possible responses that are considered to be // the result of a successful request. Successes []ResponseDescriptor // Failures covers the possible failures from this particular request. Failures []ResponseDescriptor } // ResponseDescriptor describes the components of an API response. type ResponseDescriptor struct { // Name provides a short identifier for the response, usable as a title or // to provide quick context for the particalar response. Name string // Description should provide a brief overview of the role of the // response. Description string // StatusCode specifies the status recieved by this particular response. StatusCode int // Headers covers any headers that may be returned from the response. Headers []ParameterDescriptor // Fields describes any fields that may be present in the response. 
Fields []ParameterDescriptor // ErrorCodes enumerates the error codes that may be returned along with // the response. ErrorCodes []errcode.ErrorCode // Body describes the body of the response, if any. Body BodyDescriptor } // BodyDescriptor describes a request body and its expected content type. For // the most part, it should be example json or some placeholder for body // data in documentation. type BodyDescriptor struct { ContentType string Format string } // ParameterDescriptor describes the format of a request parameter, which may // be a header, path parameter or query parameter. type ParameterDescriptor struct { // Name is the name of the parameter, either of the path component or // query parameter. Name string // Type specifies the type of the parameter, such as string, integer, etc. Type string // Description provides a human-readable description of the parameter. Description string // Required means the field is required when set. Required bool // Format is a specifying the string format accepted by this parameter. Format string // Regexp is a compiled regular expression that can be used to validate // the contents of the parameter. Regexp *regexp.Regexp // Examples provides multiple examples for the values that might be valid // for this parameter. Examples []string } var routeDescriptors = []RouteDescriptor{ { Name: RouteNameBase, Path: "/v2/", Entity: "Base", Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, Methods: []MethodDescriptor{ { Method: "GET", Description: "Check that the endpoint implements Docker Registry API V2.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, Successes: []ResponseDescriptor{ { Description: "The API implements V2 protocol and is accessible.", StatusCode: http.StatusOK, }, }, Failures: []ResponseDescriptor{ { Description: "The registry does not implement the V2 API.", StatusCode: http.StatusNotFound, }, unauthorizedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameTags, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", Entity: "Tags", Description: "Retrieve information about tags.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Fetch the tags under the repository identified by `name`.", Requests: []RequestDescriptor{ { Name: "Tags", Description: "Return all tags for the repository", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Description: "A list of tags for the named repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "name": , "tags": [ , ... 
] }`, }, }, }, Failures: []ResponseDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Tags Paginated", Description: "Return a portion of the tags for the specified repository.", PathParameters: []ParameterDescriptor{nameParameterDescriptor}, QueryParameters: paginationParameters, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Description: "A list of tags for the named repository.", Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, linkHeader, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "name": , "tags": [ , ... ], }`, }, }, }, Failures: []ResponseDescriptor{ unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameManifest, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", Entity: "Manifest", Description: "Create, update, delete and retrieve manifests.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ digestHeader, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: manifestBody, }, }, }, Failures: []ResponseDescriptor{ { Description: "The name or reference was invalid.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "PUT", Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: manifestBody, }, Successes: []ResponseDescriptor{ { Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Description: "The canonical location url of the uploaded manifest.", Format: "", }, contentLengthZeroHeader, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Manifest", Description: "The received manifest was invalid in some way, as described by the error codes. 
The client should resolve the issue and retry the request.", StatusCode: http.StatusBadRequest, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, ErrorCodeManifestInvalid, ErrorCodeManifestUnverified, ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, { Name: "Missing Layer(s)", Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "errors:" [{ "code": "BLOB_UNKNOWN", "message": "blob unknown to registry", "detail": { "digest": "" } }, ... ] }`, }, }, { Name: "Not allowed", Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, }, }, }, }, { Method: "DELETE", Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, referenceParameterDescriptor, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusAccepted, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Reference", Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeTagInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, { Name: "Unknown Manifest", Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeManifestUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Name: "Not allowed", Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, }, }, }, }, }, }, { Name: RouteNameBlob, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", Entity: "Blob", Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve the blob from the registry identified by `digest`. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Name: "Fetch Blob", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "The length of the requested blob content.", Format: "", }, digestHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, }, { Description: "The blob identified by `digest` is available at the provided location.", StatusCode: http.StatusTemporaryRedirect, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Description: "The location where the layer should be accessible.", Format: "", }, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Fetch Blob Part", Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Range", Type: "string", Description: "HTTP Range header specifying blob chunk.", Format: "bytes=-", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { Description: "The blob identified by `digest` is available. 
The specified chunk of blob content will be present in the body of the request.", StatusCode: http.StatusPartialContent, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "The length of the requested blob chunk.", Format: "", }, { Name: "Content-Range", Type: "byte range", Description: "Content range of blob chunk.", Format: "bytes -/", }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, }, }, Failures: []ResponseDescriptor{ { Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeDigestInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "DELETE", Description: "Delete the blob identified by `name` and `digest`", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, digestPathParameter, }, Successes: []ResponseDescriptor{ { StatusCode: http.StatusAccepted, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "0", Format: "0", }, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", StatusCode: http.StatusNotFound, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameUnknown, ErrorCodeBlobUnknown, }, }, { Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", StatusCode: http.StatusMethodNotAllowed, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, // TODO(stevvooe): We may want to add a PUT request here to // kickoff an upload of a blob, integrated with the blob upload // API. }, }, { Name: RouteNameBlobUpload, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", Entity: "Initiate Blob Upload", Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", Methods: []MethodDescriptor{ { Method: "POST", Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. 
Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", Requests: []RequestDescriptor{ { Name: "Initiate Monolithic Blob Upload", Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Length", Type: "integer", Format: "", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "digest", Type: "query", Format: "", Regexp: digest.DigestRegexp, Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, }, }, Body: BodyDescriptor{ ContentType: "application/octect-stream", Format: "", }, Successes: []ResponseDescriptor{ { Description: "The blob has been created in the registry and is available at the provided location.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Name: "Not allowed", Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Initiate Resumable Blob Upload", Description: "Initiate a resumable blob upload with an empty request body.", Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, Successes: []ResponseDescriptor{ { Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", StatusCode: http.StatusAccepted, Headers: []ParameterDescriptor{ contentLengthZeroHeader, { Name: "Location", Type: "url", Format: "/v2//blobs/uploads/", Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Format: "0-0", Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", }, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Mount Blob", Description: "Mount a blob identified by the `mount` parameter from another repository.", Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "mount", Type: "query", Format: "", Regexp: digest.DigestRegexp, Description: `Digest of blob to mount from the source repository.`, }, { Name: "from", Type: "query", Format: "", Regexp: reference.NameRegexp, Description: `Name of the source repository.`, }, }, Successes: []ResponseDescriptor{ { Description: "The blob has been mounted in the repository and is available at the provided location.", StatusCode: http.StatusCreated, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Name: "Invalid Name or Digest", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, }, }, { Name: "Not allowed", Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", StatusCode: http.StatusMethodNotAllowed, ErrorCodes: []errcode.ErrorCode{ errcode.ErrorCodeUnsupported, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameBlobUploadChunk, Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", Entity: "Blob Upload", Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", Requests: []RequestDescriptor{ { Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", Headers: []ParameterDescriptor{ hostHeader, authHeader, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Successes: []ResponseDescriptor{ { Name: "Upload Progress", Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Range", Type: "header", Format: "0-", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "PATCH", Description: "Upload a chunk of data for the specified upload.", Requests: []RequestDescriptor{ { Name: "Stream upload", Description: "Upload a stream of data to upload without completing the upload.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ { Name: "Data Accepted", Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "/v2//blobs/uploads/", Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Type: "header", Format: "0-", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, { Name: "Chunked upload", Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Range", Type: "header", Format: "-", Required: true, Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. 
Note that this is a non-standard use of the `Content-Range` header.", }, { Name: "Content-Length", Type: "integer", Format: "", Description: "Length of the chunk being uploaded, corresponding the length of the request body.", }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ { Name: "Chunk Accepted", Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "/v2//blobs/uploads/", Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", }, { Name: "Range", Type: "header", Format: "0-", Description: "Range indicating the current progress of the upload.", }, contentLengthZeroHeader, dockerUploadUUIDHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", StatusCode: http.StatusRequestedRangeNotSatisfiable, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "PUT", Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", Requests: []RequestDescriptor{ { Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", Headers: []ParameterDescriptor{ hostHeader, authHeader, { Name: "Content-Length", Type: "integer", Format: "", Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.", }, }, PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, QueryParameters: []ParameterDescriptor{ { Name: "digest", Type: "string", Format: "", Regexp: digest.DigestRegexp, Required: true, Description: `Digest of uploaded blob.`, }, }, Body: BodyDescriptor{ ContentType: "application/octet-stream", Format: "", }, Successes: []ResponseDescriptor{ { Name: "Upload Complete", Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ { Name: "Location", Type: "url", Format: "", Description: "The canonical location of the blob for retrieval", }, { Name: "Content-Range", Type: "header", Format: "-", Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. 
Note that this is a non-standard use of the `Content-Range` header.", }, contentLengthZeroHeader, digestHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "There was an error processing the upload and it must be restarted.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeDigestInvalid, ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, errcode.ErrorCodeUnsupported, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The upload must be restarted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, { Method: "DELETE", Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", Requests: []RequestDescriptor{ { Description: "Cancel the upload specified by `uuid`.", PathParameters: []ParameterDescriptor{ nameParameterDescriptor, uuidParameterDescriptor, }, Headers: []ParameterDescriptor{ hostHeader, authHeader, contentLengthZeroHeader, }, Successes: []ResponseDescriptor{ { Name: "Upload Deleted", Description: "The upload has been successfully deleted.", StatusCode: http.StatusNoContent, Headers: []ParameterDescriptor{ contentLengthZeroHeader, }, }, }, Failures: []ResponseDescriptor{ { Description: "An error was encountered processing the delete. The client may ignore this error.", StatusCode: http.StatusBadRequest, ErrorCodes: []errcode.ErrorCode{ ErrorCodeNameInvalid, ErrorCodeBlobUploadInvalid, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, { Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", StatusCode: http.StatusNotFound, ErrorCodes: []errcode.ErrorCode{ ErrorCodeBlobUploadUnknown, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: errorsBody, }, }, unauthorizedResponseDescriptor, repositoryNotFoundResponseDescriptor, deniedResponseDescriptor, }, }, }, }, }, }, { Name: RouteNameCatalog, Path: "/v2/_catalog", Entity: "Catalog", Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", Methods: []MethodDescriptor{ { Method: "GET", Description: "Retrieve a sorted, json list of repositories available in the registry.", Requests: []RequestDescriptor{ { Name: "Catalog Fetch Complete", Description: "Request an unabridged list of repositories available.", Successes: []ResponseDescriptor{ { Description: "Returns the unabridged list of repositories as a json response.", StatusCode: http.StatusOK, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, }, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "repositories": [ , ... 
] }`, }, }, }, }, { Name: "Catalog Fetch Paginated", Description: "Return the specified portion of repositories.", QueryParameters: paginationParameters, Successes: []ResponseDescriptor{ { StatusCode: http.StatusOK, Body: BodyDescriptor{ ContentType: "application/json; charset=utf-8", Format: `{ "repositories": [ , ... ] "next": "?last=&n=" }`, }, Headers: []ParameterDescriptor{ { Name: "Content-Length", Type: "integer", Description: "Length of the JSON response body.", Format: "", }, linkHeader, }, }, }, }, }, }, }, }, } var routeDescriptorsMap map[string]RouteDescriptor func init() { routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) for _, descriptor := range routeDescriptors { routeDescriptorsMap[descriptor.Name] = descriptor } } distribution-2.3.0/registry/api/v2/doc.go000066400000000000000000000007351265472114500203130ustar00rootroot00000000000000// Package v2 describes routes, urls and the error codes used in the Docker // Registry JSON HTTP API V2. In addition to declarations, descriptors are // provided for routes and error codes that can be used for implementation and // automatically generating documentation. // // Definitions here are considered to be locked down for the V2 registry api. // Any changes must be considered carefully and should not proceed without a // change proposal in docker core. package v2 distribution-2.3.0/registry/api/v2/errors.go000066400000000000000000000127561265472114500210700ustar00rootroot00000000000000package v2 import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) const errGroup = "registry.api.v2" var ( // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "DIGEST_INVALID", Message: "provided digest did not match uploaded content", Description: `When a blob is uploaded, the registry will check that the content matches the digest provided by the client. The error may include a detail structure with the key "digest", including the invalid digest string. This error may also be returned when a manifest includes an invalid layer digest.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeSizeInvalid is returned when uploading a blob if the provided ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "SIZE_INVALID", Message: "provided length did not match content length", Description: `When a layer is uploaded, the provided size will be checked against the uploaded content. If they do not match, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeNameInvalid is returned when the name in the manifest does not // match the provided name. ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NAME_INVALID", Message: "invalid repository name", Description: `Invalid repository name encountered either during manifest validation or any API operation.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeTagInvalid is returned when the tag in the manifest does not // match the provided tag. ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "TAG_INVALID", Message: "manifest tag did not match URI", Description: `During a manifest upload, if the tag in the manifest does not match the uri tag, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeNameUnknown when the repository name is not known. 
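//
// Editorial illustration (not part of the original source): because each of
// these codes is registered under the errGroup constant above, the complete
// v2 descriptor set can be enumerated through the errcode helpers shown
// earlier, for example when generating API documentation:
//
//	for _, desc := range errcode.GetErrorCodeGroup(errGroup) {
//		fmt.Printf("%s -> HTTP %d: %s\n", desc.Value, desc.HTTPStatusCode, desc.Message)
//	}
//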
ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NAME_UNKNOWN", Message: "repository name not known to registry", Description: `This is returned if the name used during an operation is unknown to the registry.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeManifestUnknown returned when image manifest is unknown. ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNKNOWN", Message: "manifest unknown", Description: `This error is returned when the manifest, identified by name and tag is unknown to the repository.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeManifestInvalid returned when an image manifest is invalid, // typically during a PUT operation. This error encompasses all errors // encountered during manifest validation that aren't signature errors. ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_INVALID", Message: "manifest invalid", Description: `During upload, manifests undergo several checks ensuring validity. If those checks fail, this error may be returned, unless a more specific error is included. The detail will contain information the failed validation.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeManifestUnverified is returned when the manifest fails // signature verfication. ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_UNVERIFIED", Message: "manifest failed signature verification", Description: `During manifest upload, if the manifest fails signature verification, this error will be returned.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeManifestBlobUnknown is returned when a manifest blob is // unknown to the registry. ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MANIFEST_BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a manifest blob is unknown to the registry.`, HTTPStatusCode: http.StatusBadRequest, }) // ErrorCodeBlobUnknown is returned when a blob is unknown to the // registry. This can happen when the manifest references a nonexistent // layer or the result is not found by a blob fetch. ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UNKNOWN", Message: "blob unknown to registry", Description: `This error may be returned when a blob is unknown to the registry in a specified repository. This can be returned with a standard get or if a manifest references an unknown layer during upload.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_UNKNOWN", Message: "blob upload unknown to registry", Description: `If a blob upload has been cancelled or was never started, this error code may be returned.`, HTTPStatusCode: http.StatusNotFound, }) // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. 
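//
// Editorial illustration (not part of the original source): a handler would
// typically surface one of these registered codes to the client using the
// errcode package's WithDetail and ServeJSON helpers (assumed here; they are
// defined outside this excerpt), roughly:
//
//	// w is the handler's http.ResponseWriter; the digest value is a placeholder.
//	detail := map[string]string{"digest": "sha256:..."}
//	errcode.ServeJSON(w, ErrorCodeBlobUnknown.WithDetail(detail))
//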
ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "BLOB_UPLOAD_INVALID", Message: "blob upload invalid", Description: `The blob upload encountered an error and can no longer proceed.`, HTTPStatusCode: http.StatusNotFound, }) ) distribution-2.3.0/registry/api/v2/routes.go000066400000000000000000000023621265472114500210650ustar00rootroot00000000000000package v2 import "github.com/gorilla/mux" // The following are definitions of the name under which all V2 routes are // registered. These symbols can be used to look up a route based on the name. const ( RouteNameBase = "base" RouteNameManifest = "manifest" RouteNameTags = "tags" RouteNameBlob = "blob" RouteNameBlobUpload = "blob-upload" RouteNameBlobUploadChunk = "blob-upload-chunk" RouteNameCatalog = "catalog" ) var allEndpoints = []string{ RouteNameManifest, RouteNameCatalog, RouteNameTags, RouteNameBlob, RouteNameBlobUpload, RouteNameBlobUploadChunk, } // Router builds a gorilla router with named routes for the various API // methods. This can be used directly by both server implementations and // clients. func Router() *mux.Router { return RouterWithPrefix("") } // RouterWithPrefix builds a gorilla router with a configured prefix // on all routes. func RouterWithPrefix(prefix string) *mux.Router { rootRouter := mux.NewRouter() router := rootRouter if prefix != "" { router = router.PathPrefix(prefix).Subrouter() } router.StrictSlash(true) for _, descriptor := range routeDescriptors { router.Path(descriptor.Path).Name(descriptor.Name) } return rootRouter } distribution-2.3.0/registry/api/v2/routes_test.go000066400000000000000000000230531265472114500221240ustar00rootroot00000000000000package v2 import ( "encoding/json" "fmt" "math/rand" "net/http" "net/http/httptest" "reflect" "strings" "testing" "time" "github.com/gorilla/mux" ) type routeTestCase struct { RequestURI string ExpectedURI string Vars map[string]string RouteName string StatusCode int } // TestRouter registers a test handler with all the routes and ensures that // each route returns the expected path variables. Not method verification is // present. This not meant to be exhaustive but as check to ensure that the // expected variables are extracted. // // This may go away as the application structure comes together. 
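//
// Editorial illustration (not part of the original source): server code wires
// handlers to the same named routes this test exercises, along the lines of
// the sketch below; the handler body is a placeholder.
//
//	router := Router()
//	router.GetRoute(RouteNameManifest).Handler(http.HandlerFunc(
//		func(w http.ResponseWriter, r *http.Request) {
//			vars := mux.Vars(r)
//			_, _ = vars["name"], vars["reference"] // repository name and tag/digest
//			w.WriteHeader(http.StatusOK)
//		}))
//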
func TestRouter(t *testing.T) { testCases := []routeTestCase{ { RouteName: RouteNameBase, RequestURI: "/v2/", Vars: map[string]string{}, }, { RouteName: RouteNameManifest, RequestURI: "/v2/foo/manifests/bar", Vars: map[string]string{ "name": "foo", "reference": "bar", }, }, { RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/tag", Vars: map[string]string{ "name": "foo/bar", "reference": "tag", }, }, { RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", Vars: map[string]string{ "name": "foo/bar", "reference": "sha256:abcdef01234567890", }, }, { RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/tags/list", Vars: map[string]string{ "name": "foo/bar", }, }, { RouteName: RouteNameTags, RequestURI: "/v2/docker.com/foo/tags/list", Vars: map[string]string{ "name": "docker.com/foo", }, }, { RouteName: RouteNameTags, RequestURI: "/v2/docker.com/foo/bar/tags/list", Vars: map[string]string{ "name": "docker.com/foo/bar", }, }, { RouteName: RouteNameTags, RequestURI: "/v2/docker.com/foo/bar/baz/tags/list", Vars: map[string]string{ "name": "docker.com/foo/bar/baz", }, }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", Vars: map[string]string{ "name": "foo/bar", "digest": "sha256:abcdef0919234", }, }, { RouteName: RouteNameBlobUpload, RequestURI: "/v2/foo/bar/blobs/uploads/", Vars: map[string]string{ "name": "foo/bar", }, }, { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/uuid", Vars: map[string]string{ "name": "foo/bar", "uuid": "uuid", }, }, { // support uuid proper RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", Vars: map[string]string{ "name": "foo/bar", "uuid": "D95306FA-FAD3-4E36-8D41-CF1C93EF8286", }, }, { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", Vars: map[string]string{ "name": "foo/bar", "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA==", }, }, { // supports urlsafe base64 RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", Vars: map[string]string{ "name": "foo/bar", "uuid": "RDk1MzA2RkEtRkFEMy00RTM2LThENDEtQ0YxQzkzRUY4Mjg2IA_-==", }, }, { // does not match RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/bar/blobs/uploads/totalandcompletejunk++$$-==", StatusCode: http.StatusNotFound, }, { // Check ambiguity: ensure we can distinguish between tags for // "foo/bar/image/image" and image for "foo/bar/image" with tag // "tags" RouteName: RouteNameManifest, RequestURI: "/v2/foo/bar/manifests/manifests/tags", Vars: map[string]string{ "name": "foo/bar/manifests", "reference": "tags", }, }, { // This case presents an ambiguity between foo/bar with tag="tags" // and list tags for "foo/bar/manifest" RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/manifests/tags/list", Vars: map[string]string{ "name": "foo/bar/manifests", }, }, { RouteName: RouteNameManifest, RequestURI: "/v2/locahost:8080/foo/bar/baz/manifests/tag", Vars: map[string]string{ "name": "locahost:8080/foo/bar/baz", "reference": "tag", }, }, } checkTestRouter(t, testCases, "", true) checkTestRouter(t, testCases, "/prefix/", true) } func TestRouterWithPathTraversals(t *testing.T) { testCases := []routeTestCase{ { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", ExpectedURI: 
"/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", StatusCode: http.StatusNotFound, }, { // Testing for path traversal attack handling RouteName: RouteNameTags, RequestURI: "/v2/foo/../bar/baz/tags/list", ExpectedURI: "/v2/bar/baz/tags/list", Vars: map[string]string{ "name": "bar/baz", }, }, } checkTestRouter(t, testCases, "", false) } func TestRouterWithBadCharacters(t *testing.T) { if testing.Short() { testCases := []routeTestCase{ { RouteName: RouteNameBlobUploadChunk, RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", StatusCode: http.StatusNotFound, }, { // Testing for path traversal attack handling RouteName: RouteNameTags, RequestURI: "/v2/foo/不bar/tags/list", StatusCode: http.StatusNotFound, }, } checkTestRouter(t, testCases, "", true) } else { // in the long version we're going to fuzz the router // with random UTF8 characters not in the 128 bit ASCII range. // These are not valid characters for the router and we expect // 404s on every test. rand.Seed(time.Now().UTC().UnixNano()) testCases := make([]routeTestCase, 1000) for idx := range testCases { testCases[idx] = routeTestCase{ RouteName: RouteNameTags, RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), StatusCode: http.StatusNotFound, } } checkTestRouter(t, testCases, "", true) } } func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { router := RouterWithPrefix(prefix) testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { testCase := routeTestCase{ RequestURI: r.RequestURI, Vars: mux.Vars(r), RouteName: mux.CurrentRoute(r).GetName(), } enc := json.NewEncoder(w) if err := enc.Encode(testCase); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } }) // Startup test server server := httptest.NewServer(router) for _, testcase := range testCases { testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI // Register the endpoint route := router.GetRoute(testcase.RouteName) if route == nil { t.Fatalf("route for name %q not found", testcase.RouteName) } route.Handler(testHandler) u := server.URL + testcase.RequestURI resp, err := http.Get(u) if err != nil { t.Fatalf("error issuing get request: %v", err) } if testcase.StatusCode == 0 { // Override default, zero-value testcase.StatusCode = http.StatusOK } if testcase.ExpectedURI == "" { // Override default, zero-value testcase.ExpectedURI = testcase.RequestURI } if resp.StatusCode != testcase.StatusCode { t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) } if testcase.StatusCode != http.StatusOK { resp.Body.Close() // We don't care about json response. continue } dec := json.NewDecoder(resp.Body) var actualRouteInfo routeTestCase if err := dec.Decode(&actualRouteInfo); err != nil { t.Fatalf("error reading json response: %v", err) } // Needs to be set out of band actualRouteInfo.StatusCode = resp.StatusCode if actualRouteInfo.RequestURI != testcase.ExpectedURI { t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) } if actualRouteInfo.RouteName != testcase.RouteName { t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) } // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want // that to make the comparison fail. 
We're otherwise done with the testcase so empty the // testcase.ExpectedURI testcase.ExpectedURI = "" if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } resp.Body.Close() } } // -------------- START LICENSED CODE -------------- // The following code is derivative of https://github.com/google/gofuzz // gofuzz is licensed under the Apache License, Version 2.0, January 2004, // a copy of which can be found in the LICENSE file at the root of this // repository. // These functions allow us to generate strings containing only multibyte // characters that are invalid in our URLs. They are used above for fuzzing // to ensure we always get 404s on these invalid strings type charRange struct { first, last rune } // choose returns a random unicode character from the given range, using the // given randomness source. func (r *charRange) choose() rune { count := int64(r.last - r.first) return r.first + rune(rand.Int63n(count)) } var unicodeRanges = []charRange{ {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } func randomString(length int) string { runes := make([]rune, length) for i := range runes { runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() } return string(runes) } // -------------- END LICENSED CODE -------------- distribution-2.3.0/registry/api/v2/urls.go000066400000000000000000000145171265472114500205360ustar00rootroot00000000000000package v2 import ( "net/http" "net/url" "strings" "github.com/docker/distribution/reference" "github.com/gorilla/mux" ) // URLBuilder creates registry API urls from a single base endpoint. It can be // used to create urls for use in a registry client or server. // // All urls will be created from the given base, including the api version. // For example, if a root of "/foo/" is provided, urls generated will be fall // under "/foo/v2/...". Most application will only provide a schema, host and // port, such as "https://localhost:5000/". type URLBuilder struct { root *url.URL // url root (ie http://localhost/) router *mux.Router } // NewURLBuilder creates a URLBuilder with provided root url object. func NewURLBuilder(root *url.URL) *URLBuilder { return &URLBuilder{ root: root, router: Router(), } } // NewURLBuilderFromString workes identically to NewURLBuilder except it takes // a string argument for the root, returning an error if it is not a valid // url. func NewURLBuilderFromString(root string) (*URLBuilder, error) { u, err := url.Parse(root) if err != nil { return nil, err } return NewURLBuilder(u), nil } // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { var scheme string forwardedProto := r.Header.Get("X-Forwarded-Proto") switch { case len(forwardedProto) > 0: scheme = forwardedProto case r.TLS != nil: scheme = "https" case len(r.URL.Scheme) > 0: scheme = r.URL.Scheme default: scheme = "http" } host := r.Host forwardedHost := r.Header.Get("X-Forwarded-Host") if len(forwardedHost) > 0 { // According to the Apache mod_proxy docs, X-Forwarded-Host can be a // comma-separated list of hosts, to which each proxy appends the // requested host. We want to grab the first from this comma-separated // list. 
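// For example, a request arriving with
// "X-Forwarded-Host: first.example.com, proxy1.example.com" resolves to the
// host "first.example.com" (see the corresponding case in urls_test.go).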
hosts := strings.SplitN(forwardedHost, ",", 2) host = strings.TrimSpace(hosts[0]) } basePath := routeDescriptorsMap[RouteNameBase].Path requestPath := r.URL.Path index := strings.Index(requestPath, basePath) u := &url.URL{ Scheme: scheme, Host: host, } if index > 0 { // N.B. index+1 is important because we want to include the trailing / u.Path = requestPath[0 : index+1] } return NewURLBuilder(u) } // BuildBaseURL constructs a base url for the API, typically just "/v2/". func (ub *URLBuilder) BuildBaseURL() (string, error) { route := ub.cloneRoute(RouteNameBase) baseURL, err := route.URL() if err != nil { return "", err } return baseURL.String(), nil } // BuildCatalogURL constructs a url get a catalog of repositories func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameCatalog) catalogURL, err := route.URL() if err != nil { return "", err } return appendValuesURL(catalogURL, values...).String(), nil } // BuildTagsURL constructs a url to list the tags in the named repository. func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { route := ub.cloneRoute(RouteNameTags) tagsURL, err := route.URL("name", name.Name()) if err != nil { return "", err } return tagsURL.String(), nil } // BuildManifestURL constructs a url for the manifest identified by name and // reference. The argument reference may be either a tag or digest. func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { route := ub.cloneRoute(RouteNameManifest) tagOrDigest := "" switch v := ref.(type) { case reference.Tagged: tagOrDigest = v.Tag() case reference.Digested: tagOrDigest = v.Digest().String() } manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) if err != nil { return "", err } return manifestURL.String(), nil } // BuildBlobURL constructs the url for the blob identified by name and dgst. func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { route := ub.cloneRoute(RouteNameBlob) layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) if err != nil { return "", err } return layerURL.String(), nil } // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) uploadURL, err := route.URL("name", name.Name()) if err != nil { return "", err } return appendValuesURL(uploadURL, values...).String(), nil } // BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) if err != nil { return "", err } return appendValuesURL(uploadURL, values...).String(), nil } // clondedRoute returns a clone of the named route from the router. Routes // must be cloned to avoid modifying them during url generation. 
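//
// Editorial illustration (not part of the original source): a typical client
// builds request URLs with the exported helpers above; the registry host used
// here is a made-up example value.
//
//	ub, _ := NewURLBuilderFromString("https://registry.example.com")
//	named, _ := reference.ParseNamed("foo/bar")
//	tagsURL, _ := ub.BuildTagsURL(named)
//	// tagsURL == "https://registry.example.com/v2/foo/bar/tags/list"
//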
func (ub *URLBuilder) cloneRoute(name string) clonedRoute { route := new(mux.Route) root := new(url.URL) *route = *ub.router.GetRoute(name) // clone the route *root = *ub.root return clonedRoute{Route: route, root: root} } type clonedRoute struct { *mux.Route root *url.URL } func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { routeURL, err := cr.Route.URL(pairs...) if err != nil { return nil, err } if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { routeURL.Path = routeURL.Path[1:] } url := cr.root.ResolveReference(routeURL) url.Scheme = cr.root.Scheme return url, nil } // appendValuesURL appends the parameters to the url. func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { merged := u.Query() for _, v := range values { for k, vv := range v { merged[k] = append(merged[k], vv...) } } u.RawQuery = merged.Encode() return u } // appendValues appends the parameters to the url. Panics if the string is not // a url. func appendValues(u string, values ...url.Values) string { up, err := url.Parse(u) if err != nil { panic(err) // should never happen } return appendValuesURL(up, values...).String() } distribution-2.3.0/registry/api/v2/urls_test.go000066400000000000000000000202771265472114500215750ustar00rootroot00000000000000package v2 import ( "net/http" "net/url" "testing" "github.com/docker/distribution/reference" ) type urlBuilderTestCase struct { description string expectedPath string build func() (string, error) } func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { fooBarRef, _ := reference.ParseNamed("foo/bar") return []urlBuilderTestCase{ { description: "test base url", expectedPath: "/v2/", build: urlBuilder.BuildBaseURL, }, { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", build: func() (string, error) { return urlBuilder.BuildTagsURL(fooBarRef) }, }, { description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", build: func() (string, error) { ref, _ := reference.WithTag(fooBarRef, "tag") return urlBuilder.BuildManifestURL(ref) }, }, { description: "build blob url", expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", build: func() (string, error) { ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") return urlBuilder.BuildBlobURL(ref) }, }, { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", build: func() (string, error) { return urlBuilder.BuildBlobUploadURL(fooBarRef) }, }, { description: "build blob upload url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ "size": []string{"10000"}, "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") }, }, { description: "build blob upload chunk url with digest and size", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ "size": []string{"10000"}, 
"digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, } } // TestURLBuilder tests the various url building functions, ensuring they are // returning the expected values. func TestURLBuilder(t *testing.T) { roots := []string{ "http://example.com", "https://example.com", "http://localhost:5000", "https://localhost:5443", } for _, root := range roots { urlBuilder, err := NewURLBuilderFromString(root) if err != nil { t.Fatalf("unexpected error creating urlbuilder: %v", err) } for _, testCase := range makeURLBuilderTestCases(urlBuilder) { url, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } expectedURL := root + testCase.expectedPath if url != expectedURL { t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) } } } } func TestURLBuilderWithPrefix(t *testing.T) { roots := []string{ "http://example.com/prefix/", "https://example.com/prefix/", "http://localhost:5000/prefix/", "https://localhost:5443/prefix/", } for _, root := range roots { urlBuilder, err := NewURLBuilderFromString(root) if err != nil { t.Fatalf("unexpected error creating urlbuilder: %v", err) } for _, testCase := range makeURLBuilderTestCases(urlBuilder) { url, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } expectedURL := root[0:len(root)-1] + testCase.expectedPath if url != expectedURL { t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) } } } } type builderFromRequestTestCase struct { request *http.Request base string } func TestBuilderFromRequest(t *testing.T) { u, err := url.Parse("http://example.com") if err != nil { t.Fatal(err) } forwardedProtoHeader := make(http.Header, 1) forwardedProtoHeader.Set("X-Forwarded-Proto", "https") forwardedHostHeader1 := make(http.Header, 1) forwardedHostHeader1.Set("X-Forwarded-Host", "first.example.com") forwardedHostHeader2 := make(http.Header, 1) forwardedHostHeader2.Set("X-Forwarded-Host", "first.example.com, proxy1.example.com") testRequests := []struct { request *http.Request base string configHost url.URL }{ { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "http://example.com", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader1}, base: "http://first.example.com", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, base: "http://first.example.com", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedHostHeader2}, base: "https://third.example.com:5000", configHost: url.URL{ Scheme: "https", Host: "third.example.com:5000", }, }, } for _, tr := range testRequests { var builder *URLBuilder if tr.configHost.Scheme != "" && tr.configHost.Host != "" { builder = NewURLBuilder(&tr.configHost) } else { builder = NewURLBuilderFromRequest(tr.request) } for _, testCase := range makeURLBuilderTestCases(builder) { buildURL, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } var expectedURL string proto, ok := tr.request.Header["X-Forwarded-Proto"] if !ok { expectedURL = tr.base + testCase.expectedPath } else { urlBase, err := url.Parse(tr.base) if err != nil { t.Fatal(err) } urlBase.Scheme = proto[0] expectedURL = urlBase.String() + testCase.expectedPath } if 
buildURL != expectedURL { t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } } func TestBuilderFromRequestWithPrefix(t *testing.T) { u, err := url.Parse("http://example.com/prefix/v2/") if err != nil { t.Fatal(err) } forwardedProtoHeader := make(http.Header, 1) forwardedProtoHeader.Set("X-Forwarded-Proto", "https") testRequests := []struct { request *http.Request base string configHost url.URL }{ { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com/prefix/", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "http://example.com/prefix/", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com/prefix/", }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://subdomain.example.com/prefix/", configHost: url.URL{ Scheme: "https", Host: "subdomain.example.com", Path: "/prefix/", }, }, } for _, tr := range testRequests { var builder *URLBuilder if tr.configHost.Scheme != "" && tr.configHost.Host != "" { builder = NewURLBuilder(&tr.configHost) } else { builder = NewURLBuilderFromRequest(tr.request) } for _, testCase := range makeURLBuilderTestCases(builder) { buildURL, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } var expectedURL string proto, ok := tr.request.Header["X-Forwarded-Proto"] if !ok { expectedURL = tr.base[0:len(tr.base)-1] + testCase.expectedPath } else { urlBase, err := url.Parse(tr.base) if err != nil { t.Fatal(err) } urlBase.Scheme = proto[0] expectedURL = urlBase.String()[0:len(urlBase.String())-1] + testCase.expectedPath } if buildURL != expectedURL { t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } } distribution-2.3.0/registry/auth/000077500000000000000000000000001265472114500170535ustar00rootroot00000000000000distribution-2.3.0/registry/auth/auth.go000066400000000000000000000111671265472114500203510ustar00rootroot00000000000000// Package auth defines a standard interface for request access controllers. // // An access controller has a simple interface with a single `Authorized` // method which checks that a given request is authorized to perform one or // more actions on one or more resources. This method should return a non-nil // error if the request is not authorized. // // An implementation registers its access controller by name with a constructor // which accepts an options map for configuring the access controller. // // options := map[string]interface{}{"sillySecret": "whysosilly?"} // accessController, _ := auth.GetAccessController("silly", options) // // This `accessController` can then be used in a request handler like so: // // func updateOrder(w http.ResponseWriter, r *http.Request) { // orderNumber := r.FormValue("orderNumber") // resource := auth.Resource{Type: "customerOrder", Name: orderNumber} // access := auth.Access{Resource: resource, Action: "update"} // // if ctx, err := accessController.Authorized(ctx, access); err != nil { // if challenge, ok := err.(auth.Challenge) { // // Let the challenge write the response. // challenge.SetHeaders(w) // w.WriteHeader(http.StatusUnauthorized) // return // } else { // // Some other error. // } // } // } // package auth import ( "fmt" "net/http" "github.com/docker/distribution/context" ) // UserInfo carries information about // an autenticated/authorized client. type UserInfo struct { Name string } // Resource describes a resource by type and name. 
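// For example, a pull of the repository "foo/bar" is typically expressed as
// (illustrative values):
//
//	auth.Access{Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "pull"}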
type Resource struct { Type string Name string } // Access describes a specific action that is // requested or allowed for a given resource. type Access struct { Resource Action string } // Challenge is a special error type which is used for HTTP 401 Unauthorized // responses and is able to write the response with WWW-Authenticate challenge // header values based on the error. type Challenge interface { error // SetHeaders prepares the request to conduct a challenge response by // adding the an HTTP challenge header on the response message. Callers // are expected to set the appropriate HTTP status code (e.g. 401) // themselves. SetHeaders(w http.ResponseWriter) } // AccessController controls access to registry resources based on a request // and required access levels for a request. Implementations can support both // complete denial and http authorization challenges. type AccessController interface { // Authorized returns a non-nil error if the context is granted access and // returns a new authorized context. If one or more Access structs are // provided, the requested access will be compared with what is available // to the context. The given context will contain a "http.request" key with // a `*http.Request` value. If the error is non-nil, access should always // be denied. The error may be of type Challenge, in which case the caller // may have the Challenge handle the request or choose what action to take // based on the Challenge header or response status. The returned context // object should have a "auth.user" value set to a UserInfo struct. Authorized(ctx context.Context, access ...Access) (context.Context, error) } // WithUser returns a context with the authorized user info. func WithUser(ctx context.Context, user UserInfo) context.Context { return userInfoContext{ Context: ctx, user: user, } } type userInfoContext struct { context.Context user UserInfo } func (uic userInfoContext) Value(key interface{}) interface{} { switch key { case "auth.user": return uic.user case "auth.user.name": return uic.user.Name } return uic.Context.Value(key) } // InitFunc is the type of an AccessController factory function and is used // to register the constructor for different AccesController backends. type InitFunc func(options map[string]interface{}) (AccessController, error) var accessControllers map[string]InitFunc func init() { accessControllers = make(map[string]InitFunc) } // Register is used to register an InitFunc for // an AccessController backend with the given name. func Register(name string, initFunc InitFunc) error { if _, exists := accessControllers[name]; exists { return fmt.Errorf("name already registered: %s", name) } accessControllers[name] = initFunc return nil } // GetAccessController constructs an AccessController // with the given options using the named backend. func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { if initFunc, exists := accessControllers[name]; exists { return initFunc(options) } return nil, fmt.Errorf("no access controller registered with name: %s", name) } distribution-2.3.0/registry/auth/htpasswd/000077500000000000000000000000001265472114500207105ustar00rootroot00000000000000distribution-2.3.0/registry/auth/htpasswd/access.go000066400000000000000000000052431265472114500225040ustar00rootroot00000000000000// Package htpasswd provides a simple authentication scheme that checks for the // user credential hash in an htpasswd formatted file in a configuration-determined // location. 
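// Each line of that file is a standard "user:passwordhash" htpasswd entry; in
// practice only bcrypt hashes authenticate successfully here (see
// authenticateUser in htpasswd.go). Such an entry can be generated with, for
// example, `htpasswd -Bbn user password` (command shown for illustration).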
// // This authentication method MUST be used under TLS, as simple token-replay attack is possible. package htpasswd import ( "errors" "fmt" "net/http" "os" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) var ( // ErrInvalidCredential is returned when the auth token does not authenticate correctly. ErrInvalidCredential = errors.New("invalid authorization credential") // ErrAuthenticationFailure returned when authentication failure to be presented to agent. ErrAuthenticationFailure = errors.New("authentication failured") ) type accessController struct { realm string htpasswd *htpasswd } var _ auth.AccessController = &accessController{} func newAccessController(options map[string]interface{}) (auth.AccessController, error) { realm, present := options["realm"] if _, ok := realm.(string); !present || !ok { return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) } path, present := options["path"] if _, ok := path.(string); !present || !ok { return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) } f, err := os.Open(path.(string)) if err != nil { return nil, err } defer f.Close() h, err := newHTPasswd(f) if err != nil { return nil, err } return &accessController{realm: realm.(string), htpasswd: h}, nil } func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { req, err := context.GetRequest(ctx) if err != nil { return nil, err } username, password, ok := req.BasicAuth() if !ok { return nil, &challenge{ realm: ac.realm, err: ErrInvalidCredential, } } if err := ac.htpasswd.authenticateUser(username, password); err != nil { context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) return nil, &challenge{ realm: ac.realm, err: ErrAuthenticationFailure, } } return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil } // challenge implements the auth.Challenge interface. type challenge struct { realm string err error } var _ auth.Challenge = challenge{} // SetHeaders sets the basic challenge header on the response. 
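// For example, with a realm of "registry.example.com" (illustrative), the
// resulting header is:
//
//	WWW-Authenticate: Basic realm="registry.example.com"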
func (ch challenge) SetHeaders(w http.ResponseWriter) { w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) } func (ch challenge) Error() string { return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) } func init() { auth.Register("htpasswd", auth.InitFunc(newAccessController)) } distribution-2.3.0/registry/auth/htpasswd/access_test.go000066400000000000000000000064001265472114500235370ustar00rootroot00000000000000package htpasswd import ( "io/ioutil" "net/http" "net/http/httptest" "testing" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) func TestBasicAccessController(t *testing.T) { testRealm := "The-Shire" testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} testPasswords := []string{"baggins", "baggins", "새주", "공주님"} testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 DeokMan:공주님` tempFile, err := ioutil.TempFile("", "htpasswd-test") if err != nil { t.Fatal("could not create temporary htpasswd file") } if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { t.Fatal("could not write temporary htpasswd file") } options := map[string]interface{}{ "realm": testRealm, "path": tempFile.Name(), } ctx := context.Background() accessController, err := newAccessController(options) if err != nil { t.Fatal("error creating access controller") } tempFile.Close() var userNumber = 0 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithRequest(ctx, r) authCtx, err := accessController.Authorized(ctx) if err != nil { switch err := err.(type) { case auth.Challenge: err.SetHeaders(w) w.WriteHeader(http.StatusUnauthorized) return default: t.Fatalf("unexpected error authorizing request: %v", err) } } userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) if !ok { t.Fatal("basic accessController did not set auth.user context") } if userInfo.Name != testUsers[userNumber] { t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) } w.WriteHeader(http.StatusNoContent) })) client := &http.Client{ CheckRedirect: nil, } req, _ := http.NewRequest("GET", server.URL, nil) resp, err := client.Do(req) if err != nil { t.Fatalf("unexpected error during GET: %v", err) } defer resp.Body.Close() // Request should not be authorized if resp.StatusCode != http.StatusUnauthorized { t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) } nonbcrypt := map[string]struct{}{ "bilbo": {}, "DeokMan": {}, } for i := 0; i < len(testUsers); i++ { userNumber = i req, err := http.NewRequest("GET", server.URL, nil) if err != nil { t.Fatalf("error allocating new request: %v", err) } req.SetBasicAuth(testUsers[i], testPasswords[i]) resp, err = client.Do(req) if err != nil { t.Fatalf("unexpected error during GET: %v", err) } defer resp.Body.Close() if _, ok := nonbcrypt[testUsers[i]]; ok { // these are not allowed. 
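			// Their hashes are not bcrypt, so authentication must fail and a 401 is expected.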
// Request should be authorized if resp.StatusCode != http.StatusUnauthorized { t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) } } else { // Request should be authorized if resp.StatusCode != http.StatusNoContent { t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) } } } } distribution-2.3.0/registry/auth/htpasswd/htpasswd.go000066400000000000000000000036601265472114500231010ustar00rootroot00000000000000package htpasswd import ( "bufio" "fmt" "io" "strings" "golang.org/x/crypto/bcrypt" ) // htpasswd holds a path to a system .htpasswd file and the machinery to parse // it. Only bcrypt hash entries are supported. type htpasswd struct { entries map[string][]byte // maps username to password byte slice. } // newHTPasswd parses the reader and returns an htpasswd or an error. func newHTPasswd(rd io.Reader) (*htpasswd, error) { entries, err := parseHTPasswd(rd) if err != nil { return nil, err } return &htpasswd{entries: entries}, nil } // AuthenticateUser checks a given user:password credential against the // receiving HTPasswd's file. If the check passes, nil is returned. func (htpasswd *htpasswd) authenticateUser(username string, password string) error { credentials, ok := htpasswd.entries[username] if !ok { // timing attack paranoia bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) return ErrAuthenticationFailure } err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) if err != nil { return ErrAuthenticationFailure } return nil } // parseHTPasswd parses the contents of htpasswd. This will read all the // entries in the file, whether or not they are needed. An error is returned // if an syntax errors are encountered or if the reader fails. func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { entries := map[string][]byte{} scanner := bufio.NewScanner(rd) var line int for scanner.Scan() { line++ // 1-based line numbering t := strings.TrimSpace(scanner.Text()) if len(t) < 1 { continue } // lines that *begin* with a '#' are considered comments if t[0] == '#' { continue } i := strings.Index(t, ":") if i < 0 || i >= len(t) { return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) } entries[t[:i]] = []byte(t[i+1:]) } if err := scanner.Err(); err != nil { return nil, err } return entries, nil } distribution-2.3.0/registry/auth/htpasswd/htpasswd_test.go000066400000000000000000000035031265472114500241340ustar00rootroot00000000000000package htpasswd import ( "fmt" "reflect" "strings" "testing" ) func TestParseHTPasswd(t *testing.T) { for _, tc := range []struct { desc string input string err error entries map[string][]byte }{ { desc: "basic example", input: ` # This is a comment in a basic example. 
bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 DeokMan:공주님 `, entries: map[string][]byte{ "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), "DeokMan": []byte("공주님"), }, }, { desc: "ensures comments are filtered", input: ` # asdf:asdf `, }, { desc: "ensure midline hash is not comment", input: ` asdf:as#df `, entries: map[string][]byte{ "asdf": []byte("as#df"), }, }, { desc: "ensure midline hash is not comment", input: ` # A valid comment valid:entry asdf `, err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), }, } { entries, err := parseHTPasswd(strings.NewReader(tc.input)) if err != tc.err { if tc.err == nil { t.Fatalf("%s: unexpected error: %v", tc.desc, err) } else { if err.Error() != tc.err.Error() { // use string equality here. t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) } } } if tc.err != nil { continue // don't test output } // allow empty and nil to be equal if tc.entries == nil { tc.entries = map[string][]byte{} } if !reflect.DeepEqual(entries, tc.entries) { t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) } } } distribution-2.3.0/registry/auth/silly/000077500000000000000000000000001265472114500202075ustar00rootroot00000000000000distribution-2.3.0/registry/auth/silly/access.go000066400000000000000000000052771265472114500220120ustar00rootroot00000000000000// Package silly provides a simple authentication scheme that checks for the // existence of an Authorization header and issues access if is present and // non-empty. // // This package is present as an example implementation of a minimal // auth.AccessController and for testing. This is not suitable for any kind of // production security. package silly import ( "fmt" "net/http" "strings" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) // accessController provides a simple implementation of auth.AccessController // that simply checks for a non-empty Authorization header. It is useful for // demonstration and testing. type accessController struct { realm string service string } var _ auth.AccessController = &accessController{} func newAccessController(options map[string]interface{}) (auth.AccessController, error) { realm, present := options["realm"] if _, ok := realm.(string); !present || !ok { return nil, fmt.Errorf(`"realm" must be set for silly access controller`) } service, present := options["service"] if _, ok := service.(string); !present || !ok { return nil, fmt.Errorf(`"service" must be set for silly access controller`) } return &accessController{realm: realm.(string), service: service.(string)}, nil } // Authorized simply checks for the existence of the authorization header, // responding with a bearer challenge if it doesn't exist. 
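// The challenge written by SetHeaders takes the form (values illustrative):
//
//	WWW-Authenticate: Bearer realm="test-realm",service="test-service",scope="repository:foo/bar:pull"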
func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { req, err := context.GetRequest(ctx) if err != nil { return nil, err } if req.Header.Get("Authorization") == "" { challenge := challenge{ realm: ac.realm, service: ac.service, } if len(accessRecords) > 0 { var scopes []string for _, access := range accessRecords { scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) } challenge.scope = strings.Join(scopes, " ") } return nil, &challenge } return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil } type challenge struct { realm string service string scope string } var _ auth.Challenge = challenge{} // SetHeaders sets a simple bearer challenge on the response. func (ch challenge) SetHeaders(w http.ResponseWriter) { header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) if ch.scope != "" { header = fmt.Sprintf("%s,scope=%q", header, ch.scope) } w.Header().Set("WWW-Authenticate", header) } func (ch challenge) Error() string { return fmt.Sprintf("silly authentication challenge: %#v", ch) } // init registers the silly auth backend. func init() { auth.Register("silly", auth.InitFunc(newAccessController)) } distribution-2.3.0/registry/auth/silly/access_test.go000066400000000000000000000034341265472114500230420ustar00rootroot00000000000000package silly import ( "net/http" "net/http/httptest" "testing" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" ) func TestSillyAccessController(t *testing.T) { ac := &accessController{ realm: "test-realm", service: "test-service", } server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := context.WithValue(nil, "http.request", r) authCtx, err := ac.Authorized(ctx) if err != nil { switch err := err.(type) { case auth.Challenge: err.SetHeaders(w) w.WriteHeader(http.StatusUnauthorized) return default: t.Fatalf("unexpected error authorizing request: %v", err) } } userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) if !ok { t.Fatal("silly accessController did not set auth.user context") } if userInfo.Name != "silly" { t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) } w.WriteHeader(http.StatusNoContent) })) resp, err := http.Get(server.URL) if err != nil { t.Fatalf("unexpected error during GET: %v", err) } defer resp.Body.Close() // Request should not be authorized if resp.StatusCode != http.StatusUnauthorized { t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) } req, err := http.NewRequest("GET", server.URL, nil) if err != nil { t.Fatalf("unexpected error creating new request: %v", err) } req.Header.Set("Authorization", "seriously, anything") resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("unexpected error during GET: %v", err) } defer resp.Body.Close() // Request should not be authorized if resp.StatusCode != http.StatusNoContent { t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) } } distribution-2.3.0/registry/auth/token/000077500000000000000000000000001265472114500201735ustar00rootroot00000000000000distribution-2.3.0/registry/auth/token/accesscontroller.go000066400000000000000000000161421265472114500240730ustar00rootroot00000000000000package token import ( "crypto" "crypto/x509" "encoding/pem" "errors" "fmt" "io/ioutil" "net/http" "os" "strings" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" 
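	// libtrust provides the public key types used below to verify token signatures.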
"github.com/docker/libtrust" ) // accessSet maps a typed, named resource to // a set of actions requested or authorized. type accessSet map[auth.Resource]actionSet // newAccessSet constructs an accessSet from // a variable number of auth.Access items. func newAccessSet(accessItems ...auth.Access) accessSet { accessSet := make(accessSet, len(accessItems)) for _, access := range accessItems { resource := auth.Resource{ Type: access.Type, Name: access.Name, } set, exists := accessSet[resource] if !exists { set = newActionSet() accessSet[resource] = set } set.add(access.Action) } return accessSet } // contains returns whether or not the given access is in this accessSet. func (s accessSet) contains(access auth.Access) bool { actionSet, ok := s[access.Resource] if ok { return actionSet.contains(access.Action) } return false } // scopeParam returns a collection of scopes which can // be used for a WWW-Authenticate challenge parameter. // See https://tools.ietf.org/html/rfc6750#section-3 func (s accessSet) scopeParam() string { scopes := make([]string, 0, len(s)) for resource, actionSet := range s { actions := strings.Join(actionSet.keys(), ",") scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) } return strings.Join(scopes, " ") } // Errors used and exported by this package. var ( ErrInsufficientScope = errors.New("insufficient scope") ErrTokenRequired = errors.New("authorization token required") ) // authChallenge implements the auth.Challenge interface. type authChallenge struct { err error realm string service string accessSet accessSet } var _ auth.Challenge = authChallenge{} // Error returns the internal error string for this authChallenge. func (ac authChallenge) Error() string { return ac.err.Error() } // Status returns the HTTP Response Status Code for this authChallenge. func (ac authChallenge) Status() int { return http.StatusUnauthorized } // challengeParams constructs the value to be used in // the WWW-Authenticate response challenge header. // See https://tools.ietf.org/html/rfc6750#section-3 func (ac authChallenge) challengeParams() string { str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) if scope := ac.accessSet.scopeParam(); scope != "" { str = fmt.Sprintf("%s,scope=%q", str, scope) } if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { str = fmt.Sprintf("%s,error=%q", str, "invalid_token") } else if ac.err == ErrInsufficientScope { str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") } return str } // SetChallenge sets the WWW-Authenticate value for the response. func (ac authChallenge) SetHeaders(w http.ResponseWriter) { w.Header().Add("WWW-Authenticate", ac.challengeParams()) } // accessController implements the auth.AccessController interface. type accessController struct { realm string issuer string service string rootCerts *x509.CertPool trustedKeys map[string]libtrust.PublicKey } // tokenAccessOptions is a convenience type for handling // options to the contstructor of an accessController. type tokenAccessOptions struct { realm string issuer string service string rootCertBundle string } // checkOptions gathers the necessary options // for an accessController from the given map. 
func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { var opts tokenAccessOptions keys := []string{"realm", "issuer", "service", "rootcertbundle"} vals := make([]string, 0, len(keys)) for _, key := range keys { val, ok := options[key].(string) if !ok { return opts, fmt.Errorf("token auth requires a valid option string: %q", key) } vals = append(vals, val) } opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] return opts, nil } // newAccessController creates an accessController using the given options. func newAccessController(options map[string]interface{}) (auth.AccessController, error) { config, err := checkOptions(options) if err != nil { return nil, err } fp, err := os.Open(config.rootCertBundle) if err != nil { return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) } defer fp.Close() rawCertBundle, err := ioutil.ReadAll(fp) if err != nil { return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) } var rootCerts []*x509.Certificate pemBlock, rawCertBundle := pem.Decode(rawCertBundle) for pemBlock != nil { cert, err := x509.ParseCertificate(pemBlock.Bytes) if err != nil { return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) } rootCerts = append(rootCerts, cert) pemBlock, rawCertBundle = pem.Decode(rawCertBundle) } if len(rootCerts) == 0 { return nil, errors.New("token auth requires at least one token signing root certificate") } rootPool := x509.NewCertPool() trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) for _, rootCert := range rootCerts { rootPool.AddCert(rootCert) pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) if err != nil { return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) } trustedKeys[pubKey.KeyID()] = pubKey } return &accessController{ realm: config.realm, issuer: config.issuer, service: config.service, rootCerts: rootPool, trustedKeys: trustedKeys, }, nil } // Authorized handles checking whether the given request is authorized // for actions on resources described by the given access items. func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { challenge := &authChallenge{ realm: ac.realm, service: ac.service, accessSet: newAccessSet(accessItems...), } req, err := context.GetRequest(ctx) if err != nil { return nil, err } parts := strings.Split(req.Header.Get("Authorization"), " ") if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { challenge.err = ErrTokenRequired return nil, challenge } rawToken := parts[1] token, err := NewToken(rawToken) if err != nil { challenge.err = err return nil, challenge } verifyOpts := VerifyOptions{ TrustedIssuers: []string{ac.issuer}, AcceptedAudiences: []string{ac.service}, Roots: ac.rootCerts, TrustedKeys: ac.trustedKeys, } if err = token.Verify(verifyOpts); err != nil { challenge.err = err return nil, challenge } accessSet := token.accessSet() for _, access := range accessItems { if !accessSet.contains(access) { challenge.err = ErrInsufficientScope return nil, challenge } } return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil } // init handles registering the token auth backend. 
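// Once registered, a configured controller can be obtained by name, e.g.:
//
//	ac, err := auth.GetAccessController("token", options)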
func init() { auth.Register("token", auth.InitFunc(newAccessController)) } distribution-2.3.0/registry/auth/token/stringset.go000066400000000000000000000014051265472114500225440ustar00rootroot00000000000000package token // StringSet is a useful type for looking up strings. type stringSet map[string]struct{} // NewStringSet creates a new StringSet with the given strings. func newStringSet(keys ...string) stringSet { ss := make(stringSet, len(keys)) ss.add(keys...) return ss } // Add inserts the given keys into this StringSet. func (ss stringSet) add(keys ...string) { for _, key := range keys { ss[key] = struct{}{} } } // Contains returns whether the given key is in this StringSet. func (ss stringSet) contains(key string) bool { _, ok := ss[key] return ok } // Keys returns a slice of all keys in this StringSet. func (ss stringSet) keys() []string { keys := make([]string, 0, len(ss)) for key := range ss { keys = append(keys, key) } return keys } distribution-2.3.0/registry/auth/token/token.go000066400000000000000000000236461265472114500216550ustar00rootroot00000000000000package token import ( "crypto" "crypto/x509" "encoding/base64" "encoding/json" "errors" "fmt" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/libtrust" "github.com/docker/distribution/registry/auth" ) const ( // TokenSeparator is the value which separates the header, claims, and // signature in the compact serialization of a JSON Web Token. TokenSeparator = "." ) // Errors used by token parsing and verification. var ( ErrMalformedToken = errors.New("malformed token") ErrInvalidToken = errors.New("invalid token") ) // ResourceActions stores allowed actions on a named and typed resource. type ResourceActions struct { Type string `json:"type"` Name string `json:"name"` Actions []string `json:"actions"` } // ClaimSet describes the main section of a JSON Web Token. type ClaimSet struct { // Public claims Issuer string `json:"iss"` Subject string `json:"sub"` Audience string `json:"aud"` Expiration int64 `json:"exp"` NotBefore int64 `json:"nbf"` IssuedAt int64 `json:"iat"` JWTID string `json:"jti"` // Private claims Access []*ResourceActions `json:"access"` } // Header describes the header section of a JSON Web Token. type Header struct { Type string `json:"typ"` SigningAlg string `json:"alg"` KeyID string `json:"kid,omitempty"` X5c []string `json:"x5c,omitempty"` RawJWK json.RawMessage `json:"jwk,omitempty"` } // Token describes a JSON Web Token. type Token struct { Raw string Header *Header Claims *ClaimSet Signature []byte } // VerifyOptions is used to specify // options when verifying a JSON Web Token. type VerifyOptions struct { TrustedIssuers []string AcceptedAudiences []string Roots *x509.CertPool TrustedKeys map[string]libtrust.PublicKey } // NewToken parses the given raw token string // and constructs an unverified JSON Web Token. 
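// The raw token must be in JOSE compact serialization, i.e. three
// base64url-encoded sections joined by TokenSeparator:
//
//	<header>.<claims>.<signature>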
func NewToken(rawToken string) (*Token, error) { parts := strings.Split(rawToken, TokenSeparator) if len(parts) != 3 { return nil, ErrMalformedToken } var ( rawHeader, rawClaims = parts[0], parts[1] headerJSON, claimsJSON []byte err error ) defer func() { if err != nil { log.Errorf("error while unmarshalling raw token: %s", err) } }() if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { err = fmt.Errorf("unable to decode header: %s", err) return nil, ErrMalformedToken } if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { err = fmt.Errorf("unable to decode claims: %s", err) return nil, ErrMalformedToken } token := new(Token) token.Header = new(Header) token.Claims = new(ClaimSet) token.Raw = strings.Join(parts[:2], TokenSeparator) if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { err = fmt.Errorf("unable to decode signature: %s", err) return nil, ErrMalformedToken } if err = json.Unmarshal(headerJSON, token.Header); err != nil { return nil, ErrMalformedToken } if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { return nil, ErrMalformedToken } return token, nil } // Verify attempts to verify this token using the given options. // Returns a nil error if the token is valid. func (t *Token) Verify(verifyOpts VerifyOptions) error { // Verify that the Issuer claim is a trusted authority. if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) return ErrInvalidToken } // Verify that the Audience claim is allowed. if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { log.Errorf("token intended for another audience: %q", t.Claims.Audience) return ErrInvalidToken } // Verify that the token is currently usable and not expired. currentUnixTime := time.Now().Unix() if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) return ErrInvalidToken } // Verify the token signature. if len(t.Signature) == 0 { log.Error("token has no signature") return ErrInvalidToken } // Verify that the signing key is trusted. signingKey, err := t.VerifySigningKey(verifyOpts) if err != nil { log.Error(err) return ErrInvalidToken } // Finally, verify the signature of the token using the key which signed it. if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { log.Errorf("unable to verify token signature: %s", err) return ErrInvalidToken } return nil } // VerifySigningKey attempts to get the key which was used to sign this token. // The token header should contain either of these 3 fields: // `x5c` - The x509 certificate chain for the signing key. Needs to be // verified. // `jwk` - The JSON Web Key representation of the signing key. // May contain its own `x5c` field which needs to be verified. // `kid` - The unique identifier for the key. This library interprets it // as a libtrust fingerprint. The key itself can be looked up in // the trustedKeys field of the given verify options. // Each of these methods are tried in that order of preference until the // signing key is found or an error is returned. func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { // First attempt to get an x509 certificate chain from the header. 
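	// The precedence is x5c, then jwk, then kid, matching the order described
	// in the method comment above.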
var ( x5c = t.Header.X5c rawJWK = t.Header.RawJWK keyID = t.Header.KeyID ) switch { case len(x5c) > 0: signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) case len(rawJWK) > 0: signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) case len(keyID) > 0: signingKey = verifyOpts.TrustedKeys[keyID] if signingKey == nil { err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) } default: err = errors.New("unable to get token signing key") } return } func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { if len(x5c) == 0 { return nil, errors.New("empty x509 certificate chain") } // Ensure the first element is encoded correctly. leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) if err != nil { return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) } // And that it is a valid x509 certificate. leafCert, err := x509.ParseCertificate(leafCertDer) if err != nil { return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) } // The rest of the certificate chain are intermediate certificates. intermediates := x509.NewCertPool() for i := 1; i < len(x5c); i++ { intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) if err != nil { return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) } intermediateCert, err := x509.ParseCertificate(intermediateCertDer) if err != nil { return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) } intermediates.AddCert(intermediateCert) } verifyOpts := x509.VerifyOptions{ Intermediates: intermediates, Roots: roots, KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, } // TODO: this call returns certificate chains which we ignore for now, but // we should check them for revocations if we have the ability later. if _, err = leafCert.Verify(verifyOpts); err != nil { return nil, fmt.Errorf("unable to verify certificate chain: %s", err) } // Get the public key from the leaf certificate. leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) if !ok { return nil, errors.New("unable to get leaf cert public key value") } leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) if err != nil { return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) } return } func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) if err != nil { return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) } // Check to see if the key includes a certificate chain. x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) if !ok { // The JWK should be one of the trusted root keys. if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { return nil, errors.New("untrusted JWK with no certificate chain") } // The JWK is one of the trusted keys. return } // Ensure each item in the chain is of the correct type. x5c := make([]string, len(x5cVal)) for i, val := range x5cVal { certString, ok := val.(string) if !ok || len(certString) == 0 { return nil, errors.New("malformed certificate chain") } x5c[i] = certString } // Ensure that the x509 certificate chain can // be verified up to one of our trusted roots. leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) if err != nil { return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) } // Verify that the public key in the leaf cert *is* the signing key. 
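	// Identity is compared via libtrust key IDs, which are fingerprints derived
	// from the public key material.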
if pubKey.KeyID() != leafKey.KeyID() { return nil, errors.New("leaf certificate public key ID does not match JWK key ID") } return } // accessSet returns a set of actions available for the resource // actions listed in the `access` section of this token. func (t *Token) accessSet() accessSet { if t.Claims == nil { return nil } accessSet := make(accessSet, len(t.Claims.Access)) for _, resourceActions := range t.Claims.Access { resource := auth.Resource{ Type: resourceActions.Type, Name: resourceActions.Name, } set, exists := accessSet[resource] if !exists { set = newActionSet() accessSet[resource] = set } for _, action := range resourceActions.Actions { set.add(action) } } return accessSet } func (t *Token) compactRaw() string { return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) } distribution-2.3.0/registry/auth/token/token_test.go000066400000000000000000000226101265472114500227020ustar00rootroot00000000000000package token import ( "crypto" "crypto/rand" "crypto/x509" "encoding/base64" "encoding/json" "encoding/pem" "fmt" "io/ioutil" "net/http" "os" "strings" "testing" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/auth" "github.com/docker/libtrust" ) func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { keys := make([]libtrust.PrivateKey, 0, numKeys) for i := 0; i < numKeys; i++ { key, err := libtrust.GenerateECP256PrivateKey() if err != nil { return nil, err } keys = append(keys, key) } return keys, nil } func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { if depth == 0 { // Don't need to build a chain. return rootKey, nil } var ( x5c = make([]string, depth) parentKey = rootKey key libtrust.PrivateKey cert *x509.Certificate err error ) for depth > 0 { if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { return nil, err } if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { return nil, err } depth-- x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) parentKey = key } key.AddExtendedField("x5c", x5c) return key, nil } func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { certs := make([]*x509.Certificate, 0, len(rootKeys)) for _, key := range rootKeys { cert, err := libtrust.GenerateCACert(key, key) if err != nil { return nil, err } certs = append(certs, cert) } return certs, nil } func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) for _, key := range rootKeys { trustedKeys[key.KeyID()] = key.PublicKey() } return trustedKeys } func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { signingKey, err := makeSigningKeyWithChain(rootKey, depth) if err != nil { return nil, fmt.Errorf("unable to amke signing key with chain: %s", err) } rawJWK, err := signingKey.PublicKey().MarshalJSON() if err != nil { return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) } joseHeader := &Header{ Type: "JWT", SigningAlg: "ES256", RawJWK: json.RawMessage(rawJWK), } now := time.Now() randomBytes := make([]byte, 15) if _, err = rand.Read(randomBytes); err != nil { return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) } claimSet := &ClaimSet{ Issuer: issuer, Subject: "foo", Audience: audience, Expiration: now.Add(5 * time.Minute).Unix(), NotBefore: now.Unix(), IssuedAt: now.Unix(), JWTID: 
base64.URLEncoding.EncodeToString(randomBytes), Access: access, } var joseHeaderBytes, claimSetBytes []byte if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { return nil, fmt.Errorf("unable to marshal jose header: %s", err) } if claimSetBytes, err = json.Marshal(claimSet); err != nil { return nil, fmt.Errorf("unable to marshal claim set: %s", err) } encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) encodedClaimSet := joseBase64UrlEncode(claimSetBytes) encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) var signatureBytes []byte if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { return nil, fmt.Errorf("unable to sign jwt payload: %s", err) } signature := joseBase64UrlEncode(signatureBytes) tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) return NewToken(tokenString) } // This test makes 4 tokens with a varying number of intermediate // certificates ranging from no intermediate chain to a length of 3 // intermediates. func TestTokenVerify(t *testing.T) { var ( numTokens = 4 issuer = "test-issuer" audience = "test-audience" access = []*ResourceActions{ { Type: "repository", Name: "foo/bar", Actions: []string{"pull", "push"}, }, } ) rootKeys, err := makeRootKeys(numTokens) if err != nil { t.Fatal(err) } rootCerts, err := makeRootCerts(rootKeys) if err != nil { t.Fatal(err) } rootPool := x509.NewCertPool() for _, rootCert := range rootCerts { rootPool.AddCert(rootCert) } trustedKeys := makeTrustedKeyMap(rootKeys) tokens := make([]*Token, 0, numTokens) for i := 0; i < numTokens; i++ { token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) if err != nil { t.Fatal(err) } tokens = append(tokens, token) } verifyOps := VerifyOptions{ TrustedIssuers: []string{issuer}, AcceptedAudiences: []string{audience}, Roots: rootPool, TrustedKeys: trustedKeys, } for _, token := range tokens { if err := token.Verify(verifyOps); err != nil { t.Fatal(err) } } } func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { rootCerts, err := makeRootCerts(rootKeys) if err != nil { return "", err } tempFile, err := ioutil.TempFile("", "rootCertBundle") if err != nil { return "", err } defer tempFile.Close() for _, cert := range rootCerts { if err = pem.Encode(tempFile, &pem.Block{ Type: "CERTIFICATE", Bytes: cert.Raw, }); err != nil { os.Remove(tempFile.Name()) return "", err } } return tempFile.Name(), nil } // TestAccessController tests complete integration of the token auth package. // It starts by mocking the options for a token auth accessController which // it creates. It then tries a few mock requests: // - don't supply a token; should error with challenge // - supply an invalid token; should error with challenge // - supply a token with insufficient access; should error with challenge // - supply a valid token; should not error func TestAccessController(t *testing.T) { // Make 2 keys; only the first is to be a trusted root key. rootKeys, err := makeRootKeys(2) if err != nil { t.Fatal(err) } rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) if err != nil { t.Fatal(err) } defer os.Remove(rootCertBundleFilename) realm := "https://auth.example.com/token/" issuer := "test-issuer.example.com" service := "test-service.example.com" options := map[string]interface{}{ "realm": realm, "issuer": issuer, "service": service, "rootcertbundle": rootCertBundleFilename, } accessController, err := newAccessController(options) if err != nil { t.Fatal(err) } // 1. 
Make a mock http.Request with no token. req, err := http.NewRequest("GET", "http://example.com/foo", nil) if err != nil { t.Fatal(err) } testAccess := auth.Access{ Resource: auth.Resource{ Type: "foo", Name: "bar", }, Action: "baz", } ctx := context.WithValue(nil, "http.request", req) authCtx, err := accessController.Authorized(ctx, testAccess) challenge, ok := err.(auth.Challenge) if !ok { t.Fatal("accessController did not return a challenge") } if challenge.Error() != ErrTokenRequired.Error() { t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) } if authCtx != nil { t.Fatalf("expected nil auth context but got %s", authCtx) } // 2. Supply an invalid token. token, err := makeTestToken( issuer, service, []*ResourceActions{{ Type: testAccess.Type, Name: testAccess.Name, Actions: []string{testAccess.Action}, }}, rootKeys[1], 1, // Everything is valid except the key which signed it. ) if err != nil { t.Fatal(err) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) authCtx, err = accessController.Authorized(ctx, testAccess) challenge, ok = err.(auth.Challenge) if !ok { t.Fatal("accessController did not return a challenge") } if challenge.Error() != ErrInvalidToken.Error() { t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) } if authCtx != nil { t.Fatalf("expected nil auth context but got %s", authCtx) } // 3. Supply a token with insufficient access. token, err = makeTestToken( issuer, service, []*ResourceActions{}, // No access specified. rootKeys[0], 1, ) if err != nil { t.Fatal(err) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) authCtx, err = accessController.Authorized(ctx, testAccess) challenge, ok = err.(auth.Challenge) if !ok { t.Fatal("accessController did not return a challenge") } if challenge.Error() != ErrInsufficientScope.Error() { t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) } if authCtx != nil { t.Fatalf("expected nil auth context but got %s", authCtx) } // 4. Supply the token we need, or deserve, or whatever. token, err = makeTestToken( issuer, service, []*ResourceActions{{ Type: testAccess.Type, Name: testAccess.Name, Actions: []string{testAccess.Action}, }}, rootKeys[0], 1, ) if err != nil { t.Fatal(err) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) authCtx, err = accessController.Authorized(ctx, testAccess) if err != nil { t.Fatalf("accessController returned unexpected error: %s", err) } userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) if !ok { t.Fatal("token accessController did not set auth.user context") } if userInfo.Name != "foo" { t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) } } distribution-2.3.0/registry/auth/token/util.go000066400000000000000000000027471265472114500215110ustar00rootroot00000000000000package token import ( "encoding/base64" "errors" "strings" ) // joseBase64UrlEncode encodes the given data using the standard base64 url // encoding format but with all trailing '=' characters omitted in accordance // with the jose specification. 
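// For example, the single byte "a" encodes to "YQ" rather than "YQ==".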
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 func joseBase64UrlEncode(b []byte) string { return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } // joseBase64UrlDecode decodes the given string using the standard base64 url // decoder but first adds the appropriate number of trailing '=' characters in // accordance with the jose specification. // http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 func joseBase64UrlDecode(s string) ([]byte, error) { switch len(s) % 4 { case 0: case 2: s += "==" case 3: s += "=" default: return nil, errors.New("illegal base64url string") } return base64.URLEncoding.DecodeString(s) } // actionSet is a special type of stringSet. type actionSet struct { stringSet } func newActionSet(actions ...string) actionSet { return actionSet{newStringSet(actions...)} } // Contains calls StringSet.Contains() for // either "*" or the given action string. func (s actionSet) contains(action string) bool { return s.stringSet.contains("*") || s.stringSet.contains(action) } // contains returns true if q is found in ss. func contains(ss []string, q string) bool { for _, s := range ss { if s == q { return true } } return false } distribution-2.3.0/registry/client/000077500000000000000000000000001265472114500173705ustar00rootroot00000000000000distribution-2.3.0/registry/client/auth/000077500000000000000000000000001265472114500203315ustar00rootroot00000000000000distribution-2.3.0/registry/client/auth/api_version.go000066400000000000000000000031701265472114500231770ustar00rootroot00000000000000package auth import ( "net/http" "strings" ) // APIVersion represents a version of an API including its // type and version number. type APIVersion struct { // Type refers to the name of a specific API specification // such as "registry" Type string // Version is the version of the API specification implemented, // This may omit the revision number and only include // the major and minor version, such as "2.0" Version string } // String returns the string formatted API Version func (v APIVersion) String() string { return v.Type + "/" + v.Version } // APIVersions gets the API versions out of an HTTP response using the provided // version header as the key for the HTTP header. func APIVersions(resp *http.Response, versionHeader string) []APIVersion { versions := []APIVersion{} if versionHeader != "" { for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { for _, version := range strings.Fields(supportedVersions) { versions = append(versions, ParseAPIVersion(version)) } } } return versions } // ParseAPIVersion parses an API version string into an APIVersion // Format (Expected, not enforced): // API version string = '/' // API type = [a-z][a-z0-9]* // API version = [0-9]+(\.[0-9]+)? // TODO(dmcgowan): Enforce format, add error condition, remove unknown type func ParseAPIVersion(versionStr string) APIVersion { idx := strings.IndexRune(versionStr, '/') if idx == -1 { return APIVersion{ Type: "unknown", Version: versionStr, } } return APIVersion{ Type: strings.ToLower(versionStr[:idx]), Version: versionStr[idx+1:], } } distribution-2.3.0/registry/client/auth/authchallenge.go000066400000000000000000000126521265472114500234720ustar00rootroot00000000000000package auth import ( "fmt" "net/http" "net/url" "strings" ) // Challenge carries information from a WWW-Authenticate response header. // See RFC 2617. 
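// For example, a 401 response carrying (values illustrative)
//
//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"
//
// is parsed into a Challenge with Scheme "bearer" and Parameters
// {"realm": "https://auth.example.com/token", "service": "registry.example.com"}.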
type Challenge struct { // Scheme is the auth-scheme according to RFC 2617 Scheme string // Parameters are the auth-params according to RFC 2617 Parameters map[string]string } // ChallengeManager manages the challenges for endpoints. // The challenges are pulled out of HTTP responses. Only // responses which expect challenges should be added to // the manager, since a non-unauthorized request will be // viewed as not requiring challenges. type ChallengeManager interface { // GetChallenges returns the challenges for the given // endpoint URL. GetChallenges(endpoint string) ([]Challenge, error) // AddResponse adds the response to the challenge // manager. The challenges will be parsed out of // the WWW-Authenicate headers and added to the // URL which was produced the response. If the // response was authorized, any challenges for the // endpoint will be cleared. AddResponse(resp *http.Response) error } // NewSimpleChallengeManager returns an instance of // ChallengeManger which only maps endpoints to challenges // based on the responses which have been added the // manager. The simple manager will make no attempt to // perform requests on the endpoints or cache the responses // to a backend. func NewSimpleChallengeManager() ChallengeManager { return simpleChallengeManager{} } type simpleChallengeManager map[string][]Challenge func (m simpleChallengeManager) GetChallenges(endpoint string) ([]Challenge, error) { challenges := m[endpoint] return challenges, nil } func (m simpleChallengeManager) AddResponse(resp *http.Response) error { challenges := ResponseChallenges(resp) if resp.Request == nil { return fmt.Errorf("missing request reference") } urlCopy := url.URL{ Path: resp.Request.URL.Path, Host: resp.Request.URL.Host, Scheme: resp.Request.URL.Scheme, } m[urlCopy.String()] = challenges return nil } // Octet types from RFC 2616. type octetType byte var octetTypes [256]octetType const ( isToken octetType = 1 << iota isSpace ) func init() { // OCTET = // CHAR = // CTL = // CR = // LF = // SP = // HT = // <"> = // CRLF = CR LF // LWS = [CRLF] 1*( SP | HT ) // TEXT = // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT // token = 1* // qdtext = > for c := 0; c < 256; c++ { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { t |= isSpace } if isChar && !isCtl && !isSeparator { t |= isToken } octetTypes[c] = t } } // ResponseChallenges returns a list of authorization challenges // for the given http Response. Challenges are only checked if // the response status code was a 401. func ResponseChallenges(resp *http.Response) []Challenge { if resp.StatusCode == http.StatusUnauthorized { // Parse the WWW-Authenticate Header and store the challenges // on this endpoint object. 
return parseAuthHeader(resp.Header) } return nil } func parseAuthHeader(header http.Header) []Challenge { challenges := []Challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) } } return challenges } func parseValueAndParams(header string) (value string, params map[string]string) { params = make(map[string]string) value, s := expectToken(header) if value == "" { return } value = strings.ToLower(value) s = "," + skipSpace(s) for strings.HasPrefix(s, ",") { var pkey string pkey, s = expectToken(skipSpace(s[1:])) if pkey == "" { return } if !strings.HasPrefix(s, "=") { return } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) if pvalue == "" { return } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) } return } func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isSpace == 0 { break } } return s[i:] } func expectToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isToken == 0 { break } } return s[:i], s[i:] } func expectTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return expectToken(s) } s = s[1:] for i := 0; i < len(s); i++ { switch s[i] { case '"': return s[:i], s[i+1:] case '\\': p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true for i = i + 1; i < len(s); i++ { b := s[i] switch { case escape: escape = false p[j] = b j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b j++ } } return "", "" } } return "", "" } distribution-2.3.0/registry/client/auth/authchallenge_test.go000066400000000000000000000024041265472114500245230ustar00rootroot00000000000000package auth import ( "net/http" "testing" ) func TestAuthChallengeParse(t *testing.T) { header := http.Header{} header.Add("WWW-Authenticate", `Bearer realm="https://auth.example.com/token",service="registry.example.com",other=fun,slashed="he\"\l\lo"`) challenges := parseAuthHeader(header) if len(challenges) != 1 { t.Fatalf("Unexpected number of auth challenges: %d, expected 1", len(challenges)) } challenge := challenges[0] if expected := "bearer"; challenge.Scheme != expected { t.Fatalf("Unexpected scheme: %s, expected: %s", challenge.Scheme, expected) } if expected := "https://auth.example.com/token"; challenge.Parameters["realm"] != expected { t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["realm"], expected) } if expected := "registry.example.com"; challenge.Parameters["service"] != expected { t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["service"], expected) } if expected := "fun"; challenge.Parameters["other"] != expected { t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["other"], expected) } if expected := "he\"llo"; challenge.Parameters["slashed"] != expected { t.Fatalf("Unexpected param: %s, expected: %s", challenge.Parameters["slashed"], expected) } } distribution-2.3.0/registry/client/auth/session.go000066400000000000000000000210511265472114500223420ustar00rootroot00000000000000package auth import ( "encoding/json" "errors" "fmt" "net/http" "net/url" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/transport" ) // AuthenticationHandler is an interface for authorizing a request from // params from a "WWW-Authenicate" header for a 
single scheme. type AuthenticationHandler interface { // Scheme returns the scheme as expected from the "WWW-Authenicate" header. Scheme() string // AuthorizeRequest adds the authorization header to a request (if needed) // using the parameters from "WWW-Authenticate" method. The parameters // values depend on the scheme. AuthorizeRequest(req *http.Request, params map[string]string) error } // CredentialStore is an interface for getting credentials for // a given URL type CredentialStore interface { // Basic returns basic auth for the given URL Basic(*url.URL) (string, string) } // NewAuthorizer creates an authorizer which can handle multiple authentication // schemes. The handlers are tried in order, the higher priority authentication // methods should be first. The challengeMap holds a list of challenges for // a given root API endpoint (for example "https://registry-1.docker.io/v2/"). func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { return &endpointAuthorizer{ challenges: manager, handlers: handlers, } } type endpointAuthorizer struct { challenges ChallengeManager handlers []AuthenticationHandler transport http.RoundTripper } func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { v2Root := strings.Index(req.URL.Path, "/v2/") if v2Root == -1 { return nil } ping := url.URL{ Host: req.URL.Host, Scheme: req.URL.Scheme, Path: req.URL.Path[:v2Root+4], } pingEndpoint := ping.String() challenges, err := ea.challenges.GetChallenges(pingEndpoint) if err != nil { return err } if len(challenges) > 0 { for _, handler := range ea.handlers { for _, challenge := range challenges { if challenge.Scheme != handler.Scheme() { continue } if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { return err } } } } return nil } // This is the minimum duration a token can last (in seconds). // A token must not live less than 60 seconds because older versions // of the Docker client didn't read their expiration from the token // response and assumed 60 seconds. So to remain compatible with // those implementations, a token must live at least this long. const minimumTokenLifetimeSeconds = 60 // Private interface for time used by this package to enable tests to provide their own implementation. type clock interface { Now() time.Time } type tokenHandler struct { header http.Header creds CredentialStore scope tokenScope transport http.RoundTripper clock clock tokenLock sync.Mutex tokenCache string tokenExpiration time.Time additionalScopes map[string]struct{} } // tokenScope represents the scope at which a token will be requested. // This represents a specific action on a registry resource. type tokenScope struct { Resource string Scope string Actions []string } func (ts tokenScope) String() string { return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ",")) } // An implementation of clock for providing real time data. type realClock struct{} // Now implements clock func (realClock) Now() time.Time { return time.Now() } // NewTokenHandler creates a new AuthenicationHandler which supports // fetching tokens from a remote token server. func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { return newTokenHandler(transport, creds, realClock{}, scope, actions...) } // newTokenHandler exposes the option to provide a clock to manipulate time in unit testing. 
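//
// A sketch of how these handlers are typically composed on the client side;
// the repository name, credential store and nil base transport below are
// assumptions for illustration, not part of this package:
//
//	manager := NewSimpleChallengeManager()
//	// ... populate manager by passing a /v2/ ping response to manager.AddResponse ...
//	modifier := NewAuthorizer(manager,
//		NewTokenHandler(nil, creds, "library/ubuntu", "pull"),
//		NewBasicHandler(creds))
//	client := &http.Client{Transport: transport.NewTransport(nil, modifier)}
//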
func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock, scope string, actions ...string) AuthenticationHandler { return &tokenHandler{ transport: transport, creds: creds, clock: c, scope: tokenScope{ Resource: "repository", Scope: scope, Actions: actions, }, additionalScopes: map[string]struct{}{}, } } func (th *tokenHandler) client() *http.Client { return &http.Client{ Transport: th.transport, Timeout: 15 * time.Second, } } func (th *tokenHandler) Scheme() string { return "bearer" } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { var additionalScopes []string if fromParam := req.URL.Query().Get("from"); fromParam != "" { additionalScopes = append(additionalScopes, tokenScope{ Resource: "repository", Scope: fromParam, Actions: []string{"pull"}, }.String()) } if err := th.refreshToken(params, additionalScopes...); err != nil { return err } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.tokenCache)) return nil } func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() var addedScopes bool for _, scope := range additionalScopes { if _, ok := th.additionalScopes[scope]; !ok { th.additionalScopes[scope] = struct{}{} addedScopes = true } } now := th.clock.Now() if now.After(th.tokenExpiration) || addedScopes { tr, err := th.fetchToken(params) if err != nil { return err } th.tokenCache = tr.Token th.tokenExpiration = tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second) } return nil } type tokenResponse struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` } func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenResponse, err error) { //log.Debugf("Getting bearer token with %s for %s", challenge.Parameters, ta.auth.Username) realm, ok := params["realm"] if !ok { return nil, errors.New("no realm specified for token auth challenge") } // TODO(dmcgowan): Handle empty scheme realmURL, err := url.Parse(realm) if err != nil { return nil, fmt.Errorf("invalid token auth challenge realm: %s", err) } req, err := http.NewRequest("GET", realmURL.String(), nil) if err != nil { return nil, err } reqParams := req.URL.Query() service := params["service"] scope := th.scope.String() if service != "" { reqParams.Add("service", service) } for _, scopeField := range strings.Fields(scope) { reqParams.Add("scope", scopeField) } for scope := range th.additionalScopes { reqParams.Add("scope", scope) } if th.creds != nil { username, password := th.creds.Basic(realmURL) if username != "" && password != "" { reqParams.Add("account", username) req.SetBasicAuth(username, password) } } req.URL.RawQuery = reqParams.Encode() resp, err := th.client().Do(req) if err != nil { return nil, err } defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { err := client.HandleErrorResponse(resp) return nil, err } decoder := json.NewDecoder(resp.Body) tr := new(tokenResponse) if err = decoder.Decode(tr); err != nil { return nil, fmt.Errorf("unable to decode token response: %s", err) } // `access_token` is equivalent to `token` and if both are specified // the choice is undefined. Canonicalize `access_token` by sticking // things in `token`. 
if tr.AccessToken != "" { tr.Token = tr.AccessToken } if tr.Token == "" { return nil, errors.New("authorization server did not include a token in the response") } if tr.ExpiresIn < minimumTokenLifetimeSeconds { logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) // The default/minimum lifetime. tr.ExpiresIn = minimumTokenLifetimeSeconds } if tr.IssuedAt.IsZero() { // issued_at is optional in the token response. tr.IssuedAt = th.clock.Now() } return tr, nil } type basicHandler struct { creds CredentialStore } // NewBasicHandler creaters a new authentiation handler which adds // basic authentication credentials to a request. func NewBasicHandler(creds CredentialStore) AuthenticationHandler { return &basicHandler{ creds: creds, } } func (*basicHandler) Scheme() string { return "basic" } func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { if bh.creds != nil { username, password := bh.creds.Basic(req.URL) if username != "" && password != "" { req.SetBasicAuth(username, password) return nil } } return errors.New("no basic auth credentials") } distribution-2.3.0/registry/client/auth/session_test.go000066400000000000000000000425261265472114500234130ustar00rootroot00000000000000package auth import ( "encoding/base64" "fmt" "net/http" "net/http/httptest" "net/url" "testing" "time" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/testutil" ) // An implementation of clock for providing fake time data. type fakeClock struct { current time.Time } // Now implements clock func (fc *fakeClock) Now() time.Time { return fc.current } func testServer(rrm testutil.RequestResponseMap) (string, func()) { h := testutil.NewHandler(rrm) s := httptest.NewServer(h) return s.URL, s.Close } type testAuthenticationWrapper struct { headers http.Header authCheck func(string) bool next http.Handler } func (w *testAuthenticationWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { auth := r.Header.Get("Authorization") if auth == "" || !w.authCheck(auth) { h := rw.Header() for k, values := range w.headers { h[k] = values } rw.WriteHeader(http.StatusUnauthorized) return } w.next.ServeHTTP(rw, r) } func testServerWithAuth(rrm testutil.RequestResponseMap, authenticate string, authCheck func(string) bool) (string, func()) { h := testutil.NewHandler(rrm) wrapper := &testAuthenticationWrapper{ headers: http.Header(map[string][]string{ "X-API-Version": {"registry/2.0"}, "X-Multi-API-Version": {"registry/2.0", "registry/2.1", "trust/1.0"}, "WWW-Authenticate": {authenticate}, }), authCheck: authCheck, next: h, } s := httptest.NewServer(wrapper) return s.URL, s.Close } // ping pings the provided endpoint to determine its required authorization challenges. // If a version header is provided, the versions will be returned. 
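// For example (the server URL is an assumption made by the tests below):
//
//	manager := NewSimpleChallengeManager()
//	versions, err := ping(manager, server+"/v2/", "x-api-version")
//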
func ping(manager ChallengeManager, endpoint, versionHeader string) ([]APIVersion, error) { resp, err := http.Get(endpoint) if err != nil { return nil, err } defer resp.Body.Close() if err := manager.AddResponse(resp); err != nil { return nil, err } return APIVersions(resp, versionHeader), err } type testCredentialStore struct { username string password string } func (tcs *testCredentialStore) Basic(*url.URL) (string, string) { return tcs.username, tcs.password } func TestEndpointAuthorizeToken(t *testing.T) { service := "localhost.localdomain" repo1 := "some/registry" repo2 := "other/registry" scope1 := fmt.Sprintf("repository:%s:pull,push", repo1) scope2 := fmt.Sprintf("repository:%s:pull,push", repo2) tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope1), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"token":"statictoken"}`), }, }, { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?scope=%s&service=%s", url.QueryEscape(scope2), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"token":"badtoken"}`), }, }, }) te, tc := testServer(tokenMap) defer tc() m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, }) authenicate := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) validCheck := func(a string) bool { return a == "Bearer statictoken" } e, c := testServerWithAuth(m, authenicate, validCheck) defer c() challengeManager1 := NewSimpleChallengeManager() versions, err := ping(challengeManager1, e+"/v2/", "x-api-version") if err != nil { t.Fatal(err) } if len(versions) != 1 { t.Fatalf("Unexpected version count: %d, expected 1", len(versions)) } if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) } transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager1, NewTokenHandler(nil, nil, repo1, "pull", "push"))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } badCheck := func(a string) bool { return a == "Bearer statictoken" } e2, c2 := testServerWithAuth(m, authenicate, badCheck) defer c2() challengeManager2 := NewSimpleChallengeManager() versions, err = ping(challengeManager2, e+"/v2/", "x-multi-api-version") if err != nil { t.Fatal(err) } if len(versions) != 3 { t.Fatalf("Unexpected version count: %d, expected 3", len(versions)) } if check := (APIVersion{Type: "registry", Version: "2.0"}); versions[0] != check { t.Fatalf("Unexpected api version: %#v, expected %#v", versions[0], check) } if check := (APIVersion{Type: "registry", Version: "2.1"}); versions[1] != check { t.Fatalf("Unexpected api version: %#v, expected %#v", versions[1], check) } if check := (APIVersion{Type: "trust", Version: "1.0"}); versions[2] != check { t.Fatalf("Unexpected api version: %#v, expected %#v", versions[2], check) } transport2 := transport.NewTransport(nil, NewAuthorizer(challengeManager2, NewTokenHandler(nil, nil, repo2, "pull", 
"push"))) client2 := &http.Client{Transport: transport2} req, _ = http.NewRequest("GET", e2+"/v2/hello", nil) resp, err = client2.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusUnauthorized { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusUnauthorized) } } func basicAuth(username, password string) string { auth := username + ":" + password return base64.StdEncoding.EncodeToString([]byte(auth)) } func TestEndpointAuthorizeTokenBasic(t *testing.T) { service := "localhost.localdomain" repo := "some/fun/registry" scope := fmt.Sprintf("repository:%s:pull,push", repo) username := "tokenuser" password := "superSecretPa$$word" tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"access_token":"statictoken"}`), }, }, }) authenicate1 := fmt.Sprintf("Basic realm=localhost") basicCheck := func(a string) bool { return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) } te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) defer tc() m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, }) authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) bearerCheck := func(a string) bool { return a == "Bearer statictoken" } e, c := testServerWithAuth(m, authenicate2, bearerCheck) defer c() creds := &testCredentialStore{ username: username, password: password, } challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) } transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewTokenHandler(nil, creds, repo, "pull", "push"), NewBasicHandler(creds))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } } func TestEndpointAuthorizeTokenBasicWithExpiresIn(t *testing.T) { service := "localhost.localdomain" repo := "some/fun/registry" scope := fmt.Sprintf("repository:%s:pull,push", repo) username := "tokenuser" password := "superSecretPa$$word" tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"token":"statictoken", "expires_in": 3001}`), }, }, { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"access_token":"statictoken", "expires_in": 3001}`), }, }, }) authenicate1 := fmt.Sprintf("Basic realm=localhost") tokenExchanges := 0 basicCheck := func(a string) bool { tokenExchanges = tokenExchanges + 1 return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) } te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) defer 
tc() m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, }) authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) bearerCheck := func(a string) bool { return a == "Bearer statictoken" } e, c := testServerWithAuth(m, authenicate2, bearerCheck) defer c() creds := &testCredentialStore{ username: username, password: password, } challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) } clock := &fakeClock{current: time.Now()} transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) client := &http.Client{Transport: transport1} // First call should result in a token exchange // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. timeIncrement := 1000 * time.Second for i := 0; i < 4; i++ { req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } if tokenExchanges != 1 { t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) } clock.current = clock.current.Add(timeIncrement) } // After we've exceeded the expiration, we should see a second token exchange. req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } if tokenExchanges != 2 { t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) } } func TestEndpointAuthorizeTokenBasicWithExpiresInAndIssuedAt(t *testing.T) { service := "localhost.localdomain" repo := "some/fun/registry" scope := fmt.Sprintf("repository:%s:pull,push", repo) username := "tokenuser" password := "superSecretPa$$word" // This test sets things up such that the token was issued one increment // earlier than its sibling in TestEndpointAuthorizeTokenBasicWithExpiresIn. // This will mean that the token expires after 3 increments instead of 4. 
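// (Concretely: the token is valid for 3001 seconds and the fake clock
// advances in 1000-second steps, so a token stamped one step in the past
// only covers three requests before a new exchange is needed.)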
clock := &fakeClock{current: time.Now()} timeIncrement := 1000 * time.Second firstIssuedAt := clock.Now() clock.current = clock.current.Add(timeIncrement) secondIssuedAt := clock.current.Add(2 * timeIncrement) tokenMap := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"token":"statictoken", "issued_at": "` + firstIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), }, }, { Request: testutil.Request{ Method: "GET", Route: fmt.Sprintf("/token?account=%s&scope=%s&service=%s", username, url.QueryEscape(scope), service), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: []byte(`{"access_token":"statictoken", "issued_at": "` + secondIssuedAt.Format(time.RFC3339Nano) + `", "expires_in": 3001}`), }, }, }) authenicate1 := fmt.Sprintf("Basic realm=localhost") tokenExchanges := 0 basicCheck := func(a string) bool { tokenExchanges = tokenExchanges + 1 return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) } te, tc := testServerWithAuth(tokenMap, authenicate1, basicCheck) defer tc() m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, }) authenicate2 := fmt.Sprintf("Bearer realm=%q,service=%q", te+"/token", service) bearerCheck := func(a string) bool { return a == "Bearer statictoken" } e, c := testServerWithAuth(m, authenicate2, bearerCheck) defer c() creds := &testCredentialStore{ username: username, password: password, } challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) } transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, newTokenHandler(nil, creds, clock, repo, "pull", "push"), NewBasicHandler(creds))) client := &http.Client{Transport: transport1} // First call should result in a token exchange // Subsequent calls should recycle the token from the first request, until the expiration has lapsed. // We shaved one increment off of the equivalent logic in TestEndpointAuthorizeTokenBasicWithExpiresIn // so this loop should have one fewer iteration. for i := 0; i < 3; i++ { req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } if tokenExchanges != 1 { t.Fatalf("Unexpected number of token exchanges, want: 1, got %d (iteration: %d)", tokenExchanges, i) } clock.current = clock.current.Add(timeIncrement) } // After we've exceeded the expiration, we should see a second token exchange. 
req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } if tokenExchanges != 2 { t.Fatalf("Unexpected number of token exchanges, want: 2, got %d", tokenExchanges) } } func TestEndpointAuthorizeBasic(t *testing.T) { m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: "/v2/hello", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, }, }, }) username := "user1" password := "funSecretPa$$word" authenicate := fmt.Sprintf("Basic realm=localhost") validCheck := func(a string) bool { return a == fmt.Sprintf("Basic %s", basicAuth(username, password)) } e, c := testServerWithAuth(m, authenicate, validCheck) defer c() creds := &testCredentialStore{ username: username, password: password, } challengeManager := NewSimpleChallengeManager() _, err := ping(challengeManager, e+"/v2/", "") if err != nil { t.Fatal(err) } transport1 := transport.NewTransport(nil, NewAuthorizer(challengeManager, NewBasicHandler(creds))) client := &http.Client{Transport: transport1} req, _ := http.NewRequest("GET", e+"/v2/hello", nil) resp, err := client.Do(req) if err != nil { t.Fatalf("Error sending get request: %s", err) } if resp.StatusCode != http.StatusAccepted { t.Fatalf("Unexpected status code: %d, expected %d", resp.StatusCode, http.StatusAccepted) } } distribution-2.3.0/registry/client/blob_writer.go000066400000000000000000000077121265472114500222400ustar00rootroot00000000000000package client import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "os" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" ) type httpBlobUpload struct { statter distribution.BlobStatter client *http.Client uuid string startedAt time.Time location string // always the last value of the location header. 
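// offset tracks how many bytes have been written so far; Write uses it to
// build the Content-Range header sent with each PATCH request.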
offset int64 closed bool } func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { panic("Not implemented") } func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } return HandleErrorResponse(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) if err != nil { return 0, err } defer req.Body.Close() resp, err := hbu.client.Do(req) if err != nil { return 0, err } if !SuccessStatus(resp.StatusCode) { return 0, hbu.handleErrorResponse(resp) } hbu.uuid = resp.Header.Get("Docker-Upload-UUID") hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { return 0, err } rng := resp.Header.Get("Range") var start, end int64 if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { return 0, err } else if n != 2 || end < start { return 0, fmt.Errorf("bad range format: %s", rng) } return (end - start + 1), nil } func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) if err != nil { return 0, err } req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) req.Header.Set("Content-Type", "application/octet-stream") resp, err := hbu.client.Do(req) if err != nil { return 0, err } if !SuccessStatus(resp.StatusCode) { return 0, hbu.handleErrorResponse(resp) } hbu.uuid = resp.Header.Get("Docker-Upload-UUID") hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) if err != nil { return 0, err } rng := resp.Header.Get("Range") var start, end int if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { return 0, err } else if n != 2 || end < start { return 0, fmt.Errorf("bad range format: %s", rng) } return (end - start + 1), nil } func (hbu *httpBlobUpload) Seek(offset int64, whence int) (int64, error) { newOffset := hbu.offset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: newOffset += int64(offset) case os.SEEK_SET: newOffset = int64(offset) } hbu.offset = newOffset return hbu.offset, nil } func (hbu *httpBlobUpload) ID() string { return hbu.uuid } func (hbu *httpBlobUpload) StartedAt() time.Time { return hbu.startedAt } func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { // TODO(dmcgowan): Check if already finished, if so just fetch req, err := http.NewRequest("PUT", hbu.location, nil) if err != nil { return distribution.Descriptor{}, err } values := req.URL.Query() values.Set("digest", desc.Digest.String()) req.URL.RawQuery = values.Encode() resp, err := hbu.client.Do(req) if err != nil { return distribution.Descriptor{}, err } defer resp.Body.Close() if !SuccessStatus(resp.StatusCode) { return distribution.Descriptor{}, hbu.handleErrorResponse(resp) } return hbu.statter.Stat(ctx, desc.Digest) } func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { req, err := http.NewRequest("DELETE", hbu.location, nil) if err != nil { return err } resp, err := hbu.client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { return nil } return hbu.handleErrorResponse(resp) } func (hbu *httpBlobUpload) Close() error { hbu.closed = true return nil } 
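// A sketch of driving this uploader through the distribution.BlobWriter
// interface; repo, ctx and the payload p are assumptions for illustration:
//
//	writer, err := repo.Blobs(ctx).Create(ctx)
//	if err != nil { /* handle */ }
//	if _, err := writer.ReadFrom(bytes.NewReader(p)); err != nil { /* handle */ }
//	desc, err := writer.Commit(ctx, distribution.Descriptor{
//		MediaType: "application/octet-stream",
//		Size:      int64(len(p)),
//		Digest:    digest.FromBytes(p),
//	})
//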
distribution-2.3.0/registry/client/blob_writer_test.go000066400000000000000000000125761265472114500233030ustar00rootroot00000000000000package client import ( "bytes" "fmt" "net/http" "testing" "github.com/docker/distribution" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/testutil" ) // Test implements distribution.BlobWriter var _ distribution.BlobWriter = &httpBlobUpload{} func TestUploadReadFrom(t *testing.T) { _, b := newRandomBlob(64) repo := "test/upload/readfrom" locationPath := fmt.Sprintf("/v2/%s/uploads/testid", repo) m := testutil.RequestResponseMap([]testutil.RequestResponseMapping{ { Request: testutil.Request{ Method: "GET", Route: "/v2/", }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Docker-Distribution-API-Version": {"registry/2.0"}, }), }, }, // Test Valid case { Request: testutil.Request{ Method: "PATCH", Route: locationPath, Body: b, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, "Location": {locationPath}, "Range": {"0-63"}, }), }, }, // Test invalid range { Request: testutil.Request{ Method: "PATCH", Route: locationPath, Body: b, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Docker-Upload-UUID": {"46603072-7a1b-4b41-98f9-fd8a7da89f9b"}, "Location": {locationPath}, "Range": {""}, }), }, }, // Test 404 { Request: testutil.Request{ Method: "PATCH", Route: locationPath, Body: b, }, Response: testutil.Response{ StatusCode: http.StatusNotFound, }, }, // Test 400 valid json { Request: testutil.Request{ Method: "PATCH", Route: locationPath, Body: b, }, Response: testutil.Response{ StatusCode: http.StatusBadRequest, Body: []byte(` { "errors": [ { "code": "BLOB_UPLOAD_INVALID", "message": "blob upload invalid", "detail": "more detail" } ] } `), }, }, // Test 400 invalid json { Request: testutil.Request{ Method: "PATCH", Route: locationPath, Body: b, }, Response: testutil.Response{ StatusCode: http.StatusBadRequest, Body: []byte("something bad happened"), }, }, // Test 500 { Request: testutil.Request{ Method: "PATCH", Route: locationPath, Body: b, }, Response: testutil.Response{ StatusCode: http.StatusInternalServerError, }, }, }) e, c := testServer(m) defer c() blobUpload := &httpBlobUpload{ client: &http.Client{}, } // Valid case blobUpload.location = e + locationPath n, err := blobUpload.ReadFrom(bytes.NewReader(b)) if err != nil { t.Fatalf("Error calling ReadFrom: %s", err) } if n != 64 { t.Fatalf("Wrong length returned from ReadFrom: %d, expected 64", n) } // Bad range blobUpload.location = e + locationPath _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when bad range received") } // 404 blobUpload.location = e + locationPath _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } if err != distribution.ErrBlobUploadUnknown { t.Fatalf("Wrong error thrown: %s, expected %s", err, distribution.ErrBlobUploadUnknown) } // 400 valid json blobUpload.location = e + locationPath _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } if uploadErr, ok := err.(errcode.Errors); !ok { t.Fatalf("Wrong error type %T: %s", err, err) } else if len(uploadErr) != 1 { t.Fatalf("Unexpected number of errors: %d, expected 1", 
len(uploadErr)) } else { v2Err, ok := uploadErr[0].(errcode.Error) if !ok { t.Fatalf("Not an 'Error' type: %#v", uploadErr[0]) } if v2Err.Code != v2.ErrorCodeBlobUploadInvalid { t.Fatalf("Unexpected error code: %s, expected %d", v2Err.Code.String(), v2.ErrorCodeBlobUploadInvalid) } if expected := "blob upload invalid"; v2Err.Message != expected { t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Message, expected) } if expected := "more detail"; v2Err.Detail.(string) != expected { t.Fatalf("Unexpected error message: %q, expected %q", v2Err.Detail.(string), expected) } } // 400 invalid json blobUpload.location = e + locationPath _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } if uploadErr, ok := err.(*UnexpectedHTTPResponseError); !ok { t.Fatalf("Wrong error type %T: %s", err, err) } else { respStr := string(uploadErr.Response) if expected := "something bad happened"; respStr != expected { t.Fatalf("Unexpected response string: %s, expected: %s", respStr, expected) } } // 500 blobUpload.location = e + locationPath _, err = blobUpload.ReadFrom(bytes.NewReader(b)) if err == nil { t.Fatalf("Expected error when not found") } if uploadErr, ok := err.(*UnexpectedHTTPStatusError); !ok { t.Fatalf("Wrong error type %T: %s", err, err) } else if expected := "500 " + http.StatusText(http.StatusInternalServerError); uploadErr.Status != expected { t.Fatalf("Unexpected response status: %s, expected %s", uploadErr.Status, expected) } } distribution-2.3.0/registry/client/errors.go000066400000000000000000000046551265472114500212450ustar00rootroot00000000000000package client import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "github.com/docker/distribution/registry/api/errcode" ) // UnexpectedHTTPStatusError is returned when an unexpected HTTP status is // returned when making a registry api call. type UnexpectedHTTPStatusError struct { Status string } func (e *UnexpectedHTTPStatusError) Error() string { return fmt.Sprintf("Received unexpected HTTP status: %s", e.Status) } // UnexpectedHTTPResponseError is returned when an expected HTTP status code // is returned, but the content was unexpected and failed to be parsed. type UnexpectedHTTPResponseError struct { ParseErr error Response []byte } func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } // For backward compatibility, handle irregularly formatted // messages that contain a "details" field. var detailsErr struct { Details string `json:"details"` } err = json.Unmarshal(body, &detailsErr) if err == nil && detailsErr.Details != "" { if statusCode == http.StatusUnauthorized { return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) } return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) } if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ ParseErr: err, Response: body, } } return errors } // HandleErrorResponse returns error parsed from HTTP response for an // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An // UnexpectedHTTPStatusError returned for response code outside of expected // range. 
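//
// For example, a 400 response whose body is
//
//	{"errors":[{"code":"DIGEST_INVALID","message":"provided digest does not match"}]}
//
// is surfaced as an errcode.Errors value whose message reads
// "digest invalid: provided digest does not match" (see the tests below).
//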
func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } // SuccessStatus returns true if the argument is a successful HTTP response // code (in the range 200 - 399 inclusive). func SuccessStatus(status int) bool { return status >= 200 && status <= 399 } distribution-2.3.0/registry/client/errors_test.go000066400000000000000000000052271265472114500223000ustar00rootroot00000000000000package client import ( "bytes" "io" "net/http" "strings" "testing" ) type nopCloser struct { io.Reader } func (nopCloser) Close() error { return nil } func TestHandleErrorResponse401ValidBody(t *testing.T) { json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" response := &http.Response{ Status: "401 Unauthorized", StatusCode: 401, Body: nopCloser{bytes.NewBufferString(json)}, } err := HandleErrorResponse(response) expectedMsg := "unauthorized: action requires authentication" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } } func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { json := "{invalid json}" response := &http.Response{ Status: "401 Unauthorized", StatusCode: 401, Body: nopCloser{bytes.NewBufferString(json)}, } err := HandleErrorResponse(response) expectedMsg := "unauthorized: authentication required" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } } func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" response := &http.Response{ Status: "400 Bad Request", StatusCode: 400, Body: nopCloser{bytes.NewBufferString(json)}, } err := HandleErrorResponse(response) expectedMsg := "digest invalid: provided digest does not match" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } } func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { json := "{invalid json}" response := &http.Response{ Status: "404 Not Found", StatusCode: 404, Body: nopCloser{bytes.NewBufferString(json)}, } err := HandleErrorResponse(response) expectedMsg := "Error parsing HTTP response: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } } func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { response := &http.Response{ Status: "501 Not Implemented", StatusCode: 501, Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, } err := HandleErrorResponse(response) expectedMsg := "Received unexpected HTTP status: 501 Not Implemented" if !strings.Contains(err.Error(), expectedMsg) { t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) } } distribution-2.3.0/registry/client/repository.go000066400000000000000000000447561265472114500221560ustar00rootroot00000000000000package client import ( "bytes" "encoding/json" "errors" "fmt" "io" "io/ioutil" 
"net/http" "net/url" "strconv" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/storage/cache" "github.com/docker/distribution/registry/storage/cache/memory" ) // Registry provides an interface for calling Repositories, which returns a catalog of repositories. type Registry interface { Repositories(ctx context.Context, repos []string, last string) (n int, err error) } // NewRegistry creates a registry namespace which can be used to get a listing of repositories func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err } client := &http.Client{ Transport: transport, Timeout: 1 * time.Minute, } return ®istry{ client: client, ub: ub, context: ctx, }, nil } type registry struct { client *http.Client ub *v2.URLBuilder context context.Context } // Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size // of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there // are no more entries func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { var numFilled int var returnErr error values := buildCatalogValues(len(entries), last) u, err := r.ub.BuildCatalogURL(values) if err != nil { return 0, err } resp, err := r.client.Get(u) if err != nil { return 0, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { var ctlg struct { Repositories []string `json:"repositories"` } decoder := json.NewDecoder(resp.Body) if err := decoder.Decode(&ctlg); err != nil { return 0, err } for cnt := range ctlg.Repositories { entries[cnt] = ctlg.Repositories[cnt] } numFilled = len(ctlg.Repositories) link := resp.Header.Get("Link") if link == "" { returnErr = io.EOF } } else { return 0, HandleErrorResponse(resp) } return numFilled, returnErr } // NewRepository creates a new Repository for the given repository name and base URL. 
func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err } client := &http.Client{ Transport: transport, // TODO(dmcgowan): create cookie jar } return &repository{ client: client, ub: ub, name: name, context: ctx, }, nil } type repository struct { client *http.Client ub *v2.URLBuilder context context.Context name reference.Named } func (r *repository) Name() reference.Named { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ name: r.name, ub: r.ub, client: r.client, } return &blobs{ name: r.name, ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), } } func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ name: r.name, ub: r.ub, client: r.client, etags: make(map[string]string), }, nil } func (r *repository) Tags(ctx context.Context) distribution.TagService { return &tags{ client: r.client, ub: r.ub, context: r.context, name: r.Name(), } } // tags implements remote tagging operations. type tags struct { client *http.Client ub *v2.URLBuilder context context.Context name reference.Named } // All returns all tags func (t *tags) All(ctx context.Context) ([]string, error) { var tags []string u, err := t.ub.BuildTagsURL(t.name) if err != nil { return tags, err } resp, err := t.client.Get(u) if err != nil { return tags, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { b, err := ioutil.ReadAll(resp.Body) if err != nil { return tags, err } tagsResponse := struct { Tags []string `json:"tags"` }{} if err := json.Unmarshal(b, &tagsResponse); err != nil { return tags, err } tags = tagsResponse.Tags return tags, nil } return tags, HandleErrorResponse(resp) } func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { desc := distribution.Descriptor{} headers := response.Header ctHeader := headers.Get("Content-Type") if ctHeader == "" { return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") } desc.MediaType = ctHeader digestHeader := headers.Get("Docker-Content-Digest") if digestHeader == "" { bytes, err := ioutil.ReadAll(response.Body) if err != nil { return distribution.Descriptor{}, err } _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) if err != nil { return distribution.Descriptor{}, err } return desc, nil } dgst, err := digest.ParseDigest(digestHeader) if err != nil { return distribution.Descriptor{}, err } desc.Digest = dgst lengthHeader := headers.Get("Content-Length") if lengthHeader == "" { return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") } length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { return distribution.Descriptor{}, err } desc.Size = length return desc, nil } // Get issues a HEAD request for a Manifest against its named endpoint in order // to construct a descriptor for the tag. If the registry doesn't support HEADing // a manifest, fallback to GET. 
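//
// A sketch of typical tag operations; the repository name and base URL are
// assumptions:
//
//	named, _ := reference.ParseNamed("registry.example.com/some/repo")
//	repo, err := NewRepository(ctx, named, "https://registry.example.com", nil)
//	if err != nil { /* handle */ }
//	tagSvc := repo.Tags(ctx)
//	all, err := tagSvc.All(ctx)            // lists every tag in the repository
//	desc, err := tagSvc.Get(ctx, "latest") // descriptor for the "latest" tag
//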
func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { ref, err := reference.WithTag(t.name, tag) if err != nil { return distribution.Descriptor{}, err } u, err := t.ub.BuildManifestURL(ref) if err != nil { return distribution.Descriptor{}, err } var attempts int resp, err := t.client.Head(u) check: if err != nil { return distribution.Descriptor{}, err } switch { case resp.StatusCode >= 200 && resp.StatusCode < 400: return descriptorFromResponse(resp) case resp.StatusCode == http.StatusMethodNotAllowed: resp, err = t.client.Get(u) attempts++ if attempts > 1 { return distribution.Descriptor{}, err } goto check default: return distribution.Descriptor{}, HandleErrorResponse(resp) } } func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { panic("not implemented") } func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { panic("not implemented") } func (t *tags) Untag(ctx context.Context, tag string) error { panic("not implemented") } type manifests struct { name reference.Named ub *v2.URLBuilder client *http.Client etags map[string]string } func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { ref, err := reference.WithDigest(ms.name, dgst) if err != nil { return false, err } u, err := ms.ub.BuildManifestURL(ref) if err != nil { return false, err } resp, err := ms.client.Head(u) if err != nil { return false, err } if SuccessStatus(resp.StatusCode) { return true, nil } else if resp.StatusCode == http.StatusNotFound { return false, nil } return false, HandleErrorResponse(resp) } // AddEtagToTag allows a client to supply an eTag to Get which will be // used for a conditional HTTP request. If the eTag matches, a nil manifest // and ErrManifestNotModified error will be returned. etag is automatically // quoted when added to this map. 
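//
// A sketch of a conditional fetch using this option; the tag and previous
// etag value are assumptions:
//
//	ms, _ := repo.Manifests(ctx)
//	m, err := ms.Get(ctx, "", WithTag("latest"), AddEtagToTag("latest", previousEtag))
//	if err == distribution.ErrManifestNotModified {
//		// the cached manifest is still current
//	}
//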
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { return etagOption{tag, etag} } type etagOption struct{ tag, etag string } func (o etagOption) Apply(ms distribution.ManifestService) error { if ms, ok := ms.(*manifests); ok { ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) return nil } return fmt.Errorf("etag options is a client-only option") } func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { var ( digestOrTag string ref reference.Named err error ) for _, option := range options { if opt, ok := option.(withTagOption); ok { digestOrTag = opt.tag ref, err = reference.WithTag(ms.name, opt.tag) if err != nil { return nil, err } } else { err := option.Apply(ms) if err != nil { return nil, err } } } if digestOrTag == "" { digestOrTag = dgst.String() ref, err = reference.WithDigest(ms.name, dgst) if err != nil { return nil, err } } u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err } req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, err } for _, t := range distribution.ManifestMediaTypes() { req.Header.Add("Accept", t) } if _, ok := ms.etags[digestOrTag]; ok { req.Header.Set("If-None-Match", ms.etags[digestOrTag]) } resp, err := ms.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { mt := resp.Header.Get("Content-Type") body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } m, _, err := distribution.UnmarshalManifest(mt, body) if err != nil { return nil, err } return m, nil } return nil, HandleErrorResponse(resp) } // WithTag allows a tag to be passed into Put which enables the client // to build a correct URL. func WithTag(tag string) distribution.ManifestServiceOption { return withTagOption{tag} } type withTagOption struct{ tag string } func (o withTagOption) Apply(m distribution.ManifestService) error { if _, ok := m.(*manifests); ok { return nil } return fmt.Errorf("withTagOption is a client-only option") } // Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the // tag name in order to build the correct upload URL. This state is written and read under a lock. 
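//
// For example, pushing a manifest under a tag (m is an assumed
// distribution.Manifest value):
//
//	dgst, err := ms.Put(ctx, m, WithTag("latest"))
//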
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { ref := ms.name for _, option := range options { if opt, ok := option.(withTagOption); ok { var err error ref, err = reference.WithTag(ref, opt.tag) if err != nil { return "", err } } else { err := option.Apply(ms) if err != nil { return "", err } } } manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { return "", err } mediaType, p, err := m.Payload() if err != nil { return "", err } putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) if err != nil { return "", err } putRequest.Header.Set("Content-Type", mediaType) resp, err := ms.client.Do(putRequest) if err != nil { return "", err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { dgstHeader := resp.Header.Get("Docker-Content-Digest") dgst, err := digest.ParseDigest(dgstHeader) if err != nil { return "", err } return dgst, nil } return "", HandleErrorResponse(resp) } func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { ref, err := reference.WithDigest(ms.name, dgst) if err != nil { return err } u, err := ms.ub.BuildManifestURL(ref) if err != nil { return err } req, err := http.NewRequest("DELETE", u, nil) if err != nil { return err } resp, err := ms.client.Do(req) if err != nil { return err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { return nil } return HandleErrorResponse(resp) } // todo(richardscothern): Restore interface and implementation with merge of #1050 /*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { panic("not supported") }*/ type blobs struct { name reference.Named ub *v2.URLBuilder client *http.Client statter distribution.BlobDescriptorService distribution.BlobDeleter } func sanitizeLocation(location, base string) (string, error) { baseURL, err := url.Parse(base) if err != nil { return "", err } locationURL, err := url.Parse(location) if err != nil { return "", err } return baseURL.ResolveReference(locationURL).String(), nil } func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { return bs.statter.Stat(ctx, dgst) } func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { reader, err := bs.Open(ctx, dgst) if err != nil { return nil, err } defer reader.Close() return ioutil.ReadAll(reader) } func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { ref, err := reference.WithDigest(bs.name, dgst) if err != nil { return nil, err } blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return nil, err } return transport.NewHTTPReadSeeker(bs.client, blobURL, func(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUnknown } return HandleErrorResponse(resp) }), nil } func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { panic("not implemented") } func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { writer, err := bs.Create(ctx) if err != nil { return distribution.Descriptor{}, err } dgstr := digest.Canonical.New() n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) if err != nil { return distribution.Descriptor{}, err } if n < int64(len(p)) { return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) } 
desc := distribution.Descriptor{ MediaType: mediaType, Size: int64(len(p)), Digest: dgstr.Digest(), } return writer.Commit(ctx, desc) } // createOptions is a collection of blob creation modifiers relevant to general // blob storage intended to be configured by the BlobCreateOption.Apply method. type createOptions struct { Mount struct { ShouldMount bool From reference.Canonical } } type optionFunc func(interface{}) error func (f optionFunc) Apply(v interface{}) error { return f(v) } // WithMountFrom returns a BlobCreateOption which designates that the blob should be // mounted from the given canonical reference. func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { return optionFunc(func(v interface{}) error { opts, ok := v.(*createOptions) if !ok { return fmt.Errorf("unexpected options type: %T", v) } opts.Mount.ShouldMount = true opts.Mount.From = ref return nil }) } func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { var opts createOptions for _, option := range options { err := option.Apply(&opts) if err != nil { return nil, err } } var values []url.Values if opts.Mount.ShouldMount { values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) } u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) if err != nil { return nil, err } resp, err := bs.client.Post(u, "", nil) if err != nil { return nil, err } defer resp.Body.Close() switch resp.StatusCode { case http.StatusCreated: desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) if err != nil { return nil, err } return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) if err != nil { return nil, err } return &httpBlobUpload{ statter: bs.statter, client: bs.client, uuid: uuid, startedAt: time.Now(), location: location, }, nil default: return nil, HandleErrorResponse(resp) } } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { panic("not implemented") } func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { return bs.statter.Clear(ctx, dgst) } type blobStatter struct { name reference.Named ub *v2.URLBuilder client *http.Client } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { ref, err := reference.WithDigest(bs.name, dgst) if err != nil { return distribution.Descriptor{}, err } u, err := bs.ub.BuildBlobURL(ref) if err != nil { return distribution.Descriptor{}, err } resp, err := bs.client.Head(u) if err != nil { return distribution.Descriptor{}, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { lengthHeader := resp.Header.Get("Content-Length") if lengthHeader == "" { return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) } length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) } return distribution.Descriptor{ MediaType: resp.Header.Get("Content-Type"), Size: length, Digest: dgst, }, nil } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown } return distribution.Descriptor{}, HandleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) 
url.Values { values := url.Values{} if maxEntries > 0 { values.Add("n", strconv.Itoa(maxEntries)) } if last != "" { values.Add("last", last) } return values } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { ref, err := reference.WithDigest(bs.name, dgst) if err != nil { return err } blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return err } req, err := http.NewRequest("DELETE", blobURL, nil) if err != nil { return err } resp, err := bs.client.Do(req) if err != nil { return err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { return nil } return HandleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { return nil } distribution-2.3.0/registry/client/repository_test.go000066400000000000000000000663421265472114500232100ustar00rootroot00000000000000package client import ( "bytes" "crypto/rand" "fmt" "io" "log" "net/http" "net/http/httptest" "strconv" "strings" "testing" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/testutil" "github.com/docker/distribution/uuid" "github.com/docker/libtrust" ) func testServer(rrm testutil.RequestResponseMap) (string, func()) { h := testutil.NewHandler(rrm) s := httptest.NewServer(h) return s.URL, s.Close } func newRandomBlob(size int) (digest.Digest, []byte) { b := make([]byte, size) if n, err := rand.Read(b); err != nil { panic(err) } else if n != size { panic("unable to read enough bytes") } return digest.FromBytes(b), b } func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", Route: "/v2/" + repo + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: content, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", Route: "/v2/" + repo + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) } func addTestCatalog(route string, content []byte, link string, m *testutil.RequestResponseMap) { headers := map[string][]string{ "Content-Length": {strconv.Itoa(len(content))}, "Content-Type": {"application/json; charset=utf-8"}, } if link != "" { headers["Link"] = append(headers["Link"], link) } *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", Route: route, }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: content, Headers: http.Header(headers), }, }) } func TestBlobDelete(t *testing.T) { dgst, _ := newRandomBlob(1024) var m testutil.RequestResponseMap repo, _ := reference.ParseNamed("test.example.com/repo1") m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: 
http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, }), }, }) e, c := testServer(m) defer c() ctx := context.Background() r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) err = l.Delete(ctx, dgst) if err != nil { t.Errorf("Error deleting blob: %s", err.Error()) } } func TestBlobFetch(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addTestFetch("test.example.com/repo1", d1, b1, &m) e, c := testServer(m) defer c() ctx := context.Background() repo, _ := reference.ParseNamed("test.example.com/repo1") r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) b, err := l.Get(ctx, d1) if err != nil { t.Fatal(err) } if !bytes.Equal(b, b1) { t.Fatalf("Wrong bytes values fetched: [%d]byte != [%d]byte", len(b), len(b1)) } // TODO(dmcgowan): Test for unknown blob case } func TestBlobExistsNoContentLength(t *testing.T) { var m testutil.RequestResponseMap repo, _ := reference.ParseNamed("biff") dgst, content := newRandomBlob(1024) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: content, Headers: http.Header(map[string][]string{ // "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ // "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) e, c := testServer(m) defer c() ctx := context.Background() r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) _, err = l.Stat(ctx, dgst) if err == nil { t.Fatal("expected an error when the Content-Length header is missing") } if !strings.Contains(err.Error(), "missing content-length header") { t.Fatalf("Expected missing content-length error message") } } func TestBlobExists(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap addTestFetch("test.example.com/repo1", d1, b1, &m) e, c := testServer(m) defer c() ctx := context.Background() repo, _ := reference.ParseNamed("test.example.com/repo1") r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) stat, err := l.Stat(ctx, d1) if err != nil { t.Fatal(err) } if stat.Digest != d1 { t.Fatalf("Unexpected digest: %s, expected %s", stat.Digest, d1) } if stat.Size != int64(len(b1)) { t.Fatalf("Unexpected length: %d, expected %d", stat.Size, len(b1)) } // TODO(dmcgowan): Test error cases and ErrBlobUnknown case } func TestBlobUploadChunked(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap chunks := [][]byte{ b1[0:256], b1[256:512], b1[512:513], b1[513:1024], } repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uuids := []string{uuid.Generate().String()} m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, "Docker-Upload-UUID": {uuids[0]}, "Range": {"0-0"}, }), },
}) offset := 0 for i, chunk := range chunks { uuids = append(uuids, uuid.Generate().String()) newOffset := offset + len(chunk) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], Body: chunk, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, "Docker-Upload-UUID": {uuids[i+1]}, "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, }), }, }) offset = newOffset } m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], QueryParams: map[string][]string{ "digest": {dgst.String()}, }, }, Response: testutil.Response{ StatusCode: http.StatusCreated, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, "Content-Range": {fmt.Sprintf("0-%d", offset-1)}, }), }, }) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(offset)}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) e, c := testServer(m) defer c() ctx := context.Background() r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) upload, err := l.Create(ctx) if err != nil { t.Fatal(err) } if upload.ID() != uuids[0] { log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uuids[0]) } for _, chunk := range chunks { n, err := upload.Write(chunk) if err != nil { t.Fatal(err) } if n != len(chunk) { t.Fatalf("Unexpected length returned from write: %d; expected: %d", n, len(chunk)) } } blob, err := upload.Commit(ctx, distribution.Descriptor{ Digest: dgst, Size: int64(len(b1)), }) if err != nil { t.Fatal(err) } if blob.Size != int64(len(b1)) { t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) } } func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uploadID := uuid.Generate().String() m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Range": {"0-0"}, }), }, }) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, Body: b1, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, "Range": {fmt.Sprintf("0-%d", len(b1)-1)}, }), }, }) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, QueryParams: map[string][]string{ "digest": {dgst.String()}, }, }, Response: testutil.Response{ StatusCode: 
http.StatusCreated, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, "Content-Range": {fmt.Sprintf("0-%d", len(b1)-1)}, }), }, }) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(b1))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) e, c := testServer(m) defer c() ctx := context.Background() r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) upload, err := l.Create(ctx) if err != nil { t.Fatal(err) } if upload.ID() != uploadID { log.Fatalf("Unexpected UUID %s; expected %s", upload.ID(), uploadID) } n, err := upload.ReadFrom(bytes.NewReader(b1)) if err != nil { t.Fatal(err) } if n != int64(len(b1)) { t.Fatalf("Unexpected ReadFrom length: %d; expected: %d", n, len(b1)) } blob, err := upload.Commit(ctx, distribution.Descriptor{ Digest: dgst, Size: int64(len(b1)), }) if err != nil { t.Fatal(err) } if blob.Size != int64(len(b1)) { t.Fatalf("Unexpected blob size: %d; expected: %d", blob.Size, len(b1)) } } func TestBlobMount(t *testing.T) { dgst, content := newRandomBlob(1024) var m testutil.RequestResponseMap repo, _ := reference.ParseNamed("test.example.com/uploadrepo") sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", Route: "/v2/" + repo.Name() + "/blobs/uploads/", QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, }, Response: testutil.Response{ StatusCode: http.StatusCreated, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, "Docker-Content-Digest": {dgst.String()}, }), }, }) m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) e, c := testServer(m) defer c() ctx := context.Background() r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } l := r.Blobs(ctx) bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) if bw != nil { t.Fatalf("Expected blob writer to be nil, was %v", bw) } if ebm, ok := err.(distribution.ErrBlobMounted); ok { if ebm.From.Digest() != dgst { t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) } if ebm.From.Name() != sourceRepo.Name() { t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) } } else { t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) } } func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { blobs := make([]schema1.FSLayer, blobCount) history := make([]schema1.History, blobCount) for i := 0; i < blobCount; i++ { dgst, blob := newRandomBlob((i % 5) * 16) blobs[i] = schema1.FSLayer{BlobSum: dgst} history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)} } m := schema1.Manifest{ Name: name.String(), Tag: tag, 
Architecture: "x86", FSLayers: blobs, History: history, Versioned: manifest.Versioned{ SchemaVersion: 1, }, } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { panic(err) } sm, err := schema1.Sign(&m, pk) if err != nil { panic(err) } return sm, digest.FromBytes(sm.Canonical), sm.Canonical } func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { actualDigest := digest.FromBytes(content) getReqWithEtag := testutil.Request{ Method: "GET", Route: "/v2/" + repo.Name() + "/manifests/" + reference, Headers: http.Header(map[string][]string{ "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, }), } var getRespWithEtag testutil.Response if actualDigest.String() == dgst { getRespWithEtag = testutil.Response{ StatusCode: http.StatusNotModified, Body: []byte{}, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, "Content-Type": {schema1.MediaTypeSignedManifest}, }), } } else { getRespWithEtag = testutil.Response{ StatusCode: http.StatusOK, Body: content, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, "Content-Type": {schema1.MediaTypeSignedManifest}, }), } } *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) } func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: content, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, "Content-Type": {mediatype}, }), }, }) *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, "Content-Type": {mediatype}, }), }, }) } func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { if m1.Name != m2.Name { return fmt.Errorf("name does not match %q != %q", m1.Name, m2.Name) } if m1.Tag != m2.Tag { return fmt.Errorf("tag does not match %q != %q", m1.Tag, m2.Tag) } if len(m1.FSLayers) != len(m2.FSLayers) { return fmt.Errorf("fs blob length does not match %d != %d", len(m1.FSLayers), len(m2.FSLayers)) } for i := range m1.FSLayers { if m1.FSLayers[i].BlobSum != m2.FSLayers[i].BlobSum { return fmt.Errorf("blobsum does not match %q != %q", m1.FSLayers[i].BlobSum, m2.FSLayers[i].BlobSum) } } if len(m1.History) != len(m2.History) { return fmt.Errorf("history length does not match %d != %d", len(m1.History), len(m2.History)) } for i := range m1.History { if m1.History[i].V1Compatibility != m2.History[i].V1Compatibility { return fmt.Errorf("blobsum does not match %q != %q", m1.History[i].V1Compatibility, m2.History[i].V1Compatibility) } } return nil } func TestV1ManifestFetch(t *testing.T) { ctx := context.Background() repo, _ := reference.ParseNamed("test.example.com/repo") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m 
testutil.RequestResponseMap _, pl, err := m1.Payload() if err != nil { t.Fatal(err) } addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) addTestManifest(repo, "badcontenttype", "text/html", pl, &m) e, c := testServer(m) defer c() r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } ok, err := ms.Exists(ctx, dgst) if err != nil { t.Fatal(err) } if !ok { t.Fatal("Manifest does not exist") } manifest, err := ms.Get(ctx, dgst) if err != nil { t.Fatal(err) } v1manifest, ok := manifest.(*schema1.SignedManifest) if !ok { t.Fatalf("Unexpected manifest type from Get: %T", manifest) } if err := checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } manifest, err = ms.Get(ctx, dgst, WithTag("latest")) if err != nil { t.Fatal(err) } v1manifest, ok = manifest.(*schema1.SignedManifest) if !ok { t.Fatalf("Unexpected manifest type from Get: %T", manifest) } if err = checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } manifest, err = ms.Get(ctx, dgst, WithTag("badcontenttype")) if err != nil { t.Fatal(err) } v1manifest, ok = manifest.(*schema1.SignedManifest) if !ok { t.Fatalf("Unexpected manifest type from Get: %T", manifest) } if err = checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } } func TestManifestFetchWithEtag(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) e, c := testServer(m) defer c() ctx := context.Background() r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } clientManifestService, ok := ms.(*manifests) if !ok { panic("wrong type for client manifest service") } _, err = clientManifestService.Get(ctx, d1, WithTag("latest"), AddEtagToTag("latest", d1.String())) if err != distribution.ErrManifestNotModified { t.Fatal(err) } } func TestManifestDelete(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, }), }, }) e, c := testServer(m) defer c() r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } ctx := context.Background() ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } if err := ms.Delete(ctx, dgst1); err != nil { t.Fatal(err) } if err := ms.Delete(ctx, dgst2); err == nil { t.Fatal("Expected error deleting unknown manifest") } // TODO(dmcgowan): Check for specific unknown error } func TestManifestPut(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo/delete") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) _, payload, err := m1.Payload() if err != nil { t.Fatal(err) } var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", Route: "/v2/" + repo.Name() + "/manifests/other", Body: payload, }, Response: 
testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, }), }, }) e, c := testServer(m) defer c() r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } ctx := context.Background() ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } if _, err := ms.Put(ctx, m1, WithTag(m1.Tag)); err != nil { t.Fatal(err) } // TODO(dmcgowan): Check for invalid input error } func TestManifestTags(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") tagsList := []byte(strings.TrimSpace(` { "name": "test.example.com/repo/tags/list", "tags": [ "tag1", "tag2", "funtag" ] } `)) var m testutil.RequestResponseMap for i := 0; i < 3; i++ { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", Route: "/v2/" + repo.Name() + "/tags/list", }, Response: testutil.Response{ StatusCode: http.StatusOK, Body: tagsList, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(tagsList))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, }), }, }) } e, c := testServer(m) defer c() r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } ctx := context.Background() tagService := r.Tags(ctx) tags, err := tagService.All(ctx) if err != nil { t.Fatal(err) } if len(tags) != 3 { t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) } expected := map[string]struct{}{ "tag1": {}, "tag2": {}, "funtag": {}, } for _, t := range tags { delete(expected, t) } if len(expected) != 0 { t.Fatalf("unexpected tags returned: %v", expected) } // TODO(dmcgowan): Check for error cases } func TestManifestUnauthorized(t *testing.T) { repo, _ := reference.ParseNamed("test.example.com/repo") _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusUnauthorized, Body: []byte("garbage"), }, }) e, c := testServer(m) defer c() r, err := NewRepository(context.Background(), repo, e, nil) if err != nil { t.Fatal(err) } ctx := context.Background() ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } _, err = ms.Get(ctx, dgst) if err == nil { t.Fatal("Expected error fetching manifest") } v2Err, ok := err.(errcode.Error) if !ok { t.Fatalf("Unexpected error type: %#v", err) } if v2Err.Code != errcode.ErrorCodeUnauthorized { t.Fatalf("Unexpected error code: %s", v2Err.Code.String()) } if expected := errcode.ErrorCodeUnauthorized.Message(); v2Err.Message != expected { t.Fatalf("Unexpected message value: %q, expected %q", v2Err.Message, expected) } } func TestCatalog(t *testing.T) { var m testutil.RequestResponseMap addTestCatalog( "/v2/_catalog?n=5", []byte("{\"repositories\":[\"foo\", \"bar\", \"baz\"]}"), "", &m) e, c := testServer(m) defer c() entries := make([]string, 5) r, err := NewRegistry(context.Background(), e, nil) if err != nil { t.Fatal(err) } ctx := context.Background() numFilled, err := r.Repositories(ctx, entries, "") if err != io.EOF { t.Fatal(err) } if numFilled != 3 { t.Fatalf("Got wrong number of repos") } } func TestCatalogInParts(t *testing.T) { var m testutil.RequestResponseMap addTestCatalog( "/v2/_catalog?n=2", []byte("{\"repositories\":[\"bar\", \"baz\"]}"), "", &m) addTestCatalog( 
"/v2/_catalog?last=baz&n=2", []byte("{\"repositories\":[\"foo\"]}"), "", &m) e, c := testServer(m) defer c() entries := make([]string, 2) r, err := NewRegistry(context.Background(), e, nil) if err != nil { t.Fatal(err) } ctx := context.Background() numFilled, err := r.Repositories(ctx, entries, "") if err != nil { t.Fatal(err) } if numFilled != 2 { t.Fatalf("Got wrong number of repos") } numFilled, err = r.Repositories(ctx, entries, "baz") if err != io.EOF { t.Fatal(err) } if numFilled != 1 { t.Fatalf("Got wrong number of repos") } } func TestSanitizeLocation(t *testing.T) { for _, testcase := range []struct { description string location string source string expected string err error }{ { description: "ensure relative location correctly resolved", location: "/v2/foo/baasdf", source: "http://blahalaja.com/v1", expected: "http://blahalaja.com/v2/foo/baasdf", }, { description: "ensure parameters are preserved", location: "/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", source: "http://blahalaja.com/v1", expected: "http://blahalaja.com/v2/foo/baasdf?_state=asdfasfdasdfasdf&digest=foo", }, { description: "ensure new hostname overidden", location: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", source: "http://blahalaja.com/v1", expected: "https://mwhahaha.com/v2/foo/baasdf?_state=asdfasfdasdfasdf", }, } { fatalf := func(format string, args ...interface{}) { t.Fatalf(testcase.description+": "+format, args...) } s, err := sanitizeLocation(testcase.location, testcase.source) if err != testcase.err { if testcase.err != nil { fatalf("expected error: %v != %v", err, testcase) } else { fatalf("unexpected error sanitizing: %v", err) } } if s != testcase.expected { fatalf("bad sanitize: %q != %q", s, testcase.expected) } } } distribution-2.3.0/registry/client/transport/000077500000000000000000000000001265472114500214245ustar00rootroot00000000000000distribution-2.3.0/registry/client/transport/http_reader.go000066400000000000000000000106461265472114500242630ustar00rootroot00000000000000package transport import ( "bufio" "errors" "fmt" "io" "net/http" "os" ) // ReadSeekCloser combines io.ReadSeeker with io.Closer. type ReadSeekCloser interface { io.ReadSeeker io.Closer } // NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET // request. When seeking and starting a read from a non-zero offset // the a "Range" header will be added which sets the offset. // TODO(dmcgowan): Move this into a separate utility package func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { return &httpReadSeeker{ client: client, url: url, errorHandler: errorHandler, } } type httpReadSeeker struct { client *http.Client url string // errorHandler creates an error from an unsuccessful HTTP response. // This allows the error to be created with the HTTP response body // without leaking the body through a returned error. errorHandler func(*http.Response) error size int64 // rc is the remote read closer. rc io.ReadCloser // brd is a buffer for internal buffered io. brd *bufio.Reader // readerOffset tracks the offset as of the last read. readerOffset int64 // seekOffset allows Seek to override the offset. Seek changes // seekOffset instead of changing readOffset directly so that // connection resets can be delayed and possibly avoided if the // seek is undone (i.e. seeking to the end and then back to the // beginning). 
seekOffset int64 err error } func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { if hrs.err != nil { return 0, hrs.err } // If we seeked to a different position, we need to reset the // connection. This logic is here instead of Seek so that if // a seek is undone before the next read, the connection doesn't // need to be closed and reopened. A common example of this is // seeking to the end to determine the length, and then seeking // back to the original position. if hrs.readerOffset != hrs.seekOffset { hrs.reset() } hrs.readerOffset = hrs.seekOffset rd, err := hrs.reader() if err != nil { return 0, err } n, err = rd.Read(p) hrs.seekOffset += int64(n) hrs.readerOffset += int64(n) // Simulate io.EOF error if we reach filesize. if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { err = io.EOF } return n, err } func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { if hrs.err != nil { return 0, hrs.err } _, err := hrs.reader() if err != nil { return 0, err } newOffset := hrs.seekOffset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: if hrs.size < 0 { return 0, errors.New("content length not known") } newOffset = hrs.size + int64(offset) case os.SEEK_SET: newOffset = int64(offset) } if newOffset < 0 { err = errors.New("cannot seek to negative position") } else { hrs.seekOffset = newOffset } return hrs.seekOffset, err } func (hrs *httpReadSeeker) Close() error { if hrs.err != nil { return hrs.err } // close and release reader chain if hrs.rc != nil { hrs.rc.Close() } hrs.rc = nil hrs.brd = nil hrs.err = errors.New("httpLayer: closed") return nil } func (hrs *httpReadSeeker) reset() { if hrs.err != nil { return } if hrs.rc != nil { hrs.rc.Close() hrs.rc = nil } } func (hrs *httpReadSeeker) reader() (io.Reader, error) { if hrs.err != nil { return nil, hrs.err } if hrs.rc != nil { return hrs.brd, nil } req, err := http.NewRequest("GET", hrs.url, nil) if err != nil { return nil, err } if hrs.readerOffset > 0 { // TODO(stevvooe): Get this working correctly. // If we are at different offset, issue a range request from there. req.Header.Add("Range", "1-") // TODO: get context in here // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) } resp, err := hrs.client.Do(req) if err != nil { return nil, err } // Normally would use client.SuccessStatus, but that would be a cyclic // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { hrs.rc = resp.Body if resp.StatusCode == http.StatusOK { hrs.size = resp.ContentLength } else { hrs.size = -1 } } else { defer resp.Body.Close() if hrs.errorHandler != nil { return nil, hrs.errorHandler(resp) } return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } if hrs.brd == nil { hrs.brd = bufio.NewReader(hrs.rc) } else { hrs.brd.Reset(hrs.rc) } return hrs.brd, nil } distribution-2.3.0/registry/client/transport/transport.go000066400000000000000000000063571265472114500240220ustar00rootroot00000000000000package transport import ( "io" "net/http" "sync" ) // RequestModifier represents an object which will do an inplace // modification of an HTTP request. type RequestModifier interface { ModifyRequest(*http.Request) error } type headerModifier http.Header // NewHeaderRequestModifier returns a new RequestModifier which will // add the given headers to a request. 
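//
// A minimal usage sketch (the header values are illustrative, not part of the
// package API):
//
//	mod := NewHeaderRequestModifier(http.Header{"User-Agent": {"example-client/0.1"}})
//	client := &http.Client{Transport: NewTransport(nil, mod)}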
func NewHeaderRequestModifier(header http.Header) RequestModifier { return headerModifier(header) } func (h headerModifier) ModifyRequest(req *http.Request) error { for k, s := range http.Header(h) { req.Header[k] = append(req.Header[k], s...) } return nil } // NewTransport creates a new transport which will apply modifiers to // the request on a RoundTrip call. func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { return &transport{ Modifiers: modifiers, Base: base, } } // transport is an http.RoundTripper that makes HTTP requests after // copying and modifying the request type transport struct { Modifiers []RequestModifier Base http.RoundTripper mu sync.Mutex // guards modReq modReq map[*http.Request]*http.Request // original -> modified } // RoundTrip authorizes and authenticates the request with an // access token. If no token exists or token is expired, // tries to refresh/fetch a new token. func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { req2 := cloneRequest(req) for _, modifier := range t.Modifiers { if err := modifier.ModifyRequest(req2); err != nil { return nil, err } } t.setModReq(req, req2) res, err := t.base().RoundTrip(req2) if err != nil { t.setModReq(req, nil) return nil, err } res.Body = &onEOFReader{ rc: res.Body, fn: func() { t.setModReq(req, nil) }, } return res, nil } // CancelRequest cancels an in-flight request by closing its connection. func (t *transport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := t.base().(canceler); ok { t.mu.Lock() modReq := t.modReq[req] delete(t.modReq, req) t.mu.Unlock() cr.CancelRequest(modReq) } } func (t *transport) base() http.RoundTripper { if t.Base != nil { return t.Base } return http.DefaultTransport } func (t *transport) setModReq(orig, mod *http.Request) { t.mu.Lock() defer t.mu.Unlock() if t.modReq == nil { t.modReq = make(map[*http.Request]*http.Request) } if mod == nil { delete(t.modReq, orig) } else { t.modReq[orig] = mod } } // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { // shallow copy of the struct r2 := new(http.Request) *r2 = *r // deep copy of the Header r2.Header = make(http.Header, len(r.Header)) for k, s := range r.Header { r2.Header[k] = append([]string(nil), s...) } return r2 } type onEOFReader struct { rc io.ReadCloser fn func() } func (r *onEOFReader) Read(p []byte) (n int, err error) { n, err = r.rc.Read(p) if err == io.EOF { r.runFunc() } return } func (r *onEOFReader) Close() error { err := r.rc.Close() r.runFunc() return err } func (r *onEOFReader) runFunc() { if fn := r.fn; fn != nil { fn() r.fn = nil } } distribution-2.3.0/registry/doc.go000066400000000000000000000001331265472114500172030ustar00rootroot00000000000000// Package registry provides the main entrypoints for running a registry. 
package registry distribution-2.3.0/registry/handlers/000077500000000000000000000000001265472114500177125ustar00rootroot00000000000000distribution-2.3.0/registry/handlers/api_test.go000066400000000000000000002166201265472114500220600ustar00rootroot00000000000000package handlers import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "net/http/httputil" "net/url" "os" "path" "reflect" "regexp" "strconv" "strings" "testing" "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" _ "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" "github.com/gorilla/handlers" ) var headerConfig = http.Header{ "X-Content-Type-Options": []string{"nosniff"}, } // TestCheckAPI hits the base endpoint (/v2/) ensures we return the specified // 200 OK response. func TestCheckAPI(t *testing.T) { env := newTestEnv(t, false) baseURL, err := env.builder.BuildBaseURL() if err != nil { t.Fatalf("unexpected error building base url: %v", err) } resp, err := http.Get(baseURL) if err != nil { t.Fatalf("unexpected error issuing request: %v", err) } defer resp.Body.Close() checkResponse(t, "issuing api base check", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Type": []string{"application/json; charset=utf-8"}, "Content-Length": []string{"2"}, }) p, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("unexpected error reading response body: %v", err) } if string(p) != "{}" { t.Fatalf("unexpected response body: %v", string(p)) } } // TestCatalogAPI tests the /v2/_catalog endpoint func TestCatalogAPI(t *testing.T) { chunkLen := 2 env := newTestEnv(t, false) values := url.Values{ "last": []string{""}, "n": []string{strconv.Itoa(chunkLen)}} catalogURL, err := env.builder.BuildCatalogURL(values) if err != nil { t.Fatalf("unexpected error building catalog url: %v", err) } // ----------------------------------- // try to get an empty catalog resp, err := http.Get(catalogURL) if err != nil { t.Fatalf("unexpected error issuing request: %v", err) } defer resp.Body.Close() checkResponse(t, "issuing catalog api check", resp, http.StatusOK) var ctlg struct { Repositories []string `json:"repositories"` } dec := json.NewDecoder(resp.Body) if err := dec.Decode(&ctlg); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } // we haven't pushed anything to the registry yet if len(ctlg.Repositories) != 0 { t.Fatalf("repositories has unexpected values") } if resp.Header.Get("Link") != "" { t.Fatalf("repositories has more data when none expected") } // ----------------------------------- // push something to the registry and try again images := []string{"foo/aaaa", "foo/bbbb", "foo/cccc"} for _, image := range images { createRepository(env, t, image, "sometag") } resp, err = http.Get(catalogURL) if err != nil { t.Fatalf("unexpected error issuing request: %v", err) } defer resp.Body.Close() checkResponse(t, "issuing catalog api check", resp, http.StatusOK) dec = json.NewDecoder(resp.Body) if err = dec.Decode(&ctlg); err != nil { t.Fatalf("error decoding 
fetched manifest: %v", err) } if len(ctlg.Repositories) != chunkLen { t.Fatalf("repositories has unexpected values") } for _, image := range images[:chunkLen] { if !contains(ctlg.Repositories, image) { t.Fatalf("didn't find our repository '%s' in the catalog", image) } } link := resp.Header.Get("Link") if link == "" { t.Fatalf("repositories has less data than expected") } newValues := checkLink(t, link, chunkLen, ctlg.Repositories[len(ctlg.Repositories)-1]) // ----------------------------------- // get the last chunk of data catalogURL, err = env.builder.BuildCatalogURL(newValues) if err != nil { t.Fatalf("unexpected error building catalog url: %v", err) } resp, err = http.Get(catalogURL) if err != nil { t.Fatalf("unexpected error issuing request: %v", err) } defer resp.Body.Close() checkResponse(t, "issuing catalog api check", resp, http.StatusOK) dec = json.NewDecoder(resp.Body) if err = dec.Decode(&ctlg); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } if len(ctlg.Repositories) != 1 { t.Fatalf("repositories has unexpected values") } lastImage := images[len(images)-1] if !contains(ctlg.Repositories, lastImage) { t.Fatalf("didn't find our repository '%s' in the catalog", lastImage) } link = resp.Header.Get("Link") if link != "" { t.Fatalf("catalog has unexpected data") } } func checkLink(t *testing.T, urlStr string, numEntries int, last string) url.Values { re := regexp.MustCompile("<(/v2/_catalog.*)>; rel=\"next\"") matches := re.FindStringSubmatch(urlStr) if len(matches) != 2 { t.Fatalf("Catalog link address response was incorrect") } linkURL, _ := url.Parse(matches[1]) urlValues := linkURL.Query() if urlValues.Get("n") != strconv.Itoa(numEntries) { t.Fatalf("Catalog link entry size is incorrect") } if urlValues.Get("last") != last { t.Fatal("Catalog link last entry is incorrect") } return urlValues } func contains(elems []string, e string) bool { for _, elem := range elems { if elem == e { return true } } return false } func TestURLPrefix(t *testing.T) { config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, } config.HTTP.Prefix = "/test/" config.HTTP.Headers = headerConfig env := newTestEnvWithConfig(t, &config) baseURL, err := env.builder.BuildBaseURL() if err != nil { t.Fatalf("unexpected error building base url: %v", err) } parsed, _ := url.Parse(baseURL) if !strings.HasPrefix(parsed.Path, config.HTTP.Prefix) { t.Fatalf("Prefix %v not included in test url %v", config.HTTP.Prefix, baseURL) } resp, err := http.Get(baseURL) if err != nil { t.Fatalf("unexpected error issuing request: %v", err) } defer resp.Body.Close() checkResponse(t, "issuing api base check", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Type": []string{"application/json; charset=utf-8"}, "Content-Length": []string{"2"}, }) } type blobArgs struct { imageName reference.Named layerFile io.ReadSeeker layerDigest digest.Digest } func makeBlobArgs(t *testing.T) blobArgs { layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } args := blobArgs{ layerFile: layerFile, layerDigest: layerDigest, } args.imageName, _ = reference.ParseNamed("foo/bar") return args } // TestBlobAPI conducts a full test of the of the blob api. 
func TestBlobAPI(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) args := makeBlobArgs(t) testBlobAPI(t, env, args) deleteEnabled = true env = newTestEnv(t, deleteEnabled) args = makeBlobArgs(t) testBlobAPI(t, env, args) } func TestBlobDelete(t *testing.T) { deleteEnabled := true env := newTestEnv(t, deleteEnabled) args := makeBlobArgs(t) env = testBlobAPI(t, env, args) testBlobDelete(t, env, args) } func TestBlobDeleteDisabled(t *testing.T) { deleteEnabled := false env := newTestEnv(t, deleteEnabled) args := makeBlobArgs(t) imageName := args.imageName layerDigest := args.layerDigest ref, _ := reference.WithDigest(imageName, layerDigest) layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("error building url: %v", err) } resp, err := httpDelete(layerURL) if err != nil { t.Fatalf("unexpected error deleting when disabled: %v", err) } checkResponse(t, "status of disabled delete", resp, http.StatusMethodNotAllowed) } func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv { // TODO(stevvooe): This test code is complete junk but it should cover the // complete flow. This must be broken down and checked against the // specification *before* we submit the final to docker core. imageName := args.imageName layerFile := args.layerFile layerDigest := args.layerDigest // ----------------------------------- // Test fetch for non-existent content ref, _ := reference.WithDigest(imageName, layerDigest) layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("error building url: %v", err) } resp, err := http.Get(layerURL) if err != nil { t.Fatalf("unexpected error fetching non-existent layer: %v", err) } checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound) // ------------------------------------------ // Test head request for non-existent content resp, err = http.Head(layerURL) if err != nil { t.Fatalf("unexpected error checking head on non-existent layer: %v", err) } checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound) // ------------------------------------------ // Start an upload, check the status then cancel uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName) // A status check should work resp, err = http.Get(uploadURLBase) if err != nil { t.Fatalf("unexpected error getting upload status: %v", err) } checkResponse(t, "status of deleted upload", resp, http.StatusNoContent) checkHeaders(t, resp, http.Header{ "Location": []string{"*"}, "Range": []string{"0-0"}, "Docker-Upload-UUID": []string{uploadUUID}, }) req, err := http.NewRequest("DELETE", uploadURLBase, nil) if err != nil { t.Fatalf("unexpected error creating delete request: %v", err) } resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("unexpected error sending delete request: %v", err) } checkResponse(t, "deleting upload", resp, http.StatusNoContent) // A status check should result in 404 resp, err = http.Get(uploadURLBase) if err != nil { t.Fatalf("unexpected error getting upload status: %v", err) } checkResponse(t, "status of deleted upload", resp, http.StatusNotFound) // ----------------------------------------- // Do layer push with an empty body and different digest uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error doing bad layer push: %v", err) } checkResponse(t, "bad layer push", resp, http.StatusBadRequest) 
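// The error body should include a digest-invalid error code, since layerDigest
// does not match the empty body pushed above.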
checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid) // ----------------------------------------- // Do layer push with an empty body and correct digest zeroDigest, err := digest.FromReader(bytes.NewReader([]byte{})) if err != nil { t.Fatalf("unexpected error digesting empty buffer: %v", err) } uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{})) // ----------------------------------------- // Do layer push with an empty body and correct digest // This is a valid but empty tarfile! emptyTar := bytes.Repeat([]byte("\x00"), 1024) emptyDigest, err := digest.FromReader(bytes.NewReader(emptyTar)) if err != nil { t.Fatalf("unexpected error digesting empty tar: %v", err) } uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar)) // ------------------------------------------ // Now, actually do successful upload. layerLength, _ := layerFile.Seek(0, os.SEEK_END) layerFile.Seek(0, os.SEEK_SET) uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) // ------------------------------------------ // Now, push just a chunk layerFile.Seek(0, 0) canonicalDigester := digest.Canonical.New() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } canonicalDigest := canonicalDigester.Digest() layerFile.Seek(0, 0) uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName) uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength) finishUpload(t, env.builder, imageName, uploadURLBase, dgst) // ------------------------ // Use a head request to see if the layer exists. resp, err = http.Head(layerURL) if err != nil { t.Fatalf("unexpected error checking head on existing layer: %v", err) } checkResponse(t, "checking head on existing layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, "Docker-Content-Digest": []string{canonicalDigest.String()}, }) // ---------------- // Fetch the layer! 
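// A plain GET should return the blob with a 200; the digest verifier below
// confirms the body matches layerDigest.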
resp, err = http.Get(layerURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) } checkResponse(t, "fetching layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, "Docker-Content-Digest": []string{canonicalDigest.String()}, }) // Verify the body verifier, err := digest.NewDigestVerifier(layerDigest) if err != nil { t.Fatalf("unexpected error getting digest verifier: %s", err) } io.Copy(verifier, resp.Body) if !verifier.Verified() { t.Fatalf("response body did not pass verification") } // ---------------- // Fetch the layer with an invalid digest badURL := strings.Replace(layerURL, "sha256", "sha257", 1) resp, err = http.Get(badURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) } checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest) // Cache headers resp, err = http.Get(layerURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) } checkResponse(t, "fetching layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, "Docker-Content-Digest": []string{canonicalDigest.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, canonicalDigest)}, "Cache-Control": []string{"max-age=31536000"}, }) // Matching etag, gives 304 etag := resp.Header.Get("Etag") req, err = http.NewRequest("GET", layerURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified) // Non-matching etag, gives 200 req, err = http.NewRequest("GET", layerURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", "") resp, err = http.DefaultClient.Do(req) checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK) // Missing tests: // - Upload the same tar file under and different repository and // ensure the content remains uncorrupted. return env } func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) { // Upload a layer imageName := args.imageName layerFile := args.layerFile layerDigest := args.layerDigest ref, _ := reference.WithDigest(imageName, layerDigest) layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf(err.Error()) } // --------------- // Delete a layer resp, err := httpDelete(layerURL) if err != nil { t.Fatalf("unexpected error deleting layer: %v", err) } checkResponse(t, "deleting layer", resp, http.StatusAccepted) checkHeaders(t, resp, http.Header{ "Content-Length": []string{"0"}, }) // --------------- // Try and get it back // Use a head request to see if the layer exists. 
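// The layer was deleted above, so this HEAD request should now return 404.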
resp, err = http.Head(layerURL) if err != nil { t.Fatalf("unexpected error checking head on existing layer: %v", err) } checkResponse(t, "checking existence of deleted layer", resp, http.StatusNotFound) // Delete already deleted layer resp, err = httpDelete(layerURL) if err != nil { t.Fatalf("unexpected error deleting layer: %v", err) } checkResponse(t, "deleting layer", resp, http.StatusNotFound) // ---------------- // Attempt to delete a layer with an invalid digest badURL := strings.Replace(layerURL, "sha256", "sha257", 1) resp, err = httpDelete(badURL) if err != nil { t.Fatalf("unexpected error fetching layer: %v", err) } checkResponse(t, "deleting layer bad digest", resp, http.StatusBadRequest) // ---------------- // Reupload previously deleted blob layerFile.Seek(0, os.SEEK_SET) uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) layerFile.Seek(0, os.SEEK_SET) canonicalDigester := digest.Canonical.New() if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil { t.Fatalf("error copying to digest: %v", err) } canonicalDigest := canonicalDigester.Digest() // ------------------------ // Use a head request to see if it exists resp, err = http.Head(layerURL) if err != nil { t.Fatalf("unexpected error checking head on existing layer: %v", err) } layerLength, _ := layerFile.Seek(0, os.SEEK_END) checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Content-Length": []string{fmt.Sprint(layerLength)}, "Docker-Content-Digest": []string{canonicalDigest.String()}, }) } func TestDeleteDisabled(t *testing.T) { env := newTestEnv(t, false) imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } ref, _ := reference.WithDigest(imageName, layerDigest) layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("Error building blob URL") } uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) resp, err := httpDelete(layerURL) if err != nil { t.Fatalf("unexpected error deleting layer: %v", err) } checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) } func TestDeleteReadOnly(t *testing.T) { env := newTestEnv(t, true) imageName, _ := reference.ParseNamed("foo/bar") // "build" our layer file layerFile, layerDigest, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } ref, _ := reference.WithDigest(imageName, layerDigest) layerURL, err := env.builder.BuildBlobURL(ref) if err != nil { t.Fatalf("Error building blob URL") } uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) env.app.readOnly = true resp, err := httpDelete(layerURL) if err != nil { t.Fatalf("unexpected error deleting layer: %v", err) } checkResponse(t, "deleting layer in read-only mode", resp, http.StatusMethodNotAllowed) } func TestStartPushReadOnly(t *testing.T) { env := newTestEnv(t, true) env.app.readOnly = true imageName, _ := reference.ParseNamed("foo/bar") layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) } resp, err := http.Post(layerUploadURL, "", nil) if err != nil { 
t.Fatalf("unexpected error starting layer push: %v", err) } defer resp.Body.Close() checkResponse(t, "starting push in read-only mode", resp, http.StatusMethodNotAllowed) } func httpDelete(url string) (*http.Response, error) { req, err := http.NewRequest("DELETE", url, nil) if err != nil { return nil, err } resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } // defer resp.Body.Close() return resp, err } type manifestArgs struct { imageName reference.Named mediaType string manifest distribution.Manifest dgst digest.Digest } func TestManifestAPI(t *testing.T) { schema1Repo, _ := reference.ParseNamed("foo/schema1") schema2Repo, _ := reference.ParseNamed("foo/schema2") deleteEnabled := false env := newTestEnv(t, deleteEnabled) testManifestAPISchema1(t, env, schema1Repo) schema2Args := testManifestAPISchema2(t, env, schema2Repo) testManifestAPIManifestList(t, env, schema2Args) deleteEnabled = true env = newTestEnv(t, deleteEnabled) testManifestAPISchema1(t, env, schema1Repo) schema2Args = testManifestAPISchema2(t, env, schema2Repo) testManifestAPIManifestList(t, env, schema2Args) } func TestManifestDelete(t *testing.T) { schema1Repo, _ := reference.ParseNamed("foo/schema1") schema2Repo, _ := reference.ParseNamed("foo/schema2") deleteEnabled := true env := newTestEnv(t, deleteEnabled) schema1Args := testManifestAPISchema1(t, env, schema1Repo) testManifestDelete(t, env, schema1Args) schema2Args := testManifestAPISchema2(t, env, schema2Repo) testManifestDelete(t, env, schema2Args) } func TestManifestDeleteDisabled(t *testing.T) { schema1Repo, _ := reference.ParseNamed("foo/schema1") deleteEnabled := false env := newTestEnv(t, deleteEnabled) testManifestDeleteDisabled(t, env, schema1Repo) } func testManifestDeleteDisabled(t *testing.T, env *testEnv, imageName reference.Named) { ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) manifestURL, err := env.builder.BuildManifestURL(ref) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } resp, err := httpDelete(manifestURL) if err != nil { t.Fatalf("unexpected error deleting manifest %v", err) } defer resp.Body.Close() checkResponse(t, "status of disabled delete of manifest", resp, http.StatusMethodNotAllowed) } func testManifestAPISchema1(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { tag := "thetag" args := manifestArgs{imageName: imageName} tagRef, _ := reference.WithTag(imageName, tag) manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } // ----------------------------- // Attempt to fetch the manifest resp, err := http.Get(manifestURL) if err != nil { t.Fatalf("unexpected error getting manifest: %v", err) } defer resp.Body.Close() checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) tagsURL, err := env.builder.BuildTagsURL(imageName) if err != nil { t.Fatalf("unexpected error building tags url: %v", err) } resp, err = http.Get(tagsURL) if err != nil { t.Fatalf("unexpected error getting unknown tags: %v", err) } defer resp.Body.Close() // Check that we get an unknown repository error when asking for tags checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) // -------------------------------- // Attempt to push unsigned manifest with missing layers 
unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: imageName.Name(), Tag: tag, FSLayers: []schema1.FSLayer{ { BlobSum: "asdf", }, { BlobSum: "qwer", }, }, History: []schema1.History{ { V1Compatibility: "", }, { V1Compatibility: "", }, }, } resp = putManifest(t, "putting unsigned manifest", manifestURL, "", unsignedManifest) defer resp.Body.Close() checkResponse(t, "putting unsigned manifest", resp, http.StatusBadRequest) _, p, counts := checkBodyHasErrorCodes(t, "putting unsigned manifest", resp, v2.ErrorCodeManifestInvalid) expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestInvalid: 1, } if !reflect.DeepEqual(counts, expectedCounts) { t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } // sign the manifest and still get some interesting errors. sm, err := schema1.Sign(unsignedManifest, env.pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } resp = putManifest(t, "putting signed manifest with errors", manifestURL, "", sm) defer resp.Body.Close() checkResponse(t, "putting signed manifest with errors", resp, http.StatusBadRequest) _, p, counts = checkBodyHasErrorCodes(t, "putting signed manifest with errors", resp, v2.ErrorCodeManifestBlobUnknown, v2.ErrorCodeDigestInvalid) expectedCounts = map[errcode.ErrorCode]int{ v2.ErrorCodeManifestBlobUnknown: 2, v2.ErrorCodeDigestInvalid: 2, } if !reflect.DeepEqual(counts, expectedCounts) { t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } // TODO(stevvooe): Add a test case where we take a mostly valid registry, // tamper with the content and ensure that we get a unverified manifest // error. // Push 2 random layers expectedLayers := make(map[digest.Digest]io.ReadSeeker) for i := range unsignedManifest.FSLayers { rs, dgstStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } dgst := digest.Digest(dgstStr) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } // ------------------- // Push the signed manifest with all layers pushed. 
signedManifest, err := schema1.Sign(unsignedManifest, env.pk) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } dgst := digest.FromBytes(signedManifest.Canonical) args.manifest = signedManifest args.dgst = dgst digestRef, _ := reference.WithDigest(imageName, dgst) manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting signed manifest no error", manifestURL, "", signedManifest) checkResponse(t, "putting signed manifest no error", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // -------------------- // Push by digest -- should get same result resp = putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // ------------------ // Fetch by tag name resp, err = http.Get(manifestURL) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifest schema1.SignedManifest dec := json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } if !bytes.Equal(fetchedManifest.Canonical, signedManifest.Canonical) { t.Fatalf("manifests do not match") } // --------------- // Fetch by digest resp, err = http.Get(manifestDigestURL) checkErr(t, err, "fetching manifest by digest") defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifestByDigest schema1.SignedManifest dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifestByDigest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } if !bytes.Equal(fetchedManifestByDigest.Canonical, signedManifest.Canonical) { t.Fatalf("manifests do not match") } // check signature was roundtripped signatures, err := fetchedManifestByDigest.Signatures() if err != nil { t.Fatal(err) } if len(signatures) != 1 { t.Fatalf("expected 1 signature from manifest, got: %d", len(signatures)) } // Re-sign, push and pull the same digest sm2, err := schema1.Sign(&fetchedManifestByDigest.Manifest, env.pk) if err != nil { t.Fatal(err) } // Re-push with a few different Content-Types. The official schema1 // content type should work, as should application/json with/without a // charset. 
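// Each of the three PUTs below targets the same digest URL with a different Content-Type and should return 201 Created.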
resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, schema1.MediaTypeSignedManifest, sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json; charset=utf-8", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp = putManifest(t, "re-putting signed manifest", manifestDigestURL, "application/json", sm2) checkResponse(t, "re-putting signed manifest", resp, http.StatusCreated) resp, err = http.Get(manifestDigestURL) checkErr(t, err, "re-fetching manifest by digest") defer resp.Body.Close() checkResponse(t, "re-fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifestByDigest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } // check two signatures were roundtripped signatures, err = fetchedManifestByDigest.Signatures() if err != nil { t.Fatal(err) } if len(signatures) != 2 { t.Fatalf("expected 2 signature from manifest, got: %d", len(signatures)) } // Get by name with etag, gives 304 etag := resp.Header.Get("Etag") req, err := http.NewRequest("GET", manifestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) // Get by digest with etag, gives 304 req, err = http.NewRequest("GET", manifestDigestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) // Ensure that the tag is listed. 
resp, err = http.Get(tagsURL) if err != nil { t.Fatalf("unexpected error getting unknown tags: %v", err) } defer resp.Body.Close() checkResponse(t, "getting tags", resp, http.StatusOK) dec = json.NewDecoder(resp.Body) var tagsResponse tagsAPIResponse if err := dec.Decode(&tagsResponse); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName.Name()) } if len(tagsResponse.Tags) != 1 { t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) } if tagsResponse.Tags[0] != tag { t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) } // Attempt to put a manifest with mismatching FSLayer and History array cardinalities unsignedManifest.History = append(unsignedManifest.History, schema1.History{ V1Compatibility: "", }) invalidSigned, err := schema1.Sign(unsignedManifest, env.pk) if err != nil { t.Fatalf("error signing manifest") } resp = putManifest(t, "putting invalid signed manifest", manifestDigestURL, "", invalidSigned) checkResponse(t, "putting invalid signed manifest", resp, http.StatusBadRequest) return args } func testManifestAPISchema2(t *testing.T, env *testEnv, imageName reference.Named) manifestArgs { tag := "schema2tag" args := manifestArgs{ imageName: imageName, mediaType: schema2.MediaTypeManifest, } tagRef, _ := reference.WithTag(imageName, tag) manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } // ----------------------------- // Attempt to fetch the manifest resp, err := http.Get(manifestURL) if err != nil { t.Fatalf("unexpected error getting manifest: %v", err) } defer resp.Body.Close() checkResponse(t, "getting non-existent manifest", resp, http.StatusNotFound) checkBodyHasErrorCodes(t, "getting non-existent manifest", resp, v2.ErrorCodeManifestUnknown) tagsURL, err := env.builder.BuildTagsURL(imageName) if err != nil { t.Fatalf("unexpected error building tags url: %v", err) } resp, err = http.Get(tagsURL) if err != nil { t.Fatalf("unexpected error getting unknown tags: %v", err) } defer resp.Body.Close() // Check that we get an unknown repository error when asking for tags checkResponse(t, "getting unknown manifest tags", resp, http.StatusNotFound) checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeNameUnknown) // -------------------------------- // Attempt to push manifest with missing config and missing layers manifest := &schema2.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 2, MediaType: schema2.MediaTypeManifest, }, Config: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 3253, MediaType: schema2.MediaTypeConfig, }, Layers: []distribution.Descriptor{ { Digest: "sha256:463434349086340864309863409683460843608348608934092322395278926a", Size: 6323, MediaType: schema2.MediaTypeLayer, }, { Digest: "sha256:630923423623623423352523525237238023652897356239852383652aaaaaaa", Size: 6863, MediaType: schema2.MediaTypeLayer, }, }, } resp = putManifest(t, "putting missing config manifest", manifestURL, schema2.MediaTypeManifest, manifest) defer resp.Body.Close() checkResponse(t, "putting missing config manifest", resp, http.StatusBadRequest) _, p, counts := checkBodyHasErrorCodes(t, "putting missing config manifest", resp, v2.ErrorCodeManifestBlobUnknown) expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestBlobUnknown: 3, 
} if !reflect.DeepEqual(counts, expectedCounts) { t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } // Push a config, and reference it in the manifest sampleConfig := []byte(`{ "architecture": "amd64", "history": [ { "created": "2015-10-31T22:22:54.690851953Z", "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /" }, { "created": "2015-10-31T22:22:55.613815829Z", "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]" } ], "rootfs": { "diff_ids": [ "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" ], "type": "layers" } }`) sampleConfigDigest := digest.FromBytes(sampleConfig) uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, sampleConfigDigest, uploadURLBase, bytes.NewReader(sampleConfig)) manifest.Config.Digest = sampleConfigDigest manifest.Config.Size = int64(len(sampleConfig)) // The manifest should still be invalid, because its layer doesnt exist resp = putManifest(t, "putting missing layer manifest", manifestURL, schema2.MediaTypeManifest, manifest) defer resp.Body.Close() checkResponse(t, "putting missing layer manifest", resp, http.StatusBadRequest) _, p, counts = checkBodyHasErrorCodes(t, "getting unknown manifest tags", resp, v2.ErrorCodeManifestBlobUnknown) expectedCounts = map[errcode.ErrorCode]int{ v2.ErrorCodeManifestBlobUnknown: 2, } if !reflect.DeepEqual(counts, expectedCounts) { t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } // Push 2 random layers expectedLayers := make(map[digest.Digest]io.ReadSeeker) for i := range manifest.Layers { rs, dgstStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } dgst := digest.Digest(dgstStr) expectedLayers[dgst] = rs manifest.Layers[i].Digest = dgst uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs) } // ------------------- // Push the manifest with all layers pushed. 
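// With the config blob and both random layers uploaded, the schema2 manifest PUT should now succeed (201 Created), both by tag and by canonical digest.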
deserializedManifest, err := schema2.FromStruct(*manifest) if err != nil { t.Fatalf("could not create DeserializedManifest: %v", err) } _, canonical, err := deserializedManifest.Payload() if err != nil { t.Fatalf("could not get manifest payload: %v", err) } dgst := digest.FromBytes(canonical) args.dgst = dgst args.manifest = deserializedManifest digestRef, _ := reference.WithDigest(imageName, dgst) manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting manifest no error", manifestURL, schema2.MediaTypeManifest, manifest) checkResponse(t, "putting manifest no error", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // -------------------- // Push by digest -- should get same result resp = putManifest(t, "putting manifest by digest", manifestDigestURL, schema2.MediaTypeManifest, manifest) checkResponse(t, "putting manifest by digest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // ------------------ // Fetch by tag name req, err := http.NewRequest("GET", manifestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("Accept", schema2.MediaTypeManifest) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifest schema2.DeserializedManifest dec := json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } _, fetchedCanonical, err := fetchedManifest.Payload() if err != nil { t.Fatalf("error getting manifest payload: %v", err) } if !bytes.Equal(fetchedCanonical, canonical) { t.Fatalf("manifests do not match") } // --------------- // Fetch by digest req, err = http.NewRequest("GET", manifestDigestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("Accept", schema2.MediaTypeManifest) resp, err = http.DefaultClient.Do(req) checkErr(t, err, "fetching manifest by digest") defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifestByDigest schema2.DeserializedManifest dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifestByDigest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } _, fetchedCanonical, err = fetchedManifest.Payload() if err != nil { t.Fatalf("error getting manifest payload: %v", err) } if !bytes.Equal(fetchedCanonical, canonical) { t.Fatalf("manifests do not match") } // Get by name with etag, gives 304 etag := resp.Header.Get("Etag") req, err = http.NewRequest("GET", manifestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) // Get by digest with etag, gives 304 req, err = 
http.NewRequest("GET", manifestDigestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) // Ensure that the tag is listed. resp, err = http.Get(tagsURL) if err != nil { t.Fatalf("unexpected error getting unknown tags: %v", err) } defer resp.Body.Close() checkResponse(t, "getting unknown manifest tags", resp, http.StatusOK) dec = json.NewDecoder(resp.Body) var tagsResponse tagsAPIResponse if err := dec.Decode(&tagsResponse); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) } if len(tagsResponse.Tags) != 1 { t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) } if tagsResponse.Tags[0] != tag { t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) } // ------------------ // Fetch as a schema1 manifest resp, err = http.Get(manifestURL) if err != nil { t.Fatalf("unexpected error fetching manifest as schema1: %v", err) } defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest as schema1", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedSchema1Manifest schema1.SignedManifest dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedSchema1Manifest); err != nil { t.Fatalf("error decoding fetched schema1 manifest: %v", err) } if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { t.Fatal("wrong schema version") } if fetchedSchema1Manifest.Architecture != "amd64" { t.Fatal("wrong architecture") } if fetchedSchema1Manifest.Name != imageName.Name() { t.Fatal("wrong image name") } if fetchedSchema1Manifest.Tag != tag { t.Fatal("wrong tag") } if len(fetchedSchema1Manifest.FSLayers) != 2 { t.Fatal("wrong number of FSLayers") } for i := range manifest.Layers { if fetchedSchema1Manifest.FSLayers[i].BlobSum != manifest.Layers[len(manifest.Layers)-i-1].Digest { t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) } } if len(fetchedSchema1Manifest.History) != 2 { t.Fatal("wrong number of History entries") } // Don't check V1Compatibility fields becuase we're using randomly-generated // layers. 
return args } func testManifestAPIManifestList(t *testing.T, env *testEnv, args manifestArgs) { imageName := args.imageName tag := "manifestlisttag" tagRef, _ := reference.WithTag(imageName, tag) manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error getting manifest url: %v", err) } // -------------------------------- // Attempt to push manifest list that refers to an unknown manifest manifestList := &manifestlist.ManifestList{ Versioned: manifest.Versioned{ SchemaVersion: 2, MediaType: manifestlist.MediaTypeManifestList, }, Manifests: []manifestlist.ManifestDescriptor{ { Descriptor: distribution.Descriptor{ Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", Size: 3253, MediaType: schema2.MediaTypeManifest, }, Platform: manifestlist.PlatformSpec{ Architecture: "amd64", OS: "linux", }, }, }, } resp := putManifest(t, "putting missing manifest manifestlist", manifestURL, manifestlist.MediaTypeManifestList, manifestList) defer resp.Body.Close() checkResponse(t, "putting missing manifest manifestlist", resp, http.StatusBadRequest) _, p, counts := checkBodyHasErrorCodes(t, "putting missing manifest manifestlist", resp, v2.ErrorCodeManifestBlobUnknown) expectedCounts := map[errcode.ErrorCode]int{ v2.ErrorCodeManifestBlobUnknown: 1, } if !reflect.DeepEqual(counts, expectedCounts) { t.Fatalf("unexpected number of error codes encountered: %v\n!=\n%v\n---\n%s", counts, expectedCounts, string(p)) } // ------------------- // Push a manifest list that references an actual manifest manifestList.Manifests[0].Digest = args.dgst deserializedManifestList, err := manifestlist.FromDescriptors(manifestList.Manifests) if err != nil { t.Fatalf("could not create DeserializedManifestList: %v", err) } _, canonical, err := deserializedManifestList.Payload() if err != nil { t.Fatalf("could not get manifest list payload: %v", err) } dgst := digest.FromBytes(canonical) digestRef, _ := reference.WithDigest(imageName, dgst) manifestDigestURL, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp = putManifest(t, "putting manifest list no error", manifestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) checkResponse(t, "putting manifest list no error", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // -------------------- // Push by digest -- should get same result resp = putManifest(t, "putting manifest list by digest", manifestDigestURL, manifestlist.MediaTypeManifestList, deserializedManifestList) checkResponse(t, "putting manifest list by digest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // ------------------ // Fetch by tag name req, err := http.NewRequest("GET", manifestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("Accept", manifestlist.MediaTypeManifestList) req.Header.Add("Accept", schema1.MediaTypeSignedManifest) req.Header.Add("Accept", schema2.MediaTypeManifest) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("unexpected error fetching manifest list: %v", err) } defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var 
fetchedManifestList manifestlist.DeserializedManifestList dec := json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifestList); err != nil { t.Fatalf("error decoding fetched manifest list: %v", err) } _, fetchedCanonical, err := fetchedManifestList.Payload() if err != nil { t.Fatalf("error getting manifest list payload: %v", err) } if !bytes.Equal(fetchedCanonical, canonical) { t.Fatalf("manifest lists do not match") } // --------------- // Fetch by digest req, err = http.NewRequest("GET", manifestDigestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("Accept", manifestlist.MediaTypeManifestList) resp, err = http.DefaultClient.Do(req) checkErr(t, err, "fetching manifest list by digest") defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest list", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedManifestListByDigest manifestlist.DeserializedManifestList dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedManifestListByDigest); err != nil { t.Fatalf("error decoding fetched manifest: %v", err) } _, fetchedCanonical, err = fetchedManifestListByDigest.Payload() if err != nil { t.Fatalf("error getting manifest list payload: %v", err) } if !bytes.Equal(fetchedCanonical, canonical) { t.Fatalf("manifests do not match") } // Get by name with etag, gives 304 etag := resp.Header.Get("Etag") req, err = http.NewRequest("GET", manifestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching manifest by name with etag", resp, http.StatusNotModified) // Get by digest with etag, gives 304 req, err = http.NewRequest("GET", manifestDigestURL, nil) if err != nil { t.Fatalf("Error constructing request: %s", err) } req.Header.Set("If-None-Match", etag) resp, err = http.DefaultClient.Do(req) if err != nil { t.Fatalf("Error constructing request: %s", err) } checkResponse(t, "fetching manifest by dgst with etag", resp, http.StatusNotModified) // ------------------ // Fetch as a schema1 manifest resp, err = http.Get(manifestURL) if err != nil { t.Fatalf("unexpected error fetching manifest list as schema1: %v", err) } defer resp.Body.Close() checkResponse(t, "fetching uploaded manifest list as schema1", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, "ETag": []string{fmt.Sprintf(`"%s"`, dgst)}, }) var fetchedSchema1Manifest schema1.SignedManifest dec = json.NewDecoder(resp.Body) if err := dec.Decode(&fetchedSchema1Manifest); err != nil { t.Fatalf("error decoding fetched schema1 manifest: %v", err) } if fetchedSchema1Manifest.Manifest.SchemaVersion != 1 { t.Fatal("wrong schema version") } if fetchedSchema1Manifest.Architecture != "amd64" { t.Fatal("wrong architecture") } if fetchedSchema1Manifest.Name != imageName.Name() { t.Fatal("wrong image name") } if fetchedSchema1Manifest.Tag != tag { t.Fatal("wrong tag") } if len(fetchedSchema1Manifest.FSLayers) != 2 { t.Fatal("wrong number of FSLayers") } layers := args.manifest.(*schema2.DeserializedManifest).Layers for i := range layers { if fetchedSchema1Manifest.FSLayers[i].BlobSum != layers[len(layers)-i-1].Digest { t.Fatalf("blob digest mismatch in schema1 manifest for layer %d", i) } } if len(fetchedSchema1Manifest.History) != 2 { 
t.Fatal("wrong number of History entries") } // Don't check V1Compatibility fields becuase we're using randomly-generated // layers. } func testManifestDelete(t *testing.T, env *testEnv, args manifestArgs) { imageName := args.imageName dgst := args.dgst manifest := args.manifest ref, _ := reference.WithDigest(imageName, dgst) manifestDigestURL, err := env.builder.BuildManifestURL(ref) // --------------- // Delete by digest resp, err := httpDelete(manifestDigestURL) checkErr(t, err, "deleting manifest by digest") checkResponse(t, "deleting manifest", resp, http.StatusAccepted) checkHeaders(t, resp, http.Header{ "Content-Length": []string{"0"}, }) // --------------- // Attempt to fetch deleted manifest resp, err = http.Get(manifestDigestURL) checkErr(t, err, "fetching deleted manifest by digest") defer resp.Body.Close() checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) // --------------- // Delete already deleted manifest by digest resp, err = httpDelete(manifestDigestURL) checkErr(t, err, "re-deleting manifest by digest") checkResponse(t, "re-deleting manifest", resp, http.StatusNotFound) // -------------------- // Re-upload manifest by digest resp = putManifest(t, "putting manifest", manifestDigestURL, args.mediaType, manifest) checkResponse(t, "putting manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) // --------------- // Attempt to fetch re-uploaded deleted digest resp, err = http.Get(manifestDigestURL) checkErr(t, err, "fetching re-uploaded manifest by digest") defer resp.Body.Close() checkResponse(t, "fetching re-uploaded manifest", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, }) // --------------- // Attempt to delete an unknown manifest unknownDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") unknownRef, _ := reference.WithDigest(imageName, unknownDigest) unknownManifestDigestURL, err := env.builder.BuildManifestURL(unknownRef) checkErr(t, err, "building unknown manifest url") resp, err = httpDelete(unknownManifestDigestURL) checkErr(t, err, "delting unknown manifest by digest") checkResponse(t, "fetching deleted manifest", resp, http.StatusNotFound) // -------------------- // Upload manifest by tag tag := "atag" tagRef, _ := reference.WithTag(imageName, tag) manifestTagURL, err := env.builder.BuildManifestURL(tagRef) resp = putManifest(t, "putting manifest by tag", manifestTagURL, args.mediaType, manifest) checkResponse(t, "putting manifest by tag", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{manifestDigestURL}, "Docker-Content-Digest": []string{dgst.String()}, }) tagsURL, err := env.builder.BuildTagsURL(imageName) if err != nil { t.Fatalf("unexpected error building tags url: %v", err) } // Ensure that the tag is listed. 
resp, err = http.Get(tagsURL) if err != nil { t.Fatalf("unexpected error getting unknown tags: %v", err) } defer resp.Body.Close() dec := json.NewDecoder(resp.Body) var tagsResponse tagsAPIResponse if err := dec.Decode(&tagsResponse); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) } if len(tagsResponse.Tags) != 1 { t.Fatalf("expected some tags in response: %v", tagsResponse.Tags) } if tagsResponse.Tags[0] != tag { t.Fatalf("tag not as expected: %q != %q", tagsResponse.Tags[0], tag) } // --------------- // Delete by digest resp, err = httpDelete(manifestDigestURL) checkErr(t, err, "deleting manifest by digest") checkResponse(t, "deleting manifest with tag", resp, http.StatusAccepted) checkHeaders(t, resp, http.Header{ "Content-Length": []string{"0"}, }) // Ensure that the tag is not listed. resp, err = http.Get(tagsURL) if err != nil { t.Fatalf("unexpected error getting unknown tags: %v", err) } defer resp.Body.Close() dec = json.NewDecoder(resp.Body) if err := dec.Decode(&tagsResponse); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } if tagsResponse.Name != imageName.Name() { t.Fatalf("tags name should match image name: %v != %v", tagsResponse.Name, imageName) } if len(tagsResponse.Tags) != 0 { t.Fatalf("expected 0 tags in response: %v", tagsResponse.Tags) } } type testEnv struct { pk libtrust.PrivateKey ctx context.Context config configuration.Configuration app *App server *httptest.Server builder *v2.URLBuilder } func newTestEnvMirror(t *testing.T, deleteEnabled bool) *testEnv { config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, "delete": configuration.Parameters{"enabled": deleteEnabled}, }, Proxy: configuration.Proxy{ RemoteURL: "http://example.com", }, } return newTestEnvWithConfig(t, &config) } func newTestEnv(t *testing.T, deleteEnabled bool) *testEnv { config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, "delete": configuration.Parameters{"enabled": deleteEnabled}, }, } config.HTTP.Headers = headerConfig return newTestEnvWithConfig(t, &config) } func newTestEnvWithConfig(t *testing.T, config *configuration.Configuration) *testEnv { ctx := context.Background() app := NewApp(ctx, config) server := httptest.NewServer(handlers.CombinedLoggingHandler(os.Stderr, app)) builder, err := v2.NewURLBuilderFromString(server.URL + config.HTTP.Prefix) if err != nil { t.Fatalf("error creating url builder: %v", err) } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating private key: %v", err) } return &testEnv{ pk: pk, ctx: ctx, config: *config, app: app, server: server, builder: builder, } } func putManifest(t *testing.T, msg, url, contentType string, v interface{}) *http.Response { var body []byte switch m := v.(type) { case *schema1.SignedManifest: _, pl, err := m.Payload() if err != nil { t.Fatalf("error getting payload: %v", err) } body = pl case *manifestlist.DeserializedManifestList: _, pl, err := m.Payload() if err != nil { t.Fatalf("error getting payload: %v", err) } body = pl default: var err error body, err = json.MarshalIndent(v, "", " ") if err != nil { t.Fatalf("unexpected error marshaling %v: %v", v, err) } } req, err := http.NewRequest("PUT", url, bytes.NewReader(body)) if err != nil { t.Fatalf("error creating request for %s: %v", msg, 
err) } if contentType != "" { req.Header.Set("Content-Type", contentType) } resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatalf("error doing put request while %s: %v", msg, err) } return resp } func startPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named) (location string, uuid string) { layerUploadURL, err := ub.BuildBlobUploadURL(name) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) } resp, err := http.Post(layerUploadURL, "", nil) if err != nil { t.Fatalf("unexpected error starting layer push: %v", err) } defer resp.Body.Close() checkResponse(t, fmt.Sprintf("pushing starting layer push %v", name.String()), resp, http.StatusAccepted) u, err := url.Parse(resp.Header.Get("Location")) if err != nil { t.Fatalf("error parsing location header: %v", err) } uuid = path.Base(u.Path) checkHeaders(t, resp, http.Header{ "Location": []string{"*"}, "Content-Length": []string{"0"}, "Docker-Upload-UUID": []string{uuid}, }) return resp.Header.Get("Location"), uuid } // doPushLayer pushes the layer content returning the url on success returning // the response. If you're only expecting a successful response, use pushLayer. func doPushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) (*http.Response, error) { u, err := url.Parse(uploadURLBase) if err != nil { t.Fatalf("unexpected error parsing pushLayer url: %v", err) } u.RawQuery = url.Values{ "_state": u.Query()["_state"], "digest": []string{dgst.String()}, }.Encode() uploadURL := u.String() // Just do a monolithic upload req, err := http.NewRequest("PUT", uploadURL, body) if err != nil { t.Fatalf("unexpected error creating new request: %v", err) } return http.DefaultClient.Do(req) } // pushLayer pushes the layer content returning the url on success. 
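// It tees the uploaded body through a canonical digester and asserts that the registry's Location and Docker-Content-Digest headers match the digest it computed.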
func pushLayer(t *testing.T, ub *v2.URLBuilder, name reference.Named, dgst digest.Digest, uploadURLBase string, body io.Reader) string { digester := digest.Canonical.New() resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, io.TeeReader(body, digester.Hash())) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) } defer resp.Body.Close() checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) if err != nil { t.Fatalf("error generating sha256 digest of body") } sha256Dgst := digester.Digest() ref, _ := reference.WithDigest(name, sha256Dgst) expectedLayerURL, err := ub.BuildBlobURL(ref) if err != nil { t.Fatalf("error building expected layer url: %v", err) } checkHeaders(t, resp, http.Header{ "Location": []string{expectedLayerURL}, "Content-Length": []string{"0"}, "Docker-Content-Digest": []string{sha256Dgst.String()}, }) return resp.Header.Get("Location") } func finishUpload(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, dgst digest.Digest) string { resp, err := doPushLayer(t, ub, name, dgst, uploadURLBase, nil) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) } defer resp.Body.Close() checkResponse(t, "putting monolithic chunk", resp, http.StatusCreated) ref, _ := reference.WithDigest(name, dgst) expectedLayerURL, err := ub.BuildBlobURL(ref) if err != nil { t.Fatalf("error building expected layer url: %v", err) } checkHeaders(t, resp, http.Header{ "Location": []string{expectedLayerURL}, "Content-Length": []string{"0"}, "Docker-Content-Digest": []string{dgst.String()}, }) return resp.Header.Get("Location") } func doPushChunk(t *testing.T, uploadURLBase string, body io.Reader) (*http.Response, digest.Digest, error) { u, err := url.Parse(uploadURLBase) if err != nil { t.Fatalf("unexpected error parsing pushLayer url: %v", err) } u.RawQuery = url.Values{ "_state": u.Query()["_state"], }.Encode() uploadURL := u.String() digester := digest.Canonical.New() req, err := http.NewRequest("PATCH", uploadURL, io.TeeReader(body, digester.Hash())) if err != nil { t.Fatalf("unexpected error creating new request: %v", err) } req.Header.Set("Content-Type", "application/octet-stream") resp, err := http.DefaultClient.Do(req) return resp, digester.Digest(), err } func pushChunk(t *testing.T, ub *v2.URLBuilder, name reference.Named, uploadURLBase string, body io.Reader, length int64) (string, digest.Digest) { resp, dgst, err := doPushChunk(t, uploadURLBase, body) if err != nil { t.Fatalf("unexpected error doing push layer request: %v", err) } defer resp.Body.Close() checkResponse(t, "putting chunk", resp, http.StatusAccepted) if err != nil { t.Fatalf("error generating sha256 digest of body") } checkHeaders(t, resp, http.Header{ "Range": []string{fmt.Sprintf("0-%d", length-1)}, "Content-Length": []string{"0"}, }) return resp.Header.Get("Location"), dgst } func checkResponse(t *testing.T, msg string, resp *http.Response, expectedStatus int) { if resp.StatusCode != expectedStatus { t.Logf("unexpected status %s: %v != %v", msg, resp.StatusCode, expectedStatus) maybeDumpResponse(t, resp) t.FailNow() } // We expect the headers included in the configuration, unless the // status code is 405 (Method Not Allowed), which means the handler // doesn't even get called. 
if resp.StatusCode != 405 && !reflect.DeepEqual(resp.Header["X-Content-Type-Options"], []string{"nosniff"}) { t.Logf("missing or incorrect header X-Content-Type-Options %s", msg) maybeDumpResponse(t, resp) t.FailNow() } } // checkBodyHasErrorCodes ensures the body is an error body and has the // expected error codes, returning the error structure, the json slice and a // count of the errors by code. func checkBodyHasErrorCodes(t *testing.T, msg string, resp *http.Response, errorCodes ...errcode.ErrorCode) (errcode.Errors, []byte, map[errcode.ErrorCode]int) { p, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("unexpected error reading body %s: %v", msg, err) } var errs errcode.Errors if err := json.Unmarshal(p, &errs); err != nil { t.Fatalf("unexpected error decoding error response: %v", err) } if len(errs) == 0 { t.Fatalf("expected errors in response") } // TODO(stevvooe): Shoot. The error setup is not working out. The content- // type headers are being set after writing the status code. // if resp.Header.Get("Content-Type") != "application/json; charset=utf-8" { // t.Fatalf("unexpected content type: %v != 'application/json'", // resp.Header.Get("Content-Type")) // } expected := map[errcode.ErrorCode]struct{}{} counts := map[errcode.ErrorCode]int{} // Initialize map with zeros for expected for _, code := range errorCodes { expected[code] = struct{}{} counts[code] = 0 } for _, e := range errs { err, ok := e.(errcode.ErrorCoder) if !ok { t.Fatalf("not an ErrorCoder: %#v", e) } if _, ok := expected[err.ErrorCode()]; !ok { t.Fatalf("unexpected error code %v encountered during %s: %s ", err.ErrorCode(), msg, string(p)) } counts[err.ErrorCode()]++ } // Ensure that counts of expected errors were all non-zero for code := range expected { if counts[code] == 0 { t.Fatalf("expected error code %v not encounterd during %s: %s", code, msg, string(p)) } } return errs, p, counts } func maybeDumpResponse(t *testing.T, resp *http.Response) { if d, err := httputil.DumpResponse(resp, true); err != nil { t.Logf("error dumping response: %v", err) } else { t.Logf("response:\n%s", string(d)) } } // matchHeaders checks that the response has at least the headers. If not, the // test will fail. If a passed in header value is "*", any non-zero value will // suffice as a match. func checkHeaders(t *testing.T, resp *http.Response, headers http.Header) { for k, vs := range headers { if resp.Header.Get(k) == "" { t.Fatalf("response missing header %q", k) } for _, v := range vs { if v == "*" { // Just ensure there is some value. 
if len(resp.Header[http.CanonicalHeaderKey(k)]) > 0 { continue } } for _, hv := range resp.Header[http.CanonicalHeaderKey(k)] { if hv != v { t.Fatalf("%+v %v header value not matched in response: %q != %q", resp.Header, k, hv, v) } } } } } func checkErr(t *testing.T, err error, msg string) { if err != nil { t.Fatalf("unexpected error %s: %v", msg, err) } } func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest { imageNameRef, err := reference.ParseNamed(imageName) if err != nil { t.Fatalf("unable to parse reference: %v", err) } unsignedManifest := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: imageName, Tag: tag, FSLayers: []schema1.FSLayer{ { BlobSum: "asdf", }, }, History: []schema1.History{ { V1Compatibility: "", }, }, } // Push 2 random layers expectedLayers := make(map[digest.Digest]io.ReadSeeker) for i := range unsignedManifest.FSLayers { rs, dgstStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer %d: %v", i, err) } dgst := digest.Digest(dgstStr) expectedLayers[dgst] = rs unsignedManifest.FSLayers[i].BlobSum = dgst uploadURLBase, _ := startPushLayer(t, env.builder, imageNameRef) pushLayer(t, env.builder, imageNameRef, dgst, uploadURLBase, rs) } signedManifest, err := schema1.Sign(unsignedManifest, env.pk) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } dgst := digest.FromBytes(signedManifest.Canonical) // Create this repository by tag to ensure the tag mapping is made in the registry tagRef, _ := reference.WithTag(imageNameRef, tag) manifestDigestURL, err := env.builder.BuildManifestURL(tagRef) checkErr(t, err, "building manifest url") digestRef, _ := reference.WithDigest(imageNameRef, dgst) location, err := env.builder.BuildManifestURL(digestRef) checkErr(t, err, "building location URL") resp := putManifest(t, "putting signed manifest", manifestDigestURL, "", signedManifest) checkResponse(t, "putting signed manifest", resp, http.StatusCreated) checkHeaders(t, resp, http.Header{ "Location": []string{location}, "Docker-Content-Digest": []string{dgst.String()}, }) return dgst } // Test mutation operations on a registry configured as a cache. Ensure that they return // appropriate errors. 
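// The environment comes from newTestEnvMirror, which sets Proxy.RemoteURL, so the registry acts as a pull-through cache; manifest pushes and deletes and blob uploads and deletes should all be rejected with the Unsupported error code's HTTP status.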
func TestRegistryAsCacheMutationAPIs(t *testing.T) { deleteEnabled := true env := newTestEnvMirror(t, deleteEnabled) imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" tagRef, _ := reference.WithTag(imageName, tag) manifestURL, err := env.builder.BuildManifestURL(tagRef) if err != nil { t.Fatalf("unexpected error building base url: %v", err) } // Manifest upload m := &schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: imageName.Name(), Tag: tag, FSLayers: []schema1.FSLayer{}, History: []schema1.History{}, } sm, err := schema1.Sign(m, env.pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } resp := putManifest(t, "putting unsigned manifest", manifestURL, "", sm) checkResponse(t, "putting signed manifest to cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Manifest Delete resp, err = httpDelete(manifestURL) checkResponse(t, "deleting signed manifest from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Blob upload initialization layerUploadURL, err := env.builder.BuildBlobUploadURL(imageName) if err != nil { t.Fatalf("unexpected error building layer upload url: %v", err) } resp, err = http.Post(layerUploadURL, "", nil) if err != nil { t.Fatalf("unexpected error starting layer push: %v", err) } defer resp.Body.Close() checkResponse(t, fmt.Sprintf("starting layer push to cache %v", imageName), resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) // Blob Delete ref, _ := reference.WithDigest(imageName, digest.DigestSha256EmptyTar) blobURL, err := env.builder.BuildBlobURL(ref) resp, err = httpDelete(blobURL) checkResponse(t, "deleting blob from cache", resp, errcode.ErrorCodeUnsupported.Descriptor().HTTPStatusCode) } // TestCheckContextNotifier makes sure the API endpoints get a ResponseWriter // that implements http.CloseNotifier.
func TestCheckContextNotifier(t *testing.T) { env := newTestEnv(t, false) // Register a new endpoint for testing env.app.router.Handle("/unittest/{name}/", env.app.dispatcher(func(ctx *Context, r *http.Request) http.Handler { return handlers.MethodHandler{ "GET": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if _, ok := w.(http.CloseNotifier); !ok { t.Fatal("could not cast ResponseWriter to CloseNotifier") } w.WriteHeader(200) }), } })) resp, err := http.Get(env.server.URL + "/unittest/reponame/") if err != nil { t.Fatalf("unexpected error issuing request: %v", err) } defer resp.Body.Close() if resp.StatusCode != 200 { t.Fatalf("wrong status code - expected 200, got %d", resp.StatusCode) } } func TestProxyManifestGetByTag(t *testing.T) { truthConfig := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, } truthConfig.HTTP.Headers = headerConfig imageName, _ := reference.ParseNamed("foo/bar") tag := "latest" truthEnv := newTestEnvWithConfig(t, &truthConfig) // create a repository in the truth registry dgst := createRepository(truthEnv, t, imageName.Name(), tag) proxyConfig := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, Proxy: configuration.Proxy{ RemoteURL: truthEnv.server.URL, }, } proxyConfig.HTTP.Headers = headerConfig proxyEnv := newTestEnvWithConfig(t, &proxyConfig) digestRef, _ := reference.WithDigest(imageName, dgst) manifestDigestURL, err := proxyEnv.builder.BuildManifestURL(digestRef) checkErr(t, err, "building manifest url") resp, err := http.Get(manifestDigestURL) checkErr(t, err, "fetching manifest from proxy by digest") defer resp.Body.Close() tagRef, _ := reference.WithTag(imageName, tag) manifestTagURL, err := proxyEnv.builder.BuildManifestURL(tagRef) checkErr(t, err, "building manifest url") resp, err = http.Get(manifestTagURL) checkErr(t, err, "fetching manifest from proxy by tag") defer resp.Body.Close() checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{dgst.String()}, }) // Create another manifest in the remote with the same image/tag pair newDigest := createRepository(truthEnv, t, imageName.Name(), tag) if dgst == newDigest { t.Fatalf("non-random test data") } // fetch it with the same proxy URL as before. 
Ensure the updated content is at the same tag resp, err = http.Get(manifestTagURL) checkErr(t, err, "fetching manifest from proxy by tag") defer resp.Body.Close() checkResponse(t, "fetching manifest from proxy by tag", resp, http.StatusOK) checkHeaders(t, resp, http.Header{ "Docker-Content-Digest": []string{newDigest.String()}, }) } distribution-2.3.0/registry/handlers/app.go000066400000000000000000001005721265472114500210260ustar00rootroot00000000000000package handlers import ( cryptorand "crypto/rand" "expvar" "fmt" "math/rand" "net" "net/http" "net/url" "os" "time" log "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/configuration" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/health" "github.com/docker/distribution/health/checks" "github.com/docker/distribution/notifications" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" registrymiddleware "github.com/docker/distribution/registry/middleware/registry" repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" "github.com/docker/distribution/registry/proxy" "github.com/docker/distribution/registry/storage" memorycache "github.com/docker/distribution/registry/storage/cache/memory" rediscache "github.com/docker/distribution/registry/storage/cache/redis" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/factory" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" "github.com/docker/libtrust" "github.com/garyburd/redigo/redis" "github.com/gorilla/mux" "golang.org/x/net/context" ) // randomSecretSize is the number of random bytes to generate if no secret // was specified. const randomSecretSize = 32 // defaultCheckInterval is the default time in between health checks const defaultCheckInterval = 10 * time.Second // App is a global registry application object. Shared resources can be placed // on this object that will be accessible from all requests. Any writable // fields should be protected. type App struct { context.Context Config *configuration.Configuration router *mux.Router // main application router, configured with dispatchers driver storagedriver.StorageDriver // driver maintains the app global storage driver instance. registry distribution.Namespace // registry is the primary registry backend for the app instance. accessController auth.AccessController // main access controller for application // httpHost is a parsed representation of the http.host parameter from // the configuration. Only the Scheme and Host fields are used. httpHost url.URL // events contains notification related configuration. events struct { sink notifications.Sink source notifications.SourceRecord } redis *redis.Pool // trustKey is a deprecated key used to sign manifests converted to // schema1 for backward compatibility. It should not be used for any // other purposes. trustKey libtrust.PrivateKey // isCache is true if this registry is configured as a pull through cache isCache bool // readOnly is true if the registry is in a read-only maintenance mode readOnly bool } // NewApp takes a configuration and returns a configured app, ready to serve // requests. The app only implements ServeHTTP and can be wrapped in other // handlers accordingly. 
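// A minimal usage sketch, with an assumed listen address and an already-loaded configuration (neither is taken from this file): app := NewApp(context.Background(), config); http.ListenAndServe(":5000", app). Because App only implements ServeHTTP, it can also be wrapped in middleware such as the logging handler used by the handler tests.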
func NewApp(ctx context.Context, configuration *configuration.Configuration) *App { app := &App{ Config: configuration, Context: ctx, router: v2.RouterWithPrefix(configuration.HTTP.Prefix), isCache: configuration.Proxy.RemoteURL != "", } // Register the handler dispatchers. app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler { return http.HandlerFunc(apiBase) }) app.register(v2.RouteNameManifest, imageManifestDispatcher) app.register(v2.RouteNameCatalog, catalogDispatcher) app.register(v2.RouteNameTags, tagsDispatcher) app.register(v2.RouteNameBlob, blobDispatcher) app.register(v2.RouteNameBlobUpload, blobUploadDispatcher) app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher) var err error app.driver, err = factory.Create(configuration.Storage.Type(), configuration.Storage.Parameters()) if err != nil { // TODO(stevvooe): Move the creation of a service into a protected // method, where this is created lazily. Its status can be queried via // a health check. panic(err) } purgeConfig := uploadPurgeDefaultConfig() if mc, ok := configuration.Storage["maintenance"]; ok { if v, ok := mc["uploadpurging"]; ok { purgeConfig, ok = v.(map[interface{}]interface{}) if !ok { panic("uploadpurging config key must contain additional keys") } } if v, ok := mc["readonly"]; ok { readOnly, ok := v.(map[interface{}]interface{}) if !ok { panic("readonly config key must contain additional keys") } if readOnlyEnabled, ok := readOnly["enabled"]; ok { app.readOnly, ok = readOnlyEnabled.(bool) if !ok { panic("readonly's enabled config key must have a boolean value") } } } } startUploadPurger(app, app.driver, ctxu.GetLogger(app), purgeConfig) app.driver, err = applyStorageMiddleware(app.driver, configuration.Middleware["storage"]) if err != nil { panic(err) } app.configureSecret(configuration) app.configureEvents(configuration) app.configureRedis(configuration) app.configureLogHook(configuration) // Generate an ephemeral key to be used for signing converted manifests // for clients that don't support schema2. 
app.trustKey, err = libtrust.GenerateECP256PrivateKey() if err != nil { panic(err) } if configuration.HTTP.Host != "" { u, err := url.Parse(configuration.HTTP.Host) if err != nil { panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err)) } app.httpHost = *u } options := []storage.RegistryOption{} if app.isCache { options = append(options, storage.DisableDigestResumption) } // configure deletion if d, ok := configuration.Storage["delete"]; ok { e, ok := d["enabled"] if ok { if deleteEnabled, ok := e.(bool); ok && deleteEnabled { options = append(options, storage.EnableDelete) } } } // configure redirects var redirectDisabled bool if redirectConfig, ok := configuration.Storage["redirect"]; ok { v := redirectConfig["disable"] switch v := v.(type) { case bool: redirectDisabled = v default: panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig)) } } if redirectDisabled { ctxu.GetLogger(app).Infof("backend redirection disabled") } else { options = append(options, storage.EnableRedirect) } // configure storage caches if cc, ok := configuration.Storage["cache"]; ok { v, ok := cc["blobdescriptor"] if !ok { // Backwards compatible: "layerinfo" == "blobdescriptor" v = cc["layerinfo"] } switch v { case "redis": if app.redis == nil { panic("redis configuration required to use for layerinfo cache") } cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis) localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) if err != nil { panic("could not create registry: " + err.Error()) } ctxu.GetLogger(app).Infof("using redis blob descriptor cache") case "inmemory": cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider() localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider)) app.registry, err = storage.NewRegistry(app, app.driver, localOptions...) if err != nil { panic("could not create registry: " + err.Error()) } ctxu.GetLogger(app).Infof("using inmemory blob descriptor cache") default: if v != "" { ctxu.GetLogger(app).Warnf("unknown cache type %q, caching disabled", configuration.Storage["cache"]) } } } if app.registry == nil { // configure the registry if no cache section is available. app.registry, err = storage.NewRegistry(app.Context, app.driver, options...) if err != nil { panic("could not create registry: " + err.Error()) } } app.registry, err = applyRegistryMiddleware(app.Context, app.registry, configuration.Middleware["registry"]) if err != nil { panic(err) } authType := configuration.Auth.Type() if authType != "" { accessController, err := auth.GetAccessController(configuration.Auth.Type(), configuration.Auth.Parameters()) if err != nil { panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err)) } app.accessController = accessController ctxu.GetLogger(app).Debugf("configured %q access controller", authType) } // configure as a pull through cache if configuration.Proxy.RemoteURL != "" { app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, configuration.Proxy) if err != nil { panic(err.Error()) } app.isCache = true ctxu.GetLogger(app).Info("Registry configured as a proxy cache to ", configuration.Proxy.RemoteURL) } return app } // RegisterHealthChecks is an awful hack to defer health check registration // control to callers. This should only ever be called once per registry // process, typically in a main function. 
The correct way would be to register // health checks outside of app, since multiple apps may exist in the same // process. Because the configuration and app are tightly coupled, // implementing this properly will require a refactor. This method may panic // if called twice in the same process. func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) { if len(healthRegistries) > 1 { panic("RegisterHealthChecks called with more than one registry") } healthRegistry := health.DefaultRegistry if len(healthRegistries) == 1 { healthRegistry = healthRegistries[0] } if app.Config.Health.StorageDriver.Enabled { interval := app.Config.Health.StorageDriver.Interval if interval == 0 { interval = defaultCheckInterval } storageDriverCheck := func() error { _, err := app.driver.List(app, "/") // "/" should always exist return err // any error will be treated as failure } if app.Config.Health.StorageDriver.Threshold != 0 { healthRegistry.RegisterPeriodicThresholdFunc("storagedriver_"+app.Config.Storage.Type(), interval, app.Config.Health.StorageDriver.Threshold, storageDriverCheck) } else { healthRegistry.RegisterPeriodicFunc("storagedriver_"+app.Config.Storage.Type(), interval, storageDriverCheck) } } for _, fileChecker := range app.Config.Health.FileCheckers { interval := fileChecker.Interval if interval == 0 { interval = defaultCheckInterval } ctxu.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second) healthRegistry.Register(fileChecker.File, health.PeriodicChecker(checks.FileChecker(fileChecker.File), interval)) } for _, httpChecker := range app.Config.Health.HTTPCheckers { interval := httpChecker.Interval if interval == 0 { interval = defaultCheckInterval } statusCode := httpChecker.StatusCode if statusCode == 0 { statusCode = 200 } checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers) if httpChecker.Threshold != 0 { ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold) healthRegistry.Register(httpChecker.URI, health.PeriodicThresholdChecker(checker, interval, httpChecker.Threshold)) } else { ctxu.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d", httpChecker.URI, interval/time.Second) healthRegistry.Register(httpChecker.URI, health.PeriodicChecker(checker, interval)) } } for _, tcpChecker := range app.Config.Health.TCPCheckers { interval := tcpChecker.Interval if interval == 0 { interval = defaultCheckInterval } checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout) if tcpChecker.Threshold != 0 { ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold) healthRegistry.Register(tcpChecker.Addr, health.PeriodicThresholdChecker(checker, interval, tcpChecker.Threshold)) } else { ctxu.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d", tcpChecker.Addr, interval/time.Second) healthRegistry.Register(tcpChecker.Addr, health.PeriodicChecker(checker, interval)) } } } // register a handler with the application, by route name. The handler will be // passed through the application filters and context will be constructed at // request time. func (app *App) register(routeName string, dispatch dispatchFunc) { // TODO(stevvooe): This odd dispatcher/route registration is a by-product of // some limitations in the gorilla/mux router.
We are using it to keep // routing consistent between the client and server, but we may want to // replace it with manual routing and structure-based dispatch for better // control over the request execution. app.router.GetRoute(routeName).Handler(app.dispatcher(dispatch)) } // configureEvents prepares the event sink for action. func (app *App) configureEvents(configuration *configuration.Configuration) { // Configure all of the endpoint sinks. var sinks []notifications.Sink for _, endpoint := range configuration.Notifications.Endpoints { if endpoint.Disabled { ctxu.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name) continue } ctxu.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers) endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{ Timeout: endpoint.Timeout, Threshold: endpoint.Threshold, Backoff: endpoint.Backoff, Headers: endpoint.Headers, }) sinks = append(sinks, endpoint) } // NOTE(stevvooe): Moving to a new queueing implementation is as easy as // replacing broadcaster with a rabbitmq implementation. It's recommended // that the registry instances also act as the workers to keep deployment // simple. app.events.sink = notifications.NewBroadcaster(sinks...) // Populate registry event source hostname, err := os.Hostname() if err != nil { hostname = configuration.HTTP.Addr } else { // try to pick the port off the config _, port, err := net.SplitHostPort(configuration.HTTP.Addr) if err == nil { hostname = net.JoinHostPort(hostname, port) } } app.events.source = notifications.SourceRecord{ Addr: hostname, InstanceID: ctxu.GetStringValue(app, "instance.id"), } } func (app *App) configureRedis(configuration *configuration.Configuration) { if configuration.Redis.Addr == "" { ctxu.GetLogger(app).Infof("redis not configured") return } pool := &redis.Pool{ Dial: func() (redis.Conn, error) { // TODO(stevvooe): Yet another use case for contextual timing. ctx := context.WithValue(app, "redis.connect.startedat", time.Now()) done := func(err error) { logger := ctxu.GetLoggerWithField(ctx, "redis.connect.duration", ctxu.Since(ctx, "redis.connect.startedat")) if err != nil { logger.Errorf("redis: error connecting: %v", err) } else { logger.Infof("redis: connect %v", configuration.Redis.Addr) } } conn, err := redis.DialTimeout("tcp", configuration.Redis.Addr, configuration.Redis.DialTimeout, configuration.Redis.ReadTimeout, configuration.Redis.WriteTimeout) if err != nil { ctxu.GetLogger(app).Errorf("error connecting to redis instance %s: %v", configuration.Redis.Addr, err) done(err) return nil, err } // authorize the connection if configuration.Redis.Password != "" { if _, err = conn.Do("AUTH", configuration.Redis.Password); err != nil { defer conn.Close() done(err) return nil, err } } // select the database to use if configuration.Redis.DB != 0 { if _, err = conn.Do("SELECT", configuration.Redis.DB); err != nil { defer conn.Close() done(err) return nil, err } } done(nil) return conn, nil }, MaxIdle: configuration.Redis.Pool.MaxIdle, MaxActive: configuration.Redis.Pool.MaxActive, IdleTimeout: configuration.Redis.Pool.IdleTimeout, TestOnBorrow: func(c redis.Conn, t time.Time) error { // TODO(stevvooe): We can probably do something more interesting // here with the health package. _, err := c.Do("PING") return err }, Wait: false, // if a connection is not avialable, proceed without cache. 
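// For illustration only (the YAML keys below are assumptions based on the
	// configuration.Redis fields used in this function), a registry
	// configuration feeding this pool might look roughly like:
	//
	//	redis:
	//	  addr: localhost:6379
	//	  dialtimeout: 10ms
	//	  readtimeout: 10ms
	//	  writetimeout: 10ms
	//	  pool:
	//	    maxidle: 16
	//	    maxactive: 64
	//	    idletimeout: 300s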
} app.redis = pool // setup expvar registry := expvar.Get("registry") if registry == nil { registry = expvar.NewMap("registry") } registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} { return map[string]interface{}{ "Config": configuration.Redis, "Active": app.redis.ActiveCount(), } })) } // configureLogHook prepares logging hook parameters. func (app *App) configureLogHook(configuration *configuration.Configuration) { entry, ok := ctxu.GetLogger(app).(*log.Entry) if !ok { // somehow, we are not using logrus return } logger := entry.Logger for _, configHook := range configuration.Log.Hooks { if !configHook.Disabled { switch configHook.Type { case "mail": hook := &logHook{} hook.LevelsParam = configHook.Levels hook.Mail = &mailer{ Addr: configHook.MailOptions.SMTP.Addr, Username: configHook.MailOptions.SMTP.Username, Password: configHook.MailOptions.SMTP.Password, Insecure: configHook.MailOptions.SMTP.Insecure, From: configHook.MailOptions.From, To: configHook.MailOptions.To, } logger.Hooks.Add(hook) default: } } } } // configureSecret creates a random secret if a secret wasn't included in the // configuration. func (app *App) configureSecret(configuration *configuration.Configuration) { if configuration.HTTP.Secret == "" { var secretBytes [randomSecretSize]byte if _, err := cryptorand.Read(secretBytes[:]); err != nil { panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err)) } configuration.HTTP.Secret = string(secretBytes[:]) ctxu.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.") } } func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() // ensure that request body is always closed. // Instantiate an http context here so we can track the error codes // returned by the request router. ctx := defaultContextManager.context(app, w, r) defer func() { status, ok := ctx.Value("http.response.status").(int) if ok && status >= 200 && status <= 399 { ctxu.GetResponseLogger(ctx).Infof("response completed") } }() defer defaultContextManager.release(ctx) // NOTE(stevvooe): Total hack to get instrumented responsewriter from context. var err error w, err = ctxu.GetResponseWriter(ctx) if err != nil { ctxu.GetLogger(ctx).Warnf("response writer not found in context") } // Set a header with the Docker Distribution API Version for all responses. w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") app.router.ServeHTTP(w, r) } // dispatchFunc takes a context and request and returns a constructed handler // for the route. The dispatcher will use this to dynamically create request // specific handlers for each endpoint without creating a new router for each // request. type dispatchFunc func(ctx *Context, r *http.Request) http.Handler // TODO(stevvooe): dispatchers should probably have some validation error // chain with proper error reporting. // dispatcher returns a handler that constructs a request specific context and // handler, using the dispatch factory function. 
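//
// For illustration (the handler name below is hypothetical and not part of
// this package), a minimal dispatchFunc compatible with the registration
// calls in NewApp could look like:
//
//	func pingDispatcher(ctx *Context, r *http.Request) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			// ctx carries the request-scoped repository, vars and error list.
//			w.WriteHeader(http.StatusOK)
//		})
//	}
//
// Registering it would then be a single call such as
// app.register(v2.RouteNameBase, pingDispatcher), assuming that route is
// appropriate for the handler.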
func (app *App) dispatcher(dispatch dispatchFunc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { for headerName, headerValues := range app.Config.HTTP.Headers { for _, value := range headerValues { w.Header().Add(headerName, value) } } context := app.context(w, r) if err := app.authorized(w, r, context); err != nil { ctxu.GetLogger(context).Warnf("error authorizing context: %v", err) return } // Add username to request logging context.Context = ctxu.WithLogger(context.Context, ctxu.GetLogger(context.Context, "auth.user.name")) if app.nameRequired(r) { nameRef, err := reference.ParseNamed(getName(context)) if err != nil { ctxu.GetLogger(context).Errorf("error parsing reference from context: %v", err) context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{ Name: getName(context), Reason: err, }) if err := errcode.ServeJSON(w, context.Errors); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return } repository, err := app.registry.Repository(context, nameRef) if err != nil { ctxu.GetLogger(context).Errorf("error resolving repository: %v", err) switch err := err.(type) { case distribution.ErrRepositoryUnknown: context.Errors = append(context.Errors, v2.ErrorCodeNameUnknown.WithDetail(err)) case distribution.ErrRepositoryNameInvalid: context.Errors = append(context.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) } if err := errcode.ServeJSON(w, context.Errors); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return } // assign and decorate the authorized repository with an event bridge. context.Repository = notifications.Listen( repository, app.eventBridge(context, r)) context.Repository, err = applyRepoMiddleware(context.Context, context.Repository, app.Config.Middleware["repository"]) if err != nil { ctxu.GetLogger(context).Errorf("error initializing repository middleware: %v", err) context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) if err := errcode.ServeJSON(w, context.Errors); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return } } dispatch(context, r).ServeHTTP(w, r) // Automated error response handling here. Handlers may return their // own errors if they need different behavior (such as range errors // for layer upload). if context.Errors.Len() > 0 { if err := errcode.ServeJSON(w, context.Errors); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } app.logError(context, context.Errors) } }) } func (app *App) logError(context context.Context, errors errcode.Errors) { for _, e1 := range errors { var c ctxu.Context switch e1.(type) { case errcode.Error: e, _ := e1.(errcode.Error) c = ctxu.WithValue(context, "err.code", e.Code) c = ctxu.WithValue(c, "err.message", e.Code.Message()) c = ctxu.WithValue(c, "err.detail", e.Detail) case errcode.ErrorCode: e, _ := e1.(errcode.ErrorCode) c = ctxu.WithValue(context, "err.code", e) c = ctxu.WithValue(c, "err.message", e.Message()) default: // just normal go 'error' c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown) c = ctxu.WithValue(c, "err.message", e1.Error()) } c = ctxu.WithLogger(c, ctxu.GetLogger(c, "err.code", "err.message", "err.detail")) ctxu.GetResponseLogger(c).Errorf("response completed with error") } } // context constructs the context object for the application. 
This should only be // called once per request. func (app *App) context(w http.ResponseWriter, r *http.Request) *Context { ctx := defaultContextManager.context(app, w, r) ctx = ctxu.WithVars(ctx, r) ctx = ctxu.WithLogger(ctx, ctxu.GetLogger(ctx, "vars.name", "vars.reference", "vars.digest", "vars.uuid")) context := &Context{ App: app, Context: ctx, } if app.httpHost.Scheme != "" && app.httpHost.Host != "" { // A "host" item in the configuration takes precedence over // X-Forwarded-Proto and X-Forwarded-Host headers, and the // hostname in the request. context.urlBuilder = v2.NewURLBuilder(&app.httpHost) } else { context.urlBuilder = v2.NewURLBuilderFromRequest(r) } return context } // authorized checks if the request can proceed with access to the requested // repository. If it succeeds, the context may access the requested // repository. An error will be returned if access is not available. func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error { ctxu.GetLogger(context).Debug("authorizing request") repo := getName(context) if app.accessController == nil { return nil // access controller is not enabled. } var accessRecords []auth.Access if repo != "" { accessRecords = appendAccessRecords(accessRecords, r.Method, repo) if fromRepo := r.FormValue("from"); fromRepo != "" { // mounting a blob from one repository to another requires pull (GET) // access to the source repository. accessRecords = appendAccessRecords(accessRecords, "GET", fromRepo) } } else { // Only allow the name not to be set on the base route. if app.nameRequired(r) { // For this to be properly secured, repo must always be set for a // resource that may make a modification. The only condition under // which name is not set and we still allow access is when the // base route is accessed. This section prevents us from making // that mistake elsewhere in the code, allowing any operation to // proceed. if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } return fmt.Errorf("forbidden: no repository name") } accessRecords = appendCatalogAccessRecord(accessRecords, r) } ctx, err := app.accessController.Authorized(context.Context, accessRecords...) if err != nil { switch err := err.(type) { case auth.Challenge: // Add the appropriate WWW-Authenticate header err.SetHeaders(w) if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil { ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors) } default: // This condition is a potential security problem either in // the configuration or whatever is backing the access // controller. Just return a bad request with no information // to avoid exposure. The request should not proceed. ctxu.GetLogger(context).Errorf("error checking authorization: %v", err) w.WriteHeader(http.StatusBadRequest) } return err } // TODO(stevvooe): This pattern needs to be cleaned up a bit. One context // should be replaced by another, rather than replacing the context on a // mutable object. context.Context = ctx return nil } // eventBridge returns a bridge for the current request, configured with the // correct actor and source.
func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener { actor := notifications.ActorRecord{ Name: getUserName(ctx, r), } request := notifications.NewRequestRecord(ctxu.GetRequestID(ctx), r) return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink) } // nameRequired returns true if the route requires a name. func (app *App) nameRequired(r *http.Request) bool { route := mux.CurrentRoute(r) routeName := route.GetName() return route == nil || (routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog) } // apiBase implements a simple yes-man for doing overall checks against the // api. This can support auth roundtrips to support docker login. func apiBase(w http.ResponseWriter, r *http.Request) { const emptyJSON = "{}" // Provide a simple /v2/ 200 OK response with empty json response. w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON))) fmt.Fprint(w, emptyJSON) } // appendAccessRecords checks the method and adds the appropriate Access records to the records list. func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access { resource := auth.Resource{ Type: "repository", Name: repo, } switch method { case "GET", "HEAD": records = append(records, auth.Access{ Resource: resource, Action: "pull", }) case "POST", "PUT", "PATCH": records = append(records, auth.Access{ Resource: resource, Action: "pull", }, auth.Access{ Resource: resource, Action: "push", }) case "DELETE": // DELETE access requires full admin rights, which is represented // as "*". This may not be ideal. records = append(records, auth.Access{ Resource: resource, Action: "*", }) } return records } // Add the access record for the catalog if it's our current route func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access { route := mux.CurrentRoute(r) routeName := route.GetName() if routeName == v2.RouteNameCatalog { resource := auth.Resource{ Type: "registry", Name: "catalog", } accessRecords = append(accessRecords, auth.Access{ Resource: resource, Action: "*", }) } return accessRecords } // applyRegistryMiddleware wraps a registry instance with the configured middlewares func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, middlewares []configuration.Middleware) (distribution.Namespace, error) { for _, mw := range middlewares { rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry) if err != nil { return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err) } registry = rmw } return registry, nil } // applyRepoMiddleware wraps a repository with the configured middlewares func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) { for _, mw := range middlewares { rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository) if err != nil { return nil, err } repository = rmw } return repository, nil } // applyStorageMiddleware wraps a storage driver with the configured middlewares func applyStorageMiddleware(driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) { for _, mw := range middlewares { smw, err := storagemiddleware.Get(mw.Name, mw.Options, driver) if err != nil { return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err) } driver = smw } return 
driver, nil } // uploadPurgeDefaultConfig provides a default configuration for upload // purging to be used in the absence of configuration in the // confifuration file func uploadPurgeDefaultConfig() map[interface{}]interface{} { config := map[interface{}]interface{}{} config["enabled"] = true config["age"] = "168h" config["interval"] = "24h" config["dryrun"] = false return config } func badPurgeUploadConfig(reason string) { panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason)) } // startUploadPurger schedules a goroutine which will periodically // check upload directories for old files and delete them func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log ctxu.Logger, config map[interface{}]interface{}) { if config["enabled"] == false { return } var purgeAgeDuration time.Duration var err error purgeAge, ok := config["age"] if ok { ageStr, ok := purgeAge.(string) if !ok { badPurgeUploadConfig("age is not a string") } purgeAgeDuration, err = time.ParseDuration(ageStr) if err != nil { badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error())) } } else { badPurgeUploadConfig("age missing") } var intervalDuration time.Duration interval, ok := config["interval"] if ok { intervalStr, ok := interval.(string) if !ok { badPurgeUploadConfig("interval is not a string") } intervalDuration, err = time.ParseDuration(intervalStr) if err != nil { badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error())) } } else { badPurgeUploadConfig("interval missing") } var dryRunBool bool dryRun, ok := config["dryrun"] if ok { dryRunBool, ok = dryRun.(bool) if !ok { badPurgeUploadConfig("cannot parse dryrun") } } else { badPurgeUploadConfig("dryrun missing") } go func() { rand.Seed(time.Now().Unix()) jitter := time.Duration(rand.Int()%60) * time.Minute log.Infof("Starting upload purge in %s", jitter) time.Sleep(jitter) for { storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool) log.Infof("Starting upload purge in %s", intervalDuration) time.Sleep(intervalDuration) } }() } distribution-2.3.0/registry/handlers/app_test.go000066400000000000000000000172631265472114500220710ustar00rootroot00000000000000package handlers import ( "encoding/json" "net/http" "net/http/httptest" "net/url" "reflect" "testing" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/distribution/registry/storage" memorycache "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" ) // TestAppDispatcher builds an application with a test dispatcher and ensures // that requests are properly dispatched and the handlers are constructed. // This only tests the dispatch mechanism. The underlying dispatchers must be // tested individually. 
func TestAppDispatcher(t *testing.T) { driver := inmemory.New() ctx := context.Background() registry, err := storage.NewRegistry(ctx, driver, storage.BlobDescriptorCacheProvider(memorycache.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableDelete, storage.EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } app := &App{ Config: &configuration.Configuration{}, Context: ctx, router: v2.Router(), driver: driver, registry: registry, } server := httptest.NewServer(app) router := v2.Router() serverURL, err := url.Parse(server.URL) if err != nil { t.Fatalf("error parsing server url: %v", err) } varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc { return func(ctx *Context, r *http.Request) http.Handler { // Always checks the same name context if ctx.Repository.Name().Name() != getName(ctx) { t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), "foo/bar") } // Check that we have all that is expected for expectedK, expectedV := range expectedVars { if ctx.Value(expectedK) != expectedV { t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV) } } // Check that we only have variables that are expected for k, v := range ctx.Value("vars").(map[string]string) { _, ok := expectedVars[k] if !ok { // name is checked on context // We have an unexpected key, fail t.Fatalf("unexpected key %q in vars with value %q", k, v) } } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) } } // unflatten a list of variables, suitable for gorilla/mux, to a map[string]string unflatten := func(vars []string) map[string]string { m := make(map[string]string) for i := 0; i < len(vars)-1; i = i + 2 { m[vars[i]] = vars[i+1] } return m } for _, testcase := range []struct { endpoint string vars []string }{ { endpoint: v2.RouteNameManifest, vars: []string{ "name", "foo/bar", "reference", "sometag", }, }, { endpoint: v2.RouteNameTags, vars: []string{ "name", "foo/bar", }, }, { endpoint: v2.RouteNameBlobUpload, vars: []string{ "name", "foo/bar", }, }, { endpoint: v2.RouteNameBlobUploadChunk, vars: []string{ "name", "foo/bar", "uuid", "theuuid", }, }, } { app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars))) route := router.GetRoute(testcase.endpoint).Host(serverURL.Host) u, err := route.URL(testcase.vars...) if err != nil { t.Fatal(err) } resp, err := http.Get(u.String()) if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusOK { t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK) } } } // TestNewApp covers the creation of an application via NewApp with a // configuration. func TestNewApp(t *testing.T) { ctx := context.Background() config := configuration.Configuration{ Storage: configuration.Storage{ "inmemory": nil, }, Auth: configuration.Auth{ // For now, we simply test that new auth results in a viable // application. "silly": { "realm": "realm-test", "service": "service-test", }, }, } // Mostly, with this test, given a sane configuration, we are simply // ensuring that NewApp doesn't panic. We might want to tweak this // behavior. app := NewApp(ctx, &config) server := httptest.NewServer(app) builder, err := v2.NewURLBuilderFromString(server.URL) if err != nil { t.Fatalf("error creating urlbuilder: %v", err) } baseURL, err := builder.BuildBaseURL() if err != nil { t.Fatalf("error creating baseURL: %v", err) } // TODO(stevvooe): The rest of this test might belong in the API tests. 
// Just hit the app and make sure we get a 401 Unauthorized error. req, err := http.Get(baseURL) if err != nil { t.Fatalf("unexpected error during GET: %v", err) } defer req.Body.Close() if req.StatusCode != http.StatusUnauthorized { t.Fatalf("unexpected status code during request: %v", err) } if req.Header.Get("Content-Type") != "application/json; charset=utf-8" { t.Fatalf("unexpected content-type: %v != %v", req.Header.Get("Content-Type"), "application/json; charset=utf-8") } expectedAuthHeader := "Bearer realm=\"realm-test\",service=\"service-test\"" if e, a := expectedAuthHeader, req.Header.Get("WWW-Authenticate"); e != a { t.Fatalf("unexpected WWW-Authenticate header: %q != %q", e, a) } var errs errcode.Errors dec := json.NewDecoder(req.Body) if err := dec.Decode(&errs); err != nil { t.Fatalf("error decoding error response: %v", err) } err2, ok := errs[0].(errcode.ErrorCoder) if !ok { t.Fatalf("not an ErrorCoder: %#v", errs[0]) } if err2.ErrorCode() != errcode.ErrorCodeUnauthorized { t.Fatalf("unexpected error code: %v != %v", err2.ErrorCode(), errcode.ErrorCodeUnauthorized) } } // Test the access record accumulator func TestAppendAccessRecords(t *testing.T) { repo := "testRepo" expectedResource := auth.Resource{ Type: "repository", Name: repo, } expectedPullRecord := auth.Access{ Resource: expectedResource, Action: "pull", } expectedPushRecord := auth.Access{ Resource: expectedResource, Action: "push", } expectedAllRecord := auth.Access{ Resource: expectedResource, Action: "*", } records := []auth.Access{} result := appendAccessRecords(records, "GET", repo) expectedResult := []auth.Access{expectedPullRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } records = []auth.Access{} result = appendAccessRecords(records, "HEAD", repo) expectedResult = []auth.Access{expectedPullRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } records = []auth.Access{} result = appendAccessRecords(records, "POST", repo) expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } records = []auth.Access{} result = appendAccessRecords(records, "PUT", repo) expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } records = []auth.Access{} result = appendAccessRecords(records, "PATCH", repo) expectedResult = []auth.Access{expectedPullRecord, expectedPushRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } records = []auth.Access{} result = appendAccessRecords(records, "DELETE", repo) expectedResult = []auth.Access{expectedAllRecord} if ok := reflect.DeepEqual(result, expectedResult); !ok { t.Fatalf("Actual access record differs from expected") } } distribution-2.3.0/registry/handlers/basicauth.go000066400000000000000000000002321265472114500222010ustar00rootroot00000000000000// +build go1.4 package handlers import ( "net/http" ) func basicAuth(r *http.Request) (username, password string, ok bool) { return r.BasicAuth() } distribution-2.3.0/registry/handlers/basicauth_prego14.go000066400000000000000000000020121265472114500235400ustar00rootroot00000000000000// +build !go1.4 package handlers import ( "encoding/base64" "net/http" "strings" ) // 
NOTE(stevvooe): This is basic auth support from go1.4 present to ensure we // can compile on go1.3 and earlier. // BasicAuth returns the username and password provided in the request's // Authorization header, if the request uses HTTP Basic Authentication. // See RFC 2617, Section 2. func basicAuth(r *http.Request) (username, password string, ok bool) { auth := r.Header.Get("Authorization") if auth == "" { return } return parseBasicAuth(auth) } // parseBasicAuth parses an HTTP Basic Authentication string. // "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). func parseBasicAuth(auth string) (username, password string, ok bool) { if !strings.HasPrefix(auth, "Basic ") { return } c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) if err != nil { return } cs := string(c) s := strings.IndexByte(cs, ':') if s < 0 { return } return cs[:s], cs[s+1:], true } distribution-2.3.0/registry/handlers/blob.go000066400000000000000000000052171265472114500211640ustar00rootroot00000000000000package handlers import ( "net/http" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" ) // blobDispatcher uses the request context to build a blobHandler. func blobDispatcher(ctx *Context, r *http.Request) http.Handler { dgst, err := getDigest(ctx) if err != nil { if err == errDigestNotAvailable { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx.Errors = append(ctx.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) }) } blobHandler := &blobHandler{ Context: ctx, Digest: dgst, } mhandler := handlers.MethodHandler{ "GET": http.HandlerFunc(blobHandler.GetBlob), "HEAD": http.HandlerFunc(blobHandler.GetBlob), } if !ctx.readOnly { mhandler["DELETE"] = http.HandlerFunc(blobHandler.DeleteBlob) } return mhandler } // blobHandler serves http blob requests. type blobHandler struct { *Context Digest digest.Digest } // GetBlob fetches the binary data from backend storage returns it in the // response. 
func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) { context.GetLogger(bh).Debug("GetBlob") blobs := bh.Repository.Blobs(bh) desc, err := blobs.Stat(bh, bh.Digest) if err != nil { if err == distribution.ErrBlobUnknown { bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown.WithDetail(bh.Digest)) } else { bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } return } if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil { context.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err) bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } } // DeleteBlob deletes a layer blob func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) { context.GetLogger(bh).Debug("DeleteBlob") blobs := bh.Repository.Blobs(bh) err := blobs.Delete(bh, bh.Digest) if err != nil { switch err { case distribution.ErrUnsupported: bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported) return case distribution.ErrBlobUnknown: bh.Errors = append(bh.Errors, v2.ErrorCodeBlobUnknown) return default: bh.Errors = append(bh.Errors, err) context.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error()) return } } w.Header().Set("Content-Length", "0") w.WriteHeader(http.StatusAccepted) } distribution-2.3.0/registry/handlers/blobupload.go000066400000000000000000000310351265472114500223660ustar00rootroot00000000000000package handlers import ( "fmt" "net/http" "net/url" "os" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/storage" "github.com/gorilla/handlers" ) // blobUploadDispatcher constructs and returns the blob upload handler for the // given request context. 
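//
// As a rough illustration (repository name, UUID and digest are invented),
// a chunked push travels through the handlers assembled below in this order:
//
//	POST  /v2/foo/bar/blobs/uploads/                                     -> StartBlobUpload
//	PATCH /v2/foo/bar/blobs/uploads/<uuid>?_state=...                    -> PatchBlobData
//	PUT   /v2/foo/bar/blobs/uploads/<uuid>?_state=...&digest=sha256:...  -> PutBlobUploadComplete
//
// GET and HEAD on the chunk URL report progress via GetUploadStatus, and
// DELETE cancels the session via CancelBlobUpload.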
func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler { buh := &blobUploadHandler{ Context: ctx, UUID: getUploadUUID(ctx), } handler := handlers.MethodHandler{ "GET": http.HandlerFunc(buh.GetUploadStatus), "HEAD": http.HandlerFunc(buh.GetUploadStatus), } if !ctx.readOnly { handler["POST"] = http.HandlerFunc(buh.StartBlobUpload) handler["PATCH"] = http.HandlerFunc(buh.PatchBlobData) handler["PUT"] = http.HandlerFunc(buh.PutBlobUploadComplete) handler["DELETE"] = http.HandlerFunc(buh.CancelBlobUpload) } if buh.UUID != "" { state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state")) if err != nil { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("error resolving upload: %v", err) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } buh.State = state if state.Name != ctx.Repository.Name().Name() { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Name()) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } if state.UUID != buh.UUID { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID) buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) }) } blobs := ctx.Repository.Blobs(buh) upload, err := blobs.Resume(buh, buh.UUID) if err != nil { ctxu.GetLogger(ctx).Errorf("error resolving upload: %v", err) if err == distribution.ErrBlobUploadUnknown { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown.WithDetail(err)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) }) } buh.Upload = upload if state.Offset > 0 { // Seek the blob upload to the correct spot if it's non-zero. // These error conditions should be rare and demonstrate real // problems. We basically cancel the upload and tell the client to // start over. if nn, err := upload.Seek(buh.State.Offset, os.SEEK_SET); err != nil { defer upload.Close() ctxu.GetLogger(ctx).Infof("error seeking blob upload: %v", err) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) }) } else if nn != buh.State.Offset { defer upload.Close() ctxu.GetLogger(ctx).Infof("seek to wrong offset: %d != %d", nn, buh.State.Offset) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) upload.Cancel(buh) }) } } return closeResources(handler, buh.Upload) } return handler } // blobUploadHandler handles the http blob upload process. type blobUploadHandler struct { *Context // UUID identifies the upload instance for the current request. Using UUID // to key blob writers since this implementation uses UUIDs. UUID string Upload distribution.BlobWriter State blobUploadState } // StartBlobUpload begins the blob upload process and allocates a server-side // blob writer session, optionally mounting the blob from a separate repository.
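//
// For example (repository names and digest are illustrative), a
// cross-repository mount is requested by supplying both query parameters on
// the initial POST, which are read below via r.FormValue:
//
//	POST /v2/myorg/app/blobs/uploads/?mount=sha256:3b1a...&from=myorg/base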
func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) { var options []distribution.BlobCreateOption fromRepo := r.FormValue("from") mountDigest := r.FormValue("mount") if mountDigest != "" && fromRepo != "" { opt, err := buh.createBlobMountOption(fromRepo, mountDigest) if opt != nil && err == nil { options = append(options, opt) } } blobs := buh.Repository.Blobs(buh) upload, err := blobs.Create(buh, options...) if err != nil { if ebm, ok := err.(distribution.ErrBlobMounted); ok { if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } } else if err == distribution.ErrUnsupported { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) } else { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } return } buh.Upload = upload defer buh.Upload.Close() if err := buh.blobUploadResponse(w, r, true); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } w.Header().Set("Docker-Upload-UUID", buh.Upload.ID()) w.WriteHeader(http.StatusAccepted) } // GetUploadStatus returns the status of a given upload, identified by id. func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } // TODO(dmcgowan): Set last argument to false in blobUploadResponse when // resumable upload is supported. This will enable returning a non-zero // range for clients to begin uploading at an offset. if err := buh.blobUploadResponse(w, r, true); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } w.Header().Set("Docker-Upload-UUID", buh.UUID) w.WriteHeader(http.StatusNoContent) } // PatchBlobData writes data to an upload. func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } ct := r.Header.Get("Content-Type") if ct != "" && ct != "application/octet-stream" { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("Bad Content-Type"))) // TODO(dmcgowan): encode error return } // TODO(dmcgowan): support Content-Range header to seek and write range if err := copyFullPayload(w, r, buh.Upload, buh, "blob PATCH", &buh.Errors); err != nil { // copyFullPayload reports the error if necessary return } if err := buh.blobUploadResponse(w, r, false); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } w.WriteHeader(http.StatusAccepted) } // PutBlobUploadComplete takes the final request of a blob upload. The // request may include all the blob data or no blob data. Any data // provided is received and verified. If successful, the blob is linked // into the blob store and 201 Created is returned with the canonical // url of the blob. func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters! if dgstStr == "" { // no digest? return error, but allow retry. buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest missing")) return } dgst, err := digest.ParseDigest(dgstStr) if err != nil { // no digest? return error, but allow retry. 
buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail("digest parsing failed")) return } if err := copyFullPayload(w, r, buh.Upload, buh, "blob PUT", &buh.Errors); err != nil { // copyFullPayload reports the error if necessary return } desc, err := buh.Upload.Commit(buh, distribution.Descriptor{ Digest: dgst, // TODO(stevvooe): This isn't wildly important yet, but we should // really set the length and mediatype. For now, we can let the // backend take care of this. }) if err != nil { switch err := err.(type) { case distribution.ErrBlobInvalidDigest: buh.Errors = append(buh.Errors, v2.ErrorCodeDigestInvalid.WithDetail(err)) default: switch err { case distribution.ErrUnsupported: buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported) case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported: buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadInvalid.WithDetail(err)) default: ctxu.GetLogger(buh).Errorf("unknown error completing upload: %#v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } } // Clean up the backend blob data if there was an error. if err := buh.Upload.Cancel(buh); err != nil { // If the cleanup fails, all we can do is observe and report. ctxu.GetLogger(buh).Errorf("error canceling upload after error: %v", err) } return } if err := buh.writeBlobCreatedHeaders(w, desc); err != nil { buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } } // CancelBlobUpload cancels an in-progress upload of a blob. func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) { if buh.Upload == nil { buh.Errors = append(buh.Errors, v2.ErrorCodeBlobUploadUnknown) return } w.Header().Set("Docker-Upload-UUID", buh.UUID) if err := buh.Upload.Cancel(buh); err != nil { ctxu.GetLogger(buh).Errorf("error encountered canceling upload: %v", err) buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } w.WriteHeader(http.StatusNoContent) } // blobUploadResponse provides a standard request for uploading blobs and // chunk responses. This sets the correct headers but the response status is // left to the caller. The fresh argument is used to ensure that new blob // uploads always start at a 0 offset. This allows disabling resumable push by // always returning a 0 offset on check status. func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request, fresh bool) error { var offset int64 if !fresh { var err error offset, err = buh.Upload.Seek(0, os.SEEK_CUR) if err != nil { ctxu.GetLogger(buh).Errorf("unable get current offset of blob upload: %v", err) return err } } // TODO(stevvooe): Need a better way to manage the upload state automatically. 
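// As a hedged illustration (path, UUID and token are invented), the headers
	// assembled by the rest of this function come out roughly as:
	//
	//	Docker-Upload-UUID: 9d9ab237-...
	//	Location: /v2/foo/bar/blobs/uploads/9d9ab237-...?_state=<hmac-signed token>
	//	Range: 0-<offset-1> (or 0-0 for a fresh upload)
	//	Content-Length: 0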
buh.State.Name = buh.Repository.Name().Name() buh.State.UUID = buh.Upload.ID() buh.State.Offset = offset buh.State.StartedAt = buh.Upload.StartedAt() token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State) if err != nil { ctxu.GetLogger(buh).Infof("error building upload state token: %s", err) return err } uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL( buh.Repository.Name(), buh.Upload.ID(), url.Values{ "_state": []string{token}, }) if err != nil { ctxu.GetLogger(buh).Infof("error building upload url: %s", err) return err } endRange := offset if endRange > 0 { endRange = endRange - 1 } w.Header().Set("Docker-Upload-UUID", buh.UUID) w.Header().Set("Location", uploadURL) w.Header().Set("Content-Length", "0") w.Header().Set("Range", fmt.Sprintf("0-%d", endRange)) return nil } // mountBlob attempts to mount a blob from another repository by its digest. If // successful, the blob is linked into the blob store and 201 Created is // returned with the canonical url of the blob. func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) { dgst, err := digest.ParseDigest(mountDigest) if err != nil { return nil, err } ref, err := reference.ParseNamed(fromRepo) if err != nil { return nil, err } canonical, err := reference.WithDigest(ref, dgst) if err != nil { return nil, err } return storage.WithMountFrom(canonical), nil } // writeBlobCreatedHeaders writes the standard headers describing a newly // created blob. A 201 Created is written as well as the canonical URL and // blob digest. func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc distribution.Descriptor) error { ref, err := reference.WithDigest(buh.Repository.Name(), desc.Digest) if err != nil { return err } blobURL, err := buh.urlBuilder.BuildBlobURL(ref) if err != nil { return err } w.Header().Set("Location", blobURL) w.Header().Set("Content-Length", "0") w.Header().Set("Docker-Content-Digest", desc.Digest.String()) w.WriteHeader(http.StatusCreated) return nil } distribution-2.3.0/registry/handlers/catalog.go000066400000000000000000000042061265472114500216550ustar00rootroot00000000000000package handlers import ( "encoding/json" "fmt" "io" "net/http" "net/url" "strconv" "github.com/docker/distribution/registry/api/errcode" "github.com/gorilla/handlers" ) const maximumReturnedEntries = 100 func catalogDispatcher(ctx *Context, r *http.Request) http.Handler { catalogHandler := &catalogHandler{ Context: ctx, } return handlers.MethodHandler{ "GET": http.HandlerFunc(catalogHandler.GetCatalog), } } type catalogHandler struct { *Context } type catalogAPIResponse struct { Repositories []string `json:"repositories"` } func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) { var moreEntries = true q := r.URL.Query() lastEntry := q.Get("last") maxEntries, err := strconv.Atoi(q.Get("n")) if err != nil || maxEntries < 0 { maxEntries = maximumReturnedEntries } repos := make([]string, maxEntries) filled, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry) if err == io.EOF { moreEntries = false } else if err != nil { ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } w.Header().Set("Content-Type", "application/json; charset=utf-8") // Add a link header if there are more entries to retrieve if moreEntries { lastEntry = repos[len(repos)-1] urlStr, err := createLinkEntry(r.URL.String(), maxEntries, lastEntry) if err != nil { ch.Errors = append(ch.Errors, 
errcode.ErrorCodeUnknown.WithDetail(err)) return } w.Header().Set("Link", urlStr) } enc := json.NewEncoder(w) if err := enc.Encode(catalogAPIResponse{ Repositories: repos[0:filled], }); err != nil { ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } } // Use the original URL from the request to create a new URL for // the link header func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) { calledURL, err := url.Parse(origURL) if err != nil { return "", err } v := url.Values{} v.Add("n", strconv.Itoa(maxEntries)) v.Add("last", lastEntry) calledURL.RawQuery = v.Encode() calledURL.Fragment = "" urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String()) return urlStr, nil } distribution-2.3.0/registry/handlers/context.go000066400000000000000000000104341265472114500217270ustar00rootroot00000000000000package handlers import ( "fmt" "net/http" "sync" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "golang.org/x/net/context" ) // Context should contain the request specific context for use in across // handlers. Resources that don't need to be shared across handlers should not // be on this object. type Context struct { // App points to the application structure that created this context. *App context.Context // Repository is the repository for the current request. All requests // should be scoped to a single repository. This field may be nil. Repository distribution.Repository // Errors is a collection of errors encountered during the request to be // returned to the client API. If errors are added to the collection, the // handler *must not* start the response via http.ResponseWriter. Errors errcode.Errors urlBuilder *v2.URLBuilder // TODO(stevvooe): The goal is too completely factor this context and // dispatching out of the web application. Ideally, we should lean on // context.Context for injection of these resources. } // Value overrides context.Context.Value to ensure that calls are routed to // correct context. func (ctx *Context) Value(key interface{}) interface{} { return ctx.Context.Value(key) } func getName(ctx context.Context) (name string) { return ctxu.GetStringValue(ctx, "vars.name") } func getReference(ctx context.Context) (reference string) { return ctxu.GetStringValue(ctx, "vars.reference") } var errDigestNotAvailable = fmt.Errorf("digest not available in context") func getDigest(ctx context.Context) (dgst digest.Digest, err error) { dgstStr := ctxu.GetStringValue(ctx, "vars.digest") if dgstStr == "" { ctxu.GetLogger(ctx).Errorf("digest not available") return "", errDigestNotAvailable } d, err := digest.ParseDigest(dgstStr) if err != nil { ctxu.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err) return "", err } return d, nil } func getUploadUUID(ctx context.Context) (uuid string) { return ctxu.GetStringValue(ctx, "vars.uuid") } // getUserName attempts to resolve a username from the context and request. If // a username cannot be resolved, the empty string is returned. 
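//
// For example, a request carrying the header below (the value is the familiar
// RFC 2617 "Aladdin:open sesame" sample) resolves to "Aladdin" here when the
// access controller placed no user name in the context:
//
//	Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==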
func getUserName(ctx context.Context, r *http.Request) string { username := ctxu.GetStringValue(ctx, "auth.user.name") // Fallback to request user with basic auth if username == "" { var ok bool uname, _, ok := basicAuth(r) if ok { username = uname } } return username } // contextManager allows us to associate net/context.Context instances with a // request, based on the memory identity of http.Request. This prepares http- // level context, which is not application specific. If this is called, // (*contextManager).release must be called on the context when the request is // completed. // // Providing this circumvents a lot of necessity for dispatchers with the // benefit of instantiating the request context much earlier. // // TODO(stevvooe): Consider making this facility a part of the context package. type contextManager struct { contexts map[*http.Request]context.Context mu sync.Mutex } // defaultContextManager is just a global instance to register request contexts. var defaultContextManager = newContextManager() func newContextManager() *contextManager { return &contextManager{ contexts: make(map[*http.Request]context.Context), } } // context either returns a new context or looks it up in the manager. func (cm *contextManager) context(parent context.Context, w http.ResponseWriter, r *http.Request) context.Context { cm.mu.Lock() defer cm.mu.Unlock() ctx, ok := cm.contexts[r] if ok { return ctx } if parent == nil { parent = ctxu.Background() } ctx = ctxu.WithRequest(parent, r) ctx, w = ctxu.WithResponseWriter(ctx, w) ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx)) cm.contexts[r] = ctx return ctx } // releases frees any associated with resources from request. func (cm *contextManager) release(ctx context.Context) { cm.mu.Lock() defer cm.mu.Unlock() r, err := ctxu.GetRequest(ctx) if err != nil { ctxu.GetLogger(ctx).Errorf("no request found in context during release") return } delete(cm.contexts, r) } distribution-2.3.0/registry/handlers/health_test.go000066400000000000000000000104471265472114500225530ustar00rootroot00000000000000package handlers import ( "io/ioutil" "net" "net/http" "net/http/httptest" "os" "testing" "time" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/health" ) func TestFileHealthCheck(t *testing.T) { interval := time.Second tmpfile, err := ioutil.TempFile(os.TempDir(), "healthcheck") if err != nil { t.Fatalf("could not create temporary file: %v", err) } defer tmpfile.Close() config := &configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, Health: configuration.Health{ FileCheckers: []configuration.FileChecker{ { Interval: interval, File: tmpfile.Name(), }, }, }, } ctx := context.Background() app := NewApp(ctx, config) healthRegistry := health.NewRegistry() app.RegisterHealthChecks(healthRegistry) // Wait for health check to happen <-time.After(2 * interval) status := healthRegistry.CheckStatus() if len(status) != 1 { t.Fatal("expected 1 item in health check results") } if status[tmpfile.Name()] != "file exists" { t.Fatal(`did not get "file exists" result for health check`) } os.Remove(tmpfile.Name()) <-time.After(2 * interval) if len(healthRegistry.CheckStatus()) != 0 { t.Fatal("expected 0 items in health check results") } } func TestTCPHealthCheck(t *testing.T) { interval := time.Second ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("could not create listener: %v", err) } addrStr := ln.Addr().String() // Start accepting 
go func() { for { conn, err := ln.Accept() if err != nil { // listener was closed return } defer conn.Close() } }() config := &configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, Health: configuration.Health{ TCPCheckers: []configuration.TCPChecker{ { Interval: interval, Addr: addrStr, Timeout: 500 * time.Millisecond, }, }, }, } ctx := context.Background() app := NewApp(ctx, config) healthRegistry := health.NewRegistry() app.RegisterHealthChecks(healthRegistry) // Wait for health check to happen <-time.After(2 * interval) if len(healthRegistry.CheckStatus()) != 0 { t.Fatal("expected 0 items in health check results") } ln.Close() <-time.After(2 * interval) // Health check should now fail status := healthRegistry.CheckStatus() if len(status) != 1 { t.Fatal("expected 1 item in health check results") } if status[addrStr] != "connection to "+addrStr+" failed" { t.Fatal(`did not get "connection failed" result for health check`) } } func TestHTTPHealthCheck(t *testing.T) { interval := time.Second threshold := 3 stopFailing := make(chan struct{}) checkedServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != "HEAD" { t.Fatalf("expected HEAD request, got %s", r.Method) } select { case <-stopFailing: w.WriteHeader(http.StatusOK) default: w.WriteHeader(http.StatusInternalServerError) } })) config := &configuration.Configuration{ Storage: configuration.Storage{ "inmemory": configuration.Parameters{}, }, Health: configuration.Health{ HTTPCheckers: []configuration.HTTPChecker{ { Interval: interval, URI: checkedServer.URL, Threshold: threshold, }, }, }, } ctx := context.Background() app := NewApp(ctx, config) healthRegistry := health.NewRegistry() app.RegisterHealthChecks(healthRegistry) for i := 0; ; i++ { <-time.After(interval) status := healthRegistry.CheckStatus() if i < threshold-1 { // definitely shouldn't have hit the threshold yet if len(status) != 0 { t.Fatal("expected 1 item in health check results") } continue } if i < threshold+1 { // right on the threshold - don't expect a failure yet continue } if len(status) != 1 { t.Fatal("expected 1 item in health check results") } if status[checkedServer.URL] != "downstream service returned unexpected status: 500" { t.Fatal("did not get expected result for health check") } break } // Signal HTTP handler to start returning 200 close(stopFailing) <-time.After(2 * interval) if len(healthRegistry.CheckStatus()) != 0 { t.Fatal("expected 0 items in health check results") } } distribution-2.3.0/registry/handlers/helpers.go000066400000000000000000000041701265472114500217050ustar00rootroot00000000000000package handlers import ( "errors" "io" "net/http" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" ) // closeResources closes all the provided resources after running the target // handler. func closeResources(handler http.Handler, closers ...io.Closer) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { for _, closer := range closers { defer closer.Close() } handler.ServeHTTP(w, r) }) } // copyFullPayload copies the payload of a HTTP request to destWriter. If it // receives less content than expected, and the client disconnected during the // upload, it avoids sending a 400 error to keep the logs cleaner. 
func copyFullPayload(responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, context ctxu.Context, action string, errSlice *errcode.Errors) error { // Get a channel that tells us if the client disconnects var clientClosed <-chan bool if notifier, ok := responseWriter.(http.CloseNotifier); ok { clientClosed = notifier.CloseNotify() } else { ctxu.GetLogger(context).Warnf("the ResponseWriter does not implement CloseNotifier (type: %T)", responseWriter) } // Read in the data, if any. copied, err := io.Copy(destWriter, r.Body) if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) { // Didn't receive as much content as expected. Did the client // disconnect during the request? If so, avoid returning a 400 // error to keep the logs cleaner. select { case <-clientClosed: // Set the response code to "499 Client Closed Request" // Even though the connection has already been closed, // this causes the logger to pick up a 499 error // instead of showing 0 for the HTTP status. responseWriter.WriteHeader(499) ctxu.GetLogger(context).Error("client disconnected during " + action) return errors.New("client disconnected") default: } } if err != nil { ctxu.GetLogger(context).Errorf("unknown error reading request payload: %v", err) *errSlice = append(*errSlice, errcode.ErrorCodeUnknown.WithDetail(err)) return err } return nil } distribution-2.3.0/registry/handlers/hmac.go000066400000000000000000000033561265472114500211600ustar00rootroot00000000000000package handlers import ( "crypto/hmac" "crypto/sha256" "encoding/base64" "encoding/json" "fmt" "time" ) // blobUploadState captures the serializable state of the blob upload. type blobUploadState struct { // name is the primary repository under which the blob will be linked. Name string // UUID identifies the upload. UUID string // offset contains the current progress of the upload. Offset int64 // StartedAt is the original start time of the upload. StartedAt time.Time } type hmacKey string // unpackUploadState unpacks and validates the blob upload state from the // token, using the hmacKey secret. func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) { var state blobUploadState tokenBytes, err := base64.URLEncoding.DecodeString(token) if err != nil { return state, err } mac := hmac.New(sha256.New, []byte(secret)) if len(tokenBytes) < mac.Size() { return state, fmt.Errorf("Invalid token") } macBytes := tokenBytes[:mac.Size()] messageBytes := tokenBytes[mac.Size():] mac.Write(messageBytes) if !hmac.Equal(mac.Sum(nil), macBytes) { return state, fmt.Errorf("Invalid token") } if err := json.Unmarshal(messageBytes, &state); err != nil { return state, err } return state, nil } // packUploadState packs the upload state, signed with an HMAC digest using // the hmacKey secret, encoding to URL-safe base64. The resulting token can be // used to share data with minimized risk of external tampering.
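// A small round-trip sketch for unpackUploadState (above) and packUploadState
// (below): pack an upload state into a signed token, then recover and verify
// it with the same secret. The secret and field values are arbitrary examples.
func exampleUploadStateRoundTrip() (blobUploadState, error) {
	secret := hmacKey("example-secret") // illustrative secret only
	token, err := secret.packUploadState(blobUploadState{
		Name:      "library/example",
		UUID:      "0123-4567-89ab-cdef",
		Offset:    1024,
		StartedAt: time.Now(),
	})
	if err != nil {
		return blobUploadState{}, err
	}
	// Unpacking with a different hmacKey would fail the HMAC comparison and
	// return an "Invalid token" error; the same secret recovers the state.
	return secret.unpackUploadState(token)
}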
func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) { mac := hmac.New(sha256.New, []byte(secret)) p, err := json.Marshal(lus) if err != nil { return "", err } mac.Write(p) return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil } distribution-2.3.0/registry/handlers/hmac_test.go000066400000000000000000000054671265472114500222240ustar00rootroot00000000000000package handlers import "testing" var blobUploadStates = []blobUploadState{ { Name: "hello", UUID: "abcd-1234-qwer-0987", Offset: 0, }, { Name: "hello-world", UUID: "abcd-1234-qwer-0987", Offset: 0, }, { Name: "h3ll0_w0rld", UUID: "abcd-1234-qwer-0987", Offset: 1337, }, { Name: "ABCDEFG", UUID: "ABCD-1234-QWER-0987", Offset: 1234567890, }, { Name: "this-is-A-sort-of-Long-name-for-Testing", UUID: "dead-1234-beef-0987", Offset: 8675309, }, } var secrets = []string{ "supersecret", "12345", "a", "SuperSecret", "Sup3r... S3cr3t!", "This is a reasonably long secret key that is used for the purpose of testing.", "\u2603+\u2744", // snowman+snowflake } // TestLayerUploadTokens constructs stateTokens from LayerUploadStates and // validates that the tokens can be used to reconstruct the proper upload state. func TestLayerUploadTokens(t *testing.T) { secret := hmacKey("supersecret") for _, testcase := range blobUploadStates { token, err := secret.packUploadState(testcase) if err != nil { t.Fatal(err) } lus, err := secret.unpackUploadState(token) if err != nil { t.Fatal(err) } assertBlobUploadStateEquals(t, testcase, lus) } } // TestHMACValidate ensures that any HMAC token providers are compatible if and // only if they share the same secret. func TestHMACValidation(t *testing.T) { for _, secret := range secrets { secret1 := hmacKey(secret) secret2 := hmacKey(secret) badSecret := hmacKey("DifferentSecret") for _, testcase := range blobUploadStates { token, err := secret1.packUploadState(testcase) if err != nil { t.Fatal(err) } lus, err := secret2.unpackUploadState(token) if err != nil { t.Fatal(err) } assertBlobUploadStateEquals(t, testcase, lus) _, err = badSecret.unpackUploadState(token) if err == nil { t.Fatalf("Expected token provider to fail at retrieving state from token: %s", token) } badToken, err := badSecret.packUploadState(lus) if err != nil { t.Fatal(err) } _, err = secret1.unpackUploadState(badToken) if err == nil { t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) } _, err = secret2.unpackUploadState(badToken) if err == nil { t.Fatalf("Expected token provider to fail at retrieving state from token: %s", badToken) } } } } func assertBlobUploadStateEquals(t *testing.T, expected blobUploadState, received blobUploadState) { if expected.Name != received.Name { t.Fatalf("Expected Name=%q, Received Name=%q", expected.Name, received.Name) } if expected.UUID != received.UUID { t.Fatalf("Expected UUID=%q, Received UUID=%q", expected.UUID, received.UUID) } if expected.Offset != received.Offset { t.Fatalf("Expected Offset=%d, Received Offset=%d", expected.Offset, received.Offset) } } distribution-2.3.0/registry/handlers/hooks.go000066400000000000000000000021111265472114500213570ustar00rootroot00000000000000package handlers import ( "bytes" "errors" "fmt" "strings" "text/template" "github.com/Sirupsen/logrus" ) // logHook is for hooking Panic in web application type logHook struct { LevelsParam []string Mail *mailer } // Fire forwards an error to LogHook func (hook *logHook) Fire(entry *logrus.Entry) error { addr := strings.Split(hook.Mail.Addr, ":") if len(addr) != 2 { 
return errors.New("Invalid Mail Address") } host := addr[0] subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message) html := ` {{.Message}} {{range $key, $value := .Data}} {{$key}}: {{$value}} {{end}} ` b := bytes.NewBuffer(make([]byte, 0)) t := template.Must(template.New("mail body").Parse(html)) if err := t.Execute(b, entry); err != nil { return err } body := fmt.Sprintf("%s", b) return hook.Mail.sendMail(subject, body) } // Levels contains hook levels to be catched func (hook *logHook) Levels() []logrus.Level { levels := []logrus.Level{} for _, v := range hook.LevelsParam { lv, _ := logrus.ParseLevel(v) levels = append(levels, lv) } return levels } distribution-2.3.0/registry/handlers/images.go000066400000000000000000000253241265472114500215140ustar00rootroot00000000000000package handlers import ( "bytes" "fmt" "net/http" "github.com/docker/distribution" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" ) // These constants determine which architecture and OS to choose from a // manifest list when downconverting it to a schema1 manifest. const ( defaultArch = "amd64" defaultOS = "linux" ) // imageManifestDispatcher takes the request context and builds the // appropriate handler for handling image manifest requests. func imageManifestDispatcher(ctx *Context, r *http.Request) http.Handler { imageManifestHandler := &imageManifestHandler{ Context: ctx, } reference := getReference(ctx) dgst, err := digest.ParseDigest(reference) if err != nil { // We just have a tag imageManifestHandler.Tag = reference } else { imageManifestHandler.Digest = dgst } mhandler := handlers.MethodHandler{ "GET": http.HandlerFunc(imageManifestHandler.GetImageManifest), "HEAD": http.HandlerFunc(imageManifestHandler.GetImageManifest), } if !ctx.readOnly { mhandler["PUT"] = http.HandlerFunc(imageManifestHandler.PutImageManifest) mhandler["DELETE"] = http.HandlerFunc(imageManifestHandler.DeleteImageManifest) } return mhandler } // imageManifestHandler handles http operations on image manifests. type imageManifestHandler struct { *Context // One of tag or digest gets set, depending on what is present in context. Tag string Digest digest.Digest } // GetImageManifest fetches the image manifest from the storage backend, if it exists. 
func (imh *imageManifestHandler) GetImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("GetImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { imh.Errors = append(imh.Errors, err) return } var manifest distribution.Manifest if imh.Tag != "" { tags := imh.Repository.Tags(imh) desc, err := tags.Get(imh, imh.Tag) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } imh.Digest = desc.Digest } if etagMatch(r, imh.Digest.String()) { w.WriteHeader(http.StatusNotModified) return } manifest, err = manifests.Get(imh, imh.Digest) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } supportsSchema2 := false supportsManifestList := false if acceptHeaders, ok := r.Header["Accept"]; ok { for _, mediaType := range acceptHeaders { if mediaType == schema2.MediaTypeManifest { supportsSchema2 = true } if mediaType == manifestlist.MediaTypeManifestList { supportsManifestList = true } } } schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest) manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList) // Only rewrite schema2 manifests when they are being fetched by tag. // If they are being fetched by digest, we can't return something not // matching the digest. if imh.Tag != "" && isSchema2 && !supportsSchema2 { // Rewrite manifest in schema1 format ctxu.GetLogger(imh).Infof("rewriting manifest %s in schema1 format to support old client", imh.Digest.String()) manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { return } } else if imh.Tag != "" && isManifestList && !supportsManifestList { // Rewrite manifest in schema1 format ctxu.GetLogger(imh).Infof("rewriting manifest list %s in schema1 format to support old client", imh.Digest.String()) // Find the image manifest corresponding to the default // platform var manifestDigest digest.Digest for _, manifestDescriptor := range manifestList.Manifests { if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS { manifestDigest = manifestDescriptor.Digest break } } if manifestDigest == "" { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) return } manifest, err = manifests.Get(imh, manifestDigest) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown.WithDetail(err)) return } // If necessary, convert the image manifest if schema2Manifest, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supportsSchema2 { manifest, err = imh.convertSchema2Manifest(schema2Manifest) if err != nil { return } } } ct, p, err := manifest.Payload() if err != nil { return } w.Header().Set("Content-Type", ct) w.Header().Set("Content-Length", fmt.Sprint(len(p))) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest)) w.Write(p) } func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) { targetDescriptor := schema2Manifest.Target() blobs := imh.Repository.Blobs(imh) configJSON, err := blobs.Get(imh, targetDescriptor.Digest) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } ref := imh.Repository.Name() if imh.Tag != "" { ref, err = reference.WithTag(imh.Repository.Name(), imh.Tag) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err)) return nil, err } } 
builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON) for _, d := range schema2Manifest.References() { if err := builder.AppendReference(d); err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } } manifest, err := builder.Build(imh) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return nil, err } return manifest, nil } func etagMatch(r *http.Request, etag string) bool { for _, headerVal := range r.Header["If-None-Match"] { if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted return true } } return false } // PutImageManifest validates and stores an image in the registry. func (imh *imageManifestHandler) PutImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("PutImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { imh.Errors = append(imh.Errors, err) return } var jsonBuf bytes.Buffer if err := copyFullPayload(w, r, &jsonBuf, imh, "image manifest PUT", &imh.Errors); err != nil { // copyFullPayload reports the error if necessary return } mediaType := r.Header.Get("Content-Type") manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes()) if err != nil { imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err)) return } if imh.Digest != "" { if desc.Digest != imh.Digest { ctxu.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest) imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) return } } else if imh.Tag != "" { imh.Digest = desc.Digest } else { imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail("no tag or digest specified")) return } _, err = manifests.Put(imh, manifest) if err != nil { // TODO(stevvooe): These error handling switches really need to be // handled by an app global mapper. if err == distribution.ErrUnsupported { imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) return } switch err := err.(type) { case distribution.ErrManifestVerification: for _, verificationError := range err { switch verificationError := verificationError.(type) { case distribution.ErrManifestBlobUnknown: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest)) case distribution.ErrManifestNameInvalid: imh.Errors = append(imh.Errors, v2.ErrorCodeNameInvalid.WithDetail(err)) case distribution.ErrManifestUnverified: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnverified) default: if verificationError == digest.ErrDigestInvalidFormat { imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) } else { imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError) } } } default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } return } // Tag this manifest if imh.Tag != "" { tags := imh.Repository.Tags(imh) err = tags.Tag(imh, imh.Tag, desc) if err != nil { imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } } // Construct a canonical url for the uploaded manifest. ref, err := reference.WithDigest(imh.Repository.Name(), imh.Digest) if err != nil { imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } location, err := imh.urlBuilder.BuildManifestURL(ref) if err != nil { // NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to // happen. We'll log the error here but proceed as if it worked.
Worst // case, we set an empty location header. ctxu.GetLogger(imh).Errorf("error building manifest url from digest: %v", err) } w.Header().Set("Location", location) w.Header().Set("Docker-Content-Digest", imh.Digest.String()) w.WriteHeader(http.StatusCreated) } // DeleteImageManifest removes the manifest with the given digest from the registry. func (imh *imageManifestHandler) DeleteImageManifest(w http.ResponseWriter, r *http.Request) { ctxu.GetLogger(imh).Debug("DeleteImageManifest") manifests, err := imh.Repository.Manifests(imh) if err != nil { imh.Errors = append(imh.Errors, err) return } err = manifests.Delete(imh, imh.Digest) if err != nil { switch err { case digest.ErrDigestUnsupported: case digest.ErrDigestInvalidFormat: imh.Errors = append(imh.Errors, v2.ErrorCodeDigestInvalid) return case distribution.ErrBlobUnknown: imh.Errors = append(imh.Errors, v2.ErrorCodeManifestUnknown) return case distribution.ErrUnsupported: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported) return default: imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown) return } } tagService := imh.Repository.Tags(imh) referencedTags, err := tagService.Lookup(imh, distribution.Descriptor{Digest: imh.Digest}) if err != nil { imh.Errors = append(imh.Errors, err) return } for _, tag := range referencedTags { if err := tagService.Untag(imh, tag); err != nil { imh.Errors = append(imh.Errors, err) return } } w.WriteHeader(http.StatusAccepted) } distribution-2.3.0/registry/handlers/mail.go000066400000000000000000000016231265472114500211650ustar00rootroot00000000000000package handlers import ( "errors" "net/smtp" "strings" ) // mailer provides fields of email configuration for sending. type mailer struct { Addr, Username, Password, From string Insecure bool To []string } // sendMail allows users to send email, only if mail parameters is configured correctly. func (mail *mailer) sendMail(subject, message string) error { addr := strings.Split(mail.Addr, ":") if len(addr) != 2 { return errors.New("Invalid Mail Address") } host := addr[0] msg := []byte("To:" + strings.Join(mail.To, ";") + "\r\nFrom: " + mail.From + "\r\nSubject: " + subject + "\r\nContent-Type: text/plain\r\n\r\n" + message) auth := smtp.PlainAuth( "", mail.Username, mail.Password, host, ) err := smtp.SendMail( mail.Addr, auth, mail.From, mail.To, []byte(msg), ) if err != nil { return err } return nil } distribution-2.3.0/registry/handlers/tags.go000066400000000000000000000030011265472114500211710ustar00rootroot00000000000000package handlers import ( "encoding/json" "net/http" "github.com/docker/distribution" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/gorilla/handlers" ) // tagsDispatcher constructs the tags handler api endpoint. func tagsDispatcher(ctx *Context, r *http.Request) http.Handler { tagsHandler := &tagsHandler{ Context: ctx, } return handlers.MethodHandler{ "GET": http.HandlerFunc(tagsHandler.GetTags), } } // tagsHandler handles requests for lists of tags under a repository name. type tagsHandler struct { *Context } type tagsAPIResponse struct { Name string `json:"name"` Tags []string `json:"tags"` } // GetTags returns a json list of tags for a specific image name. 
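// A sketch of the JSON body GetTags (below) produces by encoding
// tagsAPIResponse, shown here as an isolated marshal step; the repository
// name and tags are made-up examples. The resulting document looks like
// {"name":"library/example","tags":["latest","v1.0"]}.
func exampleTagsResponseJSON() ([]byte, error) {
	return json.Marshal(tagsAPIResponse{
		Name: "library/example",          // placeholder repository name
		Tags: []string{"latest", "v1.0"}, // placeholder tags
	})
}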
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() tagService := th.Repository.Tags(th) tags, err := tagService.All(th) if err != nil { switch err := err.(type) { case distribution.ErrRepositoryUnknown: th.Errors = append(th.Errors, v2.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Name().Name()})) default: th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) } return } w.Header().Set("Content-Type", "application/json; charset=utf-8") enc := json.NewEncoder(w) if err := enc.Encode(tagsAPIResponse{ Name: th.Repository.Name().Name(), Tags: tags, }); err != nil { th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err)) return } } distribution-2.3.0/registry/listener/000077500000000000000000000000001265472114500177375ustar00rootroot00000000000000distribution-2.3.0/registry/listener/listener.go000066400000000000000000000032501265472114500221130ustar00rootroot00000000000000package listener import ( "fmt" "net" "os" "time" ) // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted // connections. It's used by ListenAndServe and ListenAndServeTLS so // dead TCP connections (e.g. closing laptop mid-download) eventually // go away. // it is a plain copy-paste from net/http/server.go type tcpKeepAliveListener struct { *net.TCPListener } func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { tc, err := ln.AcceptTCP() if err != nil { return } tc.SetKeepAlive(true) tc.SetKeepAlivePeriod(3 * time.Minute) return tc, nil } // NewListener announces on laddr and net. Accepted values of the net are // 'unix' and 'tcp' func NewListener(net, laddr string) (net.Listener, error) { switch net { case "unix": return newUnixListener(laddr) case "tcp", "": // an empty net means tcp return newTCPListener(laddr) default: return nil, fmt.Errorf("unknown address type %s", net) } } func newUnixListener(laddr string) (net.Listener, error) { fi, err := os.Stat(laddr) if err == nil { // the file exists. // try to remove it if it's a socket if !isSocket(fi.Mode()) { return nil, fmt.Errorf("file %s exists and is not a socket", laddr) } if err := os.Remove(laddr); err != nil { return nil, err } } else if !os.IsNotExist(err) { // we can't do stat on the file. // it means we can not remove it return nil, err } return net.Listen("unix", laddr) } func isSocket(m os.FileMode) bool { return m&os.ModeSocket != 0 } func newTCPListener(laddr string) (net.Listener, error) { ln, err := net.Listen("tcp", laddr) if err != nil { return nil, err } return tcpKeepAliveListener{ln.(*net.TCPListener)}, nil } distribution-2.3.0/registry/middleware/000077500000000000000000000000001265472114500202275ustar00rootroot00000000000000distribution-2.3.0/registry/middleware/registry/000077500000000000000000000000001265472114500220775ustar00rootroot00000000000000distribution-2.3.0/registry/middleware/registry/middleware.go000066400000000000000000000023541265472114500245470ustar00rootroot00000000000000package middleware import ( "fmt" "github.com/docker/distribution" "github.com/docker/distribution/context" ) // InitFunc is the type of a RegistryMiddleware factory function and is // used to register the constructor for different RegistryMiddleware backends. type InitFunc func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) var middlewares map[string]InitFunc // Register is used to register an InitFunc for // a RegistryMiddleware backend with the given name. 
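// A sketch of how a registry middleware package might register itself using
// Register (below); the "noop" name and the pass-through InitFunc are
// illustrative only and not part of this package.
func exampleRegisterNoopMiddleware() error {
	return Register("noop", func(ctx context.Context, registry distribution.Namespace, options map[string]interface{}) (distribution.Namespace, error) {
		// A real middleware would wrap the namespace; this one returns it unchanged.
		return registry, nil
	})
}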
func Register(name string, initFunc InitFunc) error { if middlewares == nil { middlewares = make(map[string]InitFunc) } if _, exists := middlewares[name]; exists { return fmt.Errorf("name already registered: %s", name) } middlewares[name] = initFunc return nil } // Get constructs a RegistryMiddleware with the given options using the named backend. func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace) (distribution.Namespace, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { return initFunc(ctx, registry, options) } } return nil, fmt.Errorf("no registry middleware registered with name: %s", name) } distribution-2.3.0/registry/middleware/repository/000077500000000000000000000000001265472114500224465ustar00rootroot00000000000000distribution-2.3.0/registry/middleware/repository/middleware.go000066400000000000000000000024001265472114500251060ustar00rootroot00000000000000package middleware import ( "fmt" "github.com/docker/distribution" "github.com/docker/distribution/context" ) // InitFunc is the type of a RepositoryMiddleware factory function and is // used to register the constructor for different RepositoryMiddleware backends. type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error) var middlewares map[string]InitFunc // Register is used to register an InitFunc for // a RepositoryMiddleware backend with the given name. func Register(name string, initFunc InitFunc) error { if middlewares == nil { middlewares = make(map[string]InitFunc) } if _, exists := middlewares[name]; exists { return fmt.Errorf("name already registered: %s", name) } middlewares[name] = initFunc return nil } // Get constructs a RepositoryMiddleware with the given options using the named backend. 
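// A sketch of how a configured repository middleware could be applied by name
// via Get (below); the middleware name and options map are placeholders, and
// errors are simply propagated to the caller.
func exampleApplyRepositoryMiddleware(ctx context.Context, repo distribution.Repository) (distribution.Repository, error) {
	options := map[string]interface{}{"example-option": true} // placeholder options
	return Get(ctx, "example-middleware", options, repo)
}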
func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) { if middlewares != nil { if initFunc, exists := middlewares[name]; exists { return initFunc(ctx, repository, options) } } return nil, fmt.Errorf("no repository middleware registered with name: %s", name) } distribution-2.3.0/registry/proxy/000077500000000000000000000000001265472114500172735ustar00rootroot00000000000000distribution-2.3.0/registry/proxy/proxyauth.go000066400000000000000000000020541265472114500216660ustar00rootroot00000000000000package proxy import ( "net/http" "net/url" "github.com/docker/distribution/registry/client/auth" ) const tokenURL = "https://auth.docker.io/token" type userpass struct { username string password string } type credentials struct { creds map[string]userpass } func (c credentials) Basic(u *url.URL) (string, string) { up := c.creds[u.String()] return up.username, up.password } // ConfigureAuth authorizes with the upstream registry func ConfigureAuth(remoteURL, username, password string, cm auth.ChallengeManager) (auth.CredentialStore, error) { if err := ping(cm, remoteURL+"/v2/", "Docker-Distribution-Api-Version"); err != nil { return nil, err } creds := map[string]userpass{ tokenURL: { username: username, password: password, }, } return credentials{creds: creds}, nil } func ping(manager auth.ChallengeManager, endpoint, versionHeader string) error { resp, err := http.Get(endpoint) if err != nil { return err } defer resp.Body.Close() if err := manager.AddResponse(resp); err != nil { return err } return nil } distribution-2.3.0/registry/proxy/proxyblobstore.go000066400000000000000000000120461265472114500227220ustar00rootroot00000000000000package proxy import ( "io" "net/http" "strconv" "sync" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" ) // todo(richardscothern): from cache control header or config file const blobTTL = time.Duration(24 * 7 * time.Hour) type proxyBlobStore struct { localStore distribution.BlobStore remoteStore distribution.BlobService scheduler *scheduler.TTLExpirationScheduler repositoryName reference.Named } var _ distribution.BlobStore = &proxyBlobStore{} // inflight tracks currently downloading blobs var inflight = make(map[digest.Digest]struct{}) // mu protects inflight var mu sync.Mutex func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) { w.Header().Set("Content-Length", strconv.FormatInt(length, 10)) w.Header().Set("Content-Type", mediaType) w.Header().Set("Docker-Content-Digest", digest.String()) w.Header().Set("Etag", digest.String()) } func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer) (distribution.Descriptor, error) { desc, err := pbs.remoteStore.Stat(ctx, dgst) if err != nil { return distribution.Descriptor{}, err } if w, ok := writer.(http.ResponseWriter); ok { setResponseHeaders(w, desc.Size, desc.MediaType, dgst) } remoteReader, err := pbs.remoteStore.Open(ctx, dgst) if err != nil { return distribution.Descriptor{}, err } _, err = io.CopyN(writer, remoteReader, desc.Size) if err != nil { return distribution.Descriptor{}, err } proxyMetrics.BlobPush(uint64(desc.Size)) return desc, nil } func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, 
error) { localDesc, err := pbs.localStore.Stat(ctx, dgst) if err != nil { // Stat can report a zero sized file here if it's checked between creation // and population. Return nil error, and continue return false, nil } if err == nil { proxyMetrics.BlobPush(uint64(localDesc.Size)) return true, pbs.localStore.ServeBlob(ctx, w, r, dgst) } return false, nil } func (pbs *proxyBlobStore) storeLocal(ctx context.Context, dgst digest.Digest) error { defer func() { mu.Lock() delete(inflight, dgst) mu.Unlock() }() var desc distribution.Descriptor var err error var bw distribution.BlobWriter bw, err = pbs.localStore.Create(ctx) if err != nil { return err } desc, err = pbs.copyContent(ctx, dgst, bw) if err != nil { return err } _, err = bw.Commit(ctx, desc) if err != nil { return err } return nil } func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { served, err := pbs.serveLocal(ctx, w, r, dgst) if err != nil { context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error()) return err } if served { return nil } mu.Lock() _, ok := inflight[dgst] if ok { mu.Unlock() _, err := pbs.copyContent(ctx, dgst, w) return err } inflight[dgst] = struct{}{} mu.Unlock() go func(dgst digest.Digest) { if err := pbs.storeLocal(ctx, dgst); err != nil { context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error()) } blobRef, err := reference.WithDigest(pbs.repositoryName, dgst) if err != nil { context.GetLogger(ctx).Errorf("Error creating reference: %s", err) return } pbs.scheduler.AddBlob(blobRef, repositoryTTL) }(dgst) _, err = pbs.copyContent(ctx, dgst, w) if err != nil { return err } return nil } func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { desc, err := pbs.localStore.Stat(ctx, dgst) if err == nil { return desc, err } if err != distribution.ErrBlobUnknown { return distribution.Descriptor{}, err } return pbs.remoteStore.Stat(ctx, dgst) } // Unsupported functions func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { return distribution.Descriptor{}, distribution.ErrUnsupported } func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { return nil, distribution.ErrUnsupported } func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { return nil, distribution.ErrUnsupported } func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { return distribution.Descriptor{}, distribution.ErrUnsupported } func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { return nil, distribution.ErrUnsupported } func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { return nil, distribution.ErrUnsupported } func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } distribution-2.3.0/registry/proxy/proxyblobstore_test.go000066400000000000000000000205621265472114500237630ustar00rootroot00000000000000package proxy import ( "io/ioutil" "math/rand" "net/http" "net/http/httptest" "sync" "testing" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" 
"github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/filesystem" "github.com/docker/distribution/registry/storage/driver/inmemory" ) var sbsMu sync.Mutex type statsBlobStore struct { stats map[string]int blobs distribution.BlobStore } func (sbs statsBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { sbsMu.Lock() sbs.stats["put"]++ sbsMu.Unlock() return sbs.blobs.Put(ctx, mediaType, p) } func (sbs statsBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { sbsMu.Lock() sbs.stats["get"]++ sbsMu.Unlock() return sbs.blobs.Get(ctx, dgst) } func (sbs statsBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { sbsMu.Lock() sbs.stats["create"]++ sbsMu.Unlock() return sbs.blobs.Create(ctx, options...) } func (sbs statsBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { sbsMu.Lock() sbs.stats["resume"]++ sbsMu.Unlock() return sbs.blobs.Resume(ctx, id) } func (sbs statsBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { sbsMu.Lock() sbs.stats["open"]++ sbsMu.Unlock() return sbs.blobs.Open(ctx, dgst) } func (sbs statsBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { sbsMu.Lock() sbs.stats["serveblob"]++ sbsMu.Unlock() return sbs.blobs.ServeBlob(ctx, w, r, dgst) } func (sbs statsBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { sbsMu.Lock() sbs.stats["stat"]++ sbsMu.Unlock() return sbs.blobs.Stat(ctx, dgst) } func (sbs statsBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { sbsMu.Lock() sbs.stats["delete"]++ sbsMu.Unlock() return sbs.blobs.Delete(ctx, dgst) } type testEnv struct { numUnique int inRemote []distribution.Descriptor store proxyBlobStore ctx context.Context } func (te *testEnv) LocalStats() *map[string]int { sbsMu.Lock() ls := te.store.localStore.(statsBlobStore).stats sbsMu.Unlock() return &ls } func (te *testEnv) RemoteStats() *map[string]int { sbsMu.Lock() rs := te.store.remoteStore.(statsBlobStore).stats sbsMu.Unlock() return &rs } // Populate remote store and record the digests func makeTestEnv(t *testing.T, name string) *testEnv { nameRef, err := reference.ParseNamed(name) if err != nil { t.Fatalf("unable to parse reference: %s", err) } ctx := context.Background() truthDir, err := ioutil.TempDir("", "truth") if err != nil { t.Fatalf("unable to create tempdir: %s", err) } cacheDir, err := ioutil.TempDir("", "cache") if err != nil { t.Fatalf("unable to create tempdir: %s", err) } // todo: create a tempfile area here localRegistry, err := storage.NewRegistry(ctx, filesystem.New(truthDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) if err != nil { t.Fatalf("error creating registry: %v", err) } localRepo, err := localRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } truthRegistry, err := storage.NewRegistry(ctx, filesystem.New(cacheDir), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } truthRepo, err := truthRegistry.Repository(ctx, nameRef) if err != 
nil { t.Fatalf("unexpected error getting repo: %v", err) } truthBlobs := statsBlobStore{ stats: make(map[string]int), blobs: truthRepo.Blobs(ctx), } localBlobs := statsBlobStore{ stats: make(map[string]int), blobs: localRepo.Blobs(ctx), } s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") proxyBlobStore := proxyBlobStore{ repositoryName: nameRef, remoteStore: truthBlobs, localStore: localBlobs, scheduler: s, } te := &testEnv{ store: proxyBlobStore, ctx: ctx, } return te } func makeBlob(size int) []byte { blob := make([]byte, size, size) for i := 0; i < size; i++ { blob[i] = byte('A' + rand.Int()%48) } return blob } func init() { rand.Seed(42) } func perm(m []distribution.Descriptor) []distribution.Descriptor { for i := 0; i < len(m); i++ { j := rand.Intn(i + 1) tmp := m[i] m[i] = m[j] m[j] = tmp } return m } func populate(t *testing.T, te *testEnv, blobCount, size, numUnique int) { var inRemote []distribution.Descriptor for i := 0; i < numUnique; i++ { bytes := makeBlob(size) for j := 0; j < blobCount/numUnique; j++ { desc, err := te.store.remoteStore.Put(te.ctx, "", bytes) if err != nil { t.Fatalf("Put in store") } inRemote = append(inRemote, desc) } } te.inRemote = inRemote te.numUnique = numUnique } func TestProxyStoreStat(t *testing.T) { te := makeTestEnv(t, "foo/bar") remoteBlobCount := 1 populate(t, te, remoteBlobCount, 10, 1) localStats := te.LocalStats() remoteStats := te.RemoteStats() // Stat - touches both stores for _, d := range te.inRemote { _, err := te.store.Stat(te.ctx, d.Digest) if err != nil { t.Fatalf("Error stating proxy store") } } if (*localStats)["stat"] != remoteBlobCount { t.Errorf("Unexpected local stat count") } if (*remoteStats)["stat"] != remoteBlobCount { t.Errorf("Unexpected remote stat count") } } func TestProxyStoreServeHighConcurrency(t *testing.T) { te := makeTestEnv(t, "foo/bar") blobSize := 200 blobCount := 10 numUnique := 1 populate(t, te, blobCount, blobSize, numUnique) numClients := 16 testProxyStoreServe(t, te, numClients) } func TestProxyStoreServeMany(t *testing.T) { te := makeTestEnv(t, "foo/bar") blobSize := 200 blobCount := 10 numUnique := 4 populate(t, te, blobCount, blobSize, numUnique) numClients := 4 testProxyStoreServe(t, te, numClients) } // todo(richardscothern): blobCount must be smaller than num clients func TestProxyStoreServeBig(t *testing.T) { te := makeTestEnv(t, "foo/bar") blobSize := 2 << 20 blobCount := 4 numUnique := 2 populate(t, te, blobCount, blobSize, numUnique) numClients := 4 testProxyStoreServe(t, te, numClients) } // testProxyStoreServe will create clients to consume all blobs // populated in the truth store func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) { localStats := te.LocalStats() remoteStats := te.RemoteStats() var wg sync.WaitGroup for i := 0; i < numClients; i++ { // Serveblob - pulls through blobs wg.Add(1) go func() { defer wg.Done() for _, remoteBlob := range te.inRemote { w := httptest.NewRecorder() r, err := http.NewRequest("GET", "", nil) if err != nil { t.Fatal(err) } err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest) if err != nil { t.Fatalf(err.Error()) } bodyBytes := w.Body.Bytes() localDigest := digest.FromBytes(bodyBytes) if localDigest != remoteBlob.Digest { t.Fatalf("Mismatching blob fetch from proxy") } } }() } wg.Wait() remoteBlobCount := len(te.inRemote) if (*localStats)["stat"] != remoteBlobCount*numClients && (*localStats)["create"] != te.numUnique { t.Fatal("Expected: stat:", remoteBlobCount*numClients, "create:", remoteBlobCount) } // Wait for any 
async storage goroutines to finish time.Sleep(3 * time.Second) remoteStatCount := (*remoteStats)["stat"] remoteOpenCount := (*remoteStats)["open"] // Serveblob - blobs come from local for _, dr := range te.inRemote { w := httptest.NewRecorder() r, err := http.NewRequest("GET", "", nil) if err != nil { t.Fatal(err) } err = te.store.ServeBlob(te.ctx, w, r, dr.Digest) if err != nil { t.Fatalf(err.Error()) } dl := digest.FromBytes(w.Body.Bytes()) if dl != dr.Digest { t.Errorf("Mismatching blob fetch from proxy") } } localStats = te.LocalStats() remoteStats = te.RemoteStats() // Ensure remote unchanged if (*remoteStats)["stat"] != remoteStatCount && (*remoteStats)["open"] != remoteOpenCount { t.Fatalf("unexpected remote stats: %#v", remoteStats) } } distribution-2.3.0/registry/proxy/proxymanifeststore.go000066400000000000000000000051761265472114500236200ustar00rootroot00000000000000package proxy import ( "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" ) // todo(richardscothern): from cache control header or config const repositoryTTL = time.Duration(24 * 7 * time.Hour) type proxyManifestStore struct { ctx context.Context localManifests distribution.ManifestService remoteManifests distribution.ManifestService repositoryName reference.Named scheduler *scheduler.TTLExpirationScheduler } var _ distribution.ManifestService = &proxyManifestStore{} func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { exists, err := pms.localManifests.Exists(ctx, dgst) if err != nil { return false, err } if exists { return true, nil } return pms.remoteManifests.Exists(ctx, dgst) } func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { // At this point `dgst` was either specified explicitly, or returned by the // tagstore with the most recent association. var fromRemote bool manifest, err := pms.localManifests.Get(ctx, dgst, options...) if err != nil { manifest, err = pms.remoteManifests.Get(ctx, dgst, options...) 
if err != nil { return nil, err } fromRemote = true } _, payload, err := manifest.Payload() if err != nil { return nil, err } proxyMetrics.ManifestPush(uint64(len(payload))) if fromRemote { proxyMetrics.ManifestPull(uint64(len(payload))) _, err = pms.localManifests.Put(ctx, manifest) if err != nil { return nil, err } // Schedule the manifest blob for removal repoBlob, err := reference.WithDigest(pms.repositoryName, dgst) if err != nil { context.GetLogger(ctx).Errorf("Error creating reference: %s", err) return nil, err } pms.scheduler.AddManifest(repoBlob, repositoryTTL) // Ensure the manifest blob is cleaned up //pms.scheduler.AddBlob(blobRef, repositoryTTL) } return manifest, err } func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { var d digest.Digest return d, distribution.ErrUnsupported } func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } /*func (pms proxyManifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { return 0, distribution.ErrUnsupported } */ distribution-2.3.0/registry/proxy/proxymanifeststore_test.go000066400000000000000000000150641265472114500246540ustar00rootroot00000000000000package proxy import ( "io" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" ) type statsManifest struct { manifests distribution.ManifestService stats map[string]int } type manifestStoreTestEnv struct { manifestDigest digest.Digest // digest of the signed manifest in the local storage manifests proxyManifestStore } func (te manifestStoreTestEnv) LocalStats() *map[string]int { ls := te.manifests.localManifests.(statsManifest).stats return &ls } func (te manifestStoreTestEnv) RemoteStats() *map[string]int { rs := te.manifests.remoteManifests.(statsManifest).stats return &rs } func (sm statsManifest) Delete(ctx context.Context, dgst digest.Digest) error { sm.stats["delete"]++ return sm.manifests.Delete(ctx, dgst) } func (sm statsManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { sm.stats["exists"]++ return sm.manifests.Exists(ctx, dgst) } func (sm statsManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { sm.stats["get"]++ return sm.manifests.Get(ctx, dgst) } func (sm statsManifest) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { sm.stats["put"]++ return sm.manifests.Put(ctx, manifest) } /*func (sm statsManifest) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { sm.stats["enumerate"]++ return sm.manifests.Enumerate(ctx, manifests, last) } */ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { nameRef, err := reference.ParseNamed(name) if err != nil { t.Fatalf("unable 
to parse reference: %s", err) } ctx := context.Background() truthRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider())) if err != nil { t.Fatalf("error creating registry: %v", err) } truthRepo, err := truthRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } tr, err := truthRepo.Manifests(ctx) if err != nil { t.Fatal(err.Error()) } truthManifests := statsManifest{ manifests: tr, stats: make(map[string]int), } manifestDigest, err := populateRepo(t, ctx, truthRepo, name, tag) if err != nil { t.Fatalf(err.Error()) } localRegistry, err := storage.NewRegistry(ctx, inmemory.New(), storage.BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), storage.EnableRedirect, storage.DisableDigestResumption) if err != nil { t.Fatalf("error creating registry: %v", err) } localRepo, err := localRegistry.Repository(ctx, nameRef) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } lr, err := localRepo.Manifests(ctx) if err != nil { t.Fatal(err.Error()) } localManifests := statsManifest{ manifests: lr, stats: make(map[string]int), } s := scheduler.New(ctx, inmemory.New(), "/scheduler-state.json") return &manifestStoreTestEnv{ manifestDigest: manifestDigest, manifests: proxyManifestStore{ ctx: ctx, localManifests: localManifests, remoteManifests: truthManifests, scheduler: s, repositoryName: nameRef, }, } } func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) { m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: name, Tag: tag, } for i := 0; i < 2; i++ { wr, err := repository.Blobs(ctx).Create(ctx) if err != nil { t.Fatalf("unexpected error creating test upload: %v", err) } rs, ts, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("unexpected error generating test layer file") } dgst := digest.Digest(ts) if _, err := io.Copy(wr, rs); err != nil { t.Fatalf("unexpected error copying to upload: %v", err) } if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating private key: %v", err) } sm, err := schema1.Sign(&m, pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } ms, err := repository.Manifests(ctx) if err != nil { t.Fatalf(err.Error()) } dgst, err := ms.Put(ctx, sm) if err != nil { t.Fatalf("unexpected errors putting manifest: %v", err) } return dgst, nil } // TestProxyManifests contains basic acceptance tests // for the pull-through behavior func TestProxyManifests(t *testing.T) { name := "foo/bar" env := newManifestStoreTestEnv(t, name, "latest") localStats := env.LocalStats() remoteStats := env.RemoteStats() ctx := context.Background() // Stat - must check local and remote exists, err := env.manifests.Exists(ctx, env.manifestDigest) if err != nil { t.Fatalf("Error checking existance") } if !exists { t.Errorf("Unexpected non-existant manifest") } if (*localStats)["exists"] != 1 && (*remoteStats)["exists"] != 1 { t.Errorf("Unexpected exists count : \n%v \n%v", localStats, remoteStats) } // Get - should succeed and pull manifest into local _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } if (*localStats)["get"] != 1 && (*remoteStats)["get"] != 1 { t.Errorf("Unexpected get count") } if 
(*localStats)["put"] != 1 { t.Errorf("Expected local put") } // Stat - should only go to local exists, err = env.manifests.Exists(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } if !exists { t.Errorf("Unexpected non-existant manifest") } if (*localStats)["exists"] != 2 && (*remoteStats)["exists"] != 1 { t.Errorf("Unexpected exists count") } // Get - should get from remote, to test freshness _, err = env.manifests.Get(ctx, env.manifestDigest) if err != nil { t.Fatal(err) } if (*remoteStats)["get"] != 2 && (*remoteStats)["exists"] != 1 && (*localStats)["put"] != 1 { t.Errorf("Unexpected get count") } } func TestProxyTagService(t *testing.T) { } distribution-2.3.0/registry/proxy/proxymetrics.go000066400000000000000000000040031265472114500223670ustar00rootroot00000000000000package proxy import ( "expvar" "sync/atomic" ) // Metrics is used to hold metric counters // related to the proxy type Metrics struct { Requests uint64 Hits uint64 Misses uint64 BytesPulled uint64 BytesPushed uint64 } type proxyMetricsCollector struct { blobMetrics Metrics manifestMetrics Metrics } // BlobPull tracks metrics about blobs pulled into the cache func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) { atomic.AddUint64(&pmc.blobMetrics.Misses, 1) atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled) } // BlobPush tracks metrics about blobs pushed to clients func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64) { atomic.AddUint64(&pmc.blobMetrics.Requests, 1) atomic.AddUint64(&pmc.blobMetrics.Hits, 1) atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed) } // ManifestPull tracks metrics related to Manifests pulled into the cache func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) { atomic.AddUint64(&pmc.manifestMetrics.Misses, 1) atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled) } // ManifestPush tracks metrics about manifests pushed to clients func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64) { atomic.AddUint64(&pmc.manifestMetrics.Requests, 1) atomic.AddUint64(&pmc.manifestMetrics.Hits, 1) atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed) } // proxyMetrics tracks metrics about the proxy cache. This is // kept globally and made available via expvar. 
var proxyMetrics = &proxyMetricsCollector{} func init() { registry := expvar.Get("registry") if registry == nil { registry = expvar.NewMap("registry") } pm := registry.(*expvar.Map).Get("proxy") if pm == nil { pm = &expvar.Map{} pm.(*expvar.Map).Init() registry.(*expvar.Map).Set("proxy", pm) } pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} { return proxyMetrics.blobMetrics })) pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} { return proxyMetrics.manifestMetrics })) } distribution-2.3.0/registry/proxy/proxyregistry.go000066400000000000000000000117441265472114500226030ustar00rootroot00000000000000package proxy import ( "net/http" "net/url" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" "github.com/docker/distribution/registry/proxy/scheduler" "github.com/docker/distribution/registry/storage" "github.com/docker/distribution/registry/storage/driver" ) // proxyingRegistry fetches content from a remote registry and caches it locally type proxyingRegistry struct { embedded distribution.Namespace // provides local registry functionality scheduler *scheduler.TTLExpirationScheduler remoteURL string credentialStore auth.CredentialStore challengeManager auth.ChallengeManager } // NewRegistryPullThroughCache creates a registry acting as a pull through cache func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) { _, err := url.Parse(config.RemoteURL) if err != nil { return nil, err } v := storage.NewVacuum(ctx, driver) s := scheduler.New(ctx, driver, "/scheduler-state.json") s.OnBlobExpire(func(ref reference.Reference) error { var r reference.Canonical var ok bool if r, ok = ref.(reference.Canonical); !ok { return fmt.Errorf("unexpected reference type : %T", ref) } repo, err := registry.Repository(ctx, r) if err != nil { return err } blobs := repo.Blobs(ctx) // Clear the repository reference and descriptor caches err = blobs.Delete(ctx, r.Digest()) if err != nil { return err } err = v.RemoveBlob(r.Digest().String()) if err != nil { return err } return nil }) s.OnManifestExpire(func(ref reference.Reference) error { var r reference.Canonical var ok bool if r, ok = ref.(reference.Canonical); !ok { return fmt.Errorf("unexpected reference type : %T", ref) } repo, err := registry.Repository(ctx, r) if err != nil { return err } manifests, err := repo.Manifests(ctx) if err != nil { return err } err = manifests.Delete(ctx, r.Digest()) if err != nil { return err } return nil }) err = s.Start() if err != nil { return nil, err } challengeManager := auth.NewSimpleChallengeManager() cs, err := ConfigureAuth(config.RemoteURL, config.Username, config.Password, challengeManager) if err != nil { return nil, err } return &proxyingRegistry{ embedded: registry, scheduler: s, challengeManager: challengeManager, credentialStore: cs, remoteURL: config.RemoteURL, }, nil } func (pr *proxyingRegistry) Scope() distribution.Scope { return distribution.GlobalScope } func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { return pr.embedded.Repositories(ctx, repos, last) } func (pr *proxyingRegistry) Repository(ctx context.Context, name 
reference.Named) (distribution.Repository, error) { tr := transport.NewTransport(http.DefaultTransport, auth.NewAuthorizer(pr.challengeManager, auth.NewTokenHandler(http.DefaultTransport, pr.credentialStore, name.Name(), "pull"))) localRepo, err := pr.embedded.Repository(ctx, name) if err != nil { return nil, err } localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification()) if err != nil { return nil, err } remoteRepo, err := client.NewRepository(ctx, name, pr.remoteURL, tr) if err != nil { return nil, err } remoteManifests, err := remoteRepo.Manifests(ctx) if err != nil { return nil, err } return &proxiedRepository{ blobStore: &proxyBlobStore{ localStore: localRepo.Blobs(ctx), remoteStore: remoteRepo.Blobs(ctx), scheduler: pr.scheduler, repositoryName: name, }, manifests: &proxyManifestStore{ repositoryName: name, localManifests: localManifests, // Options? remoteManifests: remoteManifests, ctx: ctx, scheduler: pr.scheduler, }, name: name, tags: &proxyTagService{ localTags: localRepo.Tags(ctx), remoteTags: remoteRepo.Tags(ctx), }, }, nil } // proxiedRepository uses proxying blob and manifest services to serve content // locally, or pulling it through from a remote and caching it locally if it doesn't // already exist type proxiedRepository struct { blobStore distribution.BlobStore manifests distribution.ManifestService name reference.Named tags distribution.TagService } func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { return pr.manifests, nil } func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore { return pr.blobStore } func (pr *proxiedRepository) Name() reference.Named { return pr.name } func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService { return pr.tags } distribution-2.3.0/registry/proxy/proxytagservice.go000066400000000000000000000030261265472114500230610ustar00rootroot00000000000000package proxy import ( "github.com/docker/distribution" "github.com/docker/distribution/context" ) // proxyTagService supports local and remote lookup of tags. type proxyTagService struct { localTags distribution.TagService remoteTags distribution.TagService } var _ distribution.TagService = proxyTagService{} // Get attempts to get the most recent digest for the tag by checking the remote // tag service first and then caching it locally. 
If the remote is unavailable // the local association is returned func (pt proxyTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { desc, err := pt.remoteTags.Get(ctx, tag) if err == nil { err := pt.localTags.Tag(ctx, tag, desc) if err != nil { return distribution.Descriptor{}, err } return desc, nil } desc, err = pt.localTags.Get(ctx, tag) if err != nil { return distribution.Descriptor{}, err } return desc, nil } func (pt proxyTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { return distribution.ErrUnsupported } func (pt proxyTagService) Untag(ctx context.Context, tag string) error { err := pt.localTags.Untag(ctx, tag) if err != nil { return err } return nil } func (pt proxyTagService) All(ctx context.Context) ([]string, error) { tags, err := pt.remoteTags.All(ctx) if err == nil { return tags, err } return pt.localTags.All(ctx) } func (pt proxyTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { return []string{}, distribution.ErrUnsupported } distribution-2.3.0/registry/proxy/proxytagservice_test.go000066400000000000000000000070221265472114500241200ustar00rootroot00000000000000package proxy import ( "sort" "sync" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" ) type mockTagStore struct { mapping map[string]distribution.Descriptor sync.Mutex } var _ distribution.TagService = &mockTagStore{} func (m *mockTagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { m.Lock() defer m.Unlock() if d, ok := m.mapping[tag]; ok { return d, nil } return distribution.Descriptor{}, distribution.ErrTagUnknown{} } func (m *mockTagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { m.Lock() defer m.Unlock() m.mapping[tag] = desc return nil } func (m *mockTagStore) Untag(ctx context.Context, tag string) error { m.Lock() defer m.Unlock() if _, ok := m.mapping[tag]; ok { delete(m.mapping, tag) return nil } return distribution.ErrTagUnknown{} } func (m *mockTagStore) All(ctx context.Context) ([]string, error) { m.Lock() defer m.Unlock() var tags []string for tag := range m.mapping { tags = append(tags, tag) } return tags, nil } func (m *mockTagStore) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { panic("not implemented") } func testProxyTagService(local, remote map[string]distribution.Descriptor) *proxyTagService { if local == nil { local = make(map[string]distribution.Descriptor) } if remote == nil { remote = make(map[string]distribution.Descriptor) } return &proxyTagService{ localTags: &mockTagStore{mapping: local}, remoteTags: &mockTagStore{mapping: remote}, } } func TestGet(t *testing.T) { remoteDesc := distribution.Descriptor{Size: 42} remoteTag := "remote" proxyTags := testProxyTagService(map[string]distribution.Descriptor{remoteTag: remoteDesc}, nil) ctx := context.Background() // Get pre-loaded tag d, err := proxyTags.Get(ctx, remoteTag) if err != nil { t.Fatal(err) } if d != remoteDesc { t.Fatal("unable to get put tag") } local, err := proxyTags.localTags.Get(ctx, remoteTag) if err != nil { t.Fatal("remote tag not pulled into store") } if local != remoteDesc { t.Fatalf("unexpected descriptor pulled through") } // Manually overwrite remote tag newRemoteDesc := distribution.Descriptor{Size: 43} err = proxyTags.remoteTags.Tag(ctx, remoteTag, newRemoteDesc) if err != nil { t.Fatal(err) } d, err = proxyTags.Get(ctx, remoteTag) if err != nil { t.Fatal(err) } if d != newRemoteDesc 
{ t.Fatal("unable to get put tag") } _, err = proxyTags.localTags.Get(ctx, remoteTag) if err != nil { t.Fatal("remote tag not pulled into store") } // untag, ensure it's removed locally, but present in remote err = proxyTags.Untag(ctx, remoteTag) if err != nil { t.Fatal(err) } _, err = proxyTags.localTags.Get(ctx, remoteTag) if err == nil { t.Fatalf("Expected error getting Untag'd tag") } _, err = proxyTags.remoteTags.Get(ctx, remoteTag) if err != nil { t.Fatalf("remote tag should not be untagged with proxyTag.Untag") } _, err = proxyTags.Get(ctx, remoteTag) if err != nil { t.Fatal("untagged tag should be pulled through") } // Add another tag. Ensure both tags appear in enumerate err = proxyTags.remoteTags.Tag(ctx, "funtag", distribution.Descriptor{Size: 42}) if err != nil { t.Fatal(err) } all, err := proxyTags.All(ctx) if err != nil { t.Fatal(err) } if len(all) != 2 { t.Fatalf("Unexpected tag length returned from All() : %d ", len(all)) } sort.Strings(all) if all[0] != "funtag" && all[1] != "remote" { t.Fatalf("Unexpected tags returned from All() : %v ", all) } } distribution-2.3.0/registry/proxy/scheduler/000077500000000000000000000000001265472114500212515ustar00rootroot00000000000000distribution-2.3.0/registry/proxy/scheduler/scheduler.go000066400000000000000000000135601265472114500235630ustar00rootroot00000000000000package scheduler import ( "encoding/json" "fmt" "sync" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" ) // onTTLExpiryFunc is called when a repository's TTL expires type expiryFunc func(reference.Reference) error const ( entryTypeBlob = iota entryTypeManifest indexSaveFrequency = 5 * time.Second ) // schedulerEntry represents an entry in the scheduler // fields are exported for serialization type schedulerEntry struct { Key string `json:"Key"` Expiry time.Time `json:"ExpiryData"` EntryType int `json:"EntryType"` timer *time.Timer } // New returns a new instance of the scheduler func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler { return &TTLExpirationScheduler{ entries: make(map[string]*schedulerEntry), driver: driver, pathToStateFile: path, ctx: ctx, stopped: true, doneChan: make(chan struct{}), saveTimer: time.NewTicker(indexSaveFrequency), } } // TTLExpirationScheduler is a scheduler used to perform actions // when TTLs expire type TTLExpirationScheduler struct { sync.Mutex entries map[string]*schedulerEntry driver driver.StorageDriver ctx context.Context pathToStateFile string stopped bool onBlobExpire expiryFunc onManifestExpire expiryFunc indexDirty bool saveTimer *time.Ticker doneChan chan struct{} } // OnBlobExpire is called when a scheduled blob's TTL expires func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) { ttles.Lock() defer ttles.Unlock() ttles.onBlobExpire = f } // OnManifestExpire is called when a scheduled manifest's TTL expires func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) { ttles.Lock() defer ttles.Unlock() ttles.onManifestExpire = f } // AddBlob schedules a blob cleanup after ttl expires func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() if ttles.stopped { return fmt.Errorf("scheduler not started") } ttles.add(blobRef, ttl, entryTypeBlob) return nil } // AddManifest schedules a manifest cleanup after ttl expires func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, 
ttl time.Duration) error { ttles.Lock() defer ttles.Unlock() if ttles.stopped { return fmt.Errorf("scheduler not started") } ttles.add(manifestRef, ttl, entryTypeManifest) return nil } // Start starts the scheduler func (ttles *TTLExpirationScheduler) Start() error { ttles.Lock() defer ttles.Unlock() err := ttles.readState() if err != nil { return err } if !ttles.stopped { return fmt.Errorf("Scheduler already started") } context.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...") ttles.stopped = false // Start timer for each deserialized entry for _, entry := range ttles.entries { entry.timer = ttles.startTimer(entry, entry.Expiry.Sub(time.Now())) } // Start a ticker to periodically save the entries index go func() { for { select { case <-ttles.saveTimer.C: if !ttles.indexDirty { continue } ttles.Lock() err := ttles.writeState() if err != nil { context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) } else { ttles.indexDirty = false } ttles.Unlock() case <-ttles.doneChan: return } } }() return nil } func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) { entry := &schedulerEntry{ Key: r.String(), Expiry: time.Now().Add(ttl), EntryType: eType, } context.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, entry.Expiry.Sub(time.Now())) if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil { oldEntry.timer.Stop() } ttles.entries[entry.Key] = entry entry.timer = ttles.startTimer(entry, ttl) ttles.indexDirty = true } func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { return time.AfterFunc(ttl, func() { ttles.Lock() defer ttles.Unlock() var f expiryFunc switch entry.EntryType { case entryTypeBlob: f = ttles.onBlobExpire case entryTypeManifest: f = ttles.onManifestExpire default: f = func(reference.Reference) error { return fmt.Errorf("scheduler entry type") } } ref, err := reference.Parse(entry.Key) if err == nil { if err := f(ref); err != nil { context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) } } else { context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) } delete(ttles.entries, entry.Key) ttles.indexDirty = true }) } // Stop stops the scheduler. 
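// Illustrative sketch (an assumption, not part of the original file): how a
// consumer such as the pull-through proxy might wire the scheduler together --
// register an expiry callback, start it, and schedule a cached blob for
// cleanup. The function name and state-file path are hypothetical.
func exampleSchedulerUsage(ctx context.Context, d driver.StorageDriver, blobRef reference.Canonical) error {
	s := New(ctx, d, "/scheduler-state.json")
	s.OnBlobExpire(func(ref reference.Reference) error {
		// A real callback would delete the cached blob here.
		context.GetLogger(ctx).Infof("cached blob %s expired", ref.String())
		return nil
	})
	if err := s.Start(); err != nil {
		return err
	}
	// Schedule cleanup one day from now; Stop should be called on shutdown
	// so pending entries are persisted via writeState.
	return s.AddBlob(blobRef, 24*time.Hour)
}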
func (ttles *TTLExpirationScheduler) Stop() { ttles.Lock() defer ttles.Unlock() if err := ttles.writeState(); err != nil { context.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err) } for _, entry := range ttles.entries { entry.timer.Stop() } close(ttles.doneChan) ttles.saveTimer.Stop() ttles.stopped = true } func (ttles *TTLExpirationScheduler) writeState() error { jsonBytes, err := json.Marshal(ttles.entries) if err != nil { return err } err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes) if err != nil { return err } return nil } func (ttles *TTLExpirationScheduler) readState() error { if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil { switch err := err.(type) { case driver.PathNotFoundError: return nil default: return err } } bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile) if err != nil { return err } err = json.Unmarshal(bytes, &ttles.entries) if err != nil { return err } return nil } distribution-2.3.0/registry/proxy/scheduler/scheduler_test.go000066400000000000000000000110341265472114500246140ustar00rootroot00000000000000package scheduler import ( "encoding/json" "testing" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver/inmemory" ) func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { t.Fatalf("could not parse reference: %v", err) } ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") if err != nil { t.Fatalf("could not parse reference: %v", err) } ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") if err != nil { t.Fatalf("could not parse reference: %v", err) } return ref1, ref2, ref3 } func TestSchedule(t *testing.T) { ref1, ref2, ref3 := testRefs(t) timeUnit := time.Millisecond remainingRepos := map[string]bool{ ref1.String(): true, ref2.String(): true, ref3.String(): true, } s := New(context.Background(), inmemory.New(), "/ttl") deleteFunc := func(repoName reference.Reference) error { if len(remainingRepos) == 0 { t.Fatalf("Incorrect expiry count") } _, ok := remainingRepos[repoName.String()] if !ok { t.Fatalf("Trying to remove nonexistant repo: %s", repoName) } t.Log("removing", repoName) delete(remainingRepos, repoName.String()) return nil } s.onBlobExpire = deleteFunc err := s.Start() if err != nil { t.Fatalf("Error starting ttlExpirationScheduler: %s", err) } s.add(ref1, 3*timeUnit, entryTypeBlob) s.add(ref2, 1*timeUnit, entryTypeBlob) func() { s.add(ref3, 1*timeUnit, entryTypeBlob) }() // Ensure all repos are deleted <-time.After(50 * timeUnit) if len(remainingRepos) != 0 { t.Fatalf("Repositories remaining: %#v", remainingRepos) } } func TestRestoreOld(t *testing.T) { ref1, ref2, _ := testRefs(t) remainingRepos := map[string]bool{ ref1.String(): true, ref2.String(): true, } deleteFunc := func(r reference.Reference) error { if r.String() == ref1.String() && len(remainingRepos) == 2 { t.Errorf("ref1 should be removed first") } _, ok := remainingRepos[r.String()] if !ok { t.Fatalf("Trying to remove nonexistant repo: %s", r) } delete(remainingRepos, r.String()) return nil } timeUnit := time.Millisecond serialized, err := json.Marshal(&map[string]schedulerEntry{ ref1.String(): { Expiry: time.Now().Add(1 * 
timeUnit), Key: ref1.String(), EntryType: 0, }, ref2.String(): { Expiry: time.Now().Add(-3 * timeUnit), // TTL passed, should be removed first Key: ref2.String(), EntryType: 0, }, }) if err != nil { t.Fatalf("Error serializing test data: %s", err.Error()) } ctx := context.Background() pathToStatFile := "/ttl" fs := inmemory.New() err = fs.PutContent(ctx, pathToStatFile, serialized) if err != nil { t.Fatal("Unable to write serialized data to fs") } s := New(context.Background(), fs, "/ttl") s.onBlobExpire = deleteFunc err = s.Start() if err != nil { t.Fatalf("Error starting ttlExpirationScheduler: %s", err) } <-time.After(50 * timeUnit) if len(remainingRepos) != 0 { t.Fatalf("Repositories remaining: %#v", remainingRepos) } } func TestStopRestore(t *testing.T) { ref1, ref2, _ := testRefs(t) timeUnit := time.Millisecond remainingRepos := map[string]bool{ ref1.String(): true, ref2.String(): true, } deleteFunc := func(r reference.Reference) error { delete(remainingRepos, r.String()) return nil } fs := inmemory.New() pathToStateFile := "/ttl" s := New(context.Background(), fs, pathToStateFile) s.onBlobExpire = deleteFunc err := s.Start() if err != nil { t.Fatalf(err.Error()) } s.add(ref1, 300*timeUnit, entryTypeBlob) s.add(ref2, 100*timeUnit, entryTypeBlob) // Start and stop before all operations complete // state will be written to fs s.Stop() time.Sleep(10 * time.Millisecond) // v2 will restore state from fs s2 := New(context.Background(), fs, pathToStateFile) s2.onBlobExpire = deleteFunc err = s2.Start() if err != nil { t.Fatalf("Error starting v2: %s", err.Error()) } <-time.After(500 * timeUnit) if len(remainingRepos) != 0 { t.Fatalf("Repositories remaining: %#v", remainingRepos) } } func TestDoubleStart(t *testing.T) { s := New(context.Background(), inmemory.New(), "/ttl") err := s.Start() if err != nil { t.Fatalf("Unable to start scheduler") } err = s.Start() if err == nil { t.Fatalf("Scheduler started twice without error") } } distribution-2.3.0/registry/registry.go000066400000000000000000000222371265472114500203170ustar00rootroot00000000000000package registry import ( "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net/http" "os" "time" log "github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus/formatters/logstash" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/configuration" "github.com/docker/distribution/context" "github.com/docker/distribution/health" "github.com/docker/distribution/registry/handlers" "github.com/docker/distribution/registry/listener" "github.com/docker/distribution/uuid" "github.com/docker/distribution/version" gorhandlers "github.com/gorilla/handlers" "github.com/spf13/cobra" "github.com/yvasiyarov/gorelic" ) // Cmd is a cobra command for running the registry. 
var Cmd = &cobra.Command{ Use: "registry ", Short: "registry stores and distributes Docker images", Long: "registry stores and distributes Docker images.", Run: func(cmd *cobra.Command, args []string) { if showVersion { version.PrintVersion() return } // setup context ctx := context.WithVersion(context.Background(), version.Version) config, err := resolveConfiguration(args) if err != nil { fmt.Fprintf(os.Stderr, "configuration error: %v\n", err) cmd.Usage() os.Exit(1) } if config.HTTP.Debug.Addr != "" { go func(addr string) { log.Infof("debug server listening %v", addr) if err := http.ListenAndServe(addr, nil); err != nil { log.Fatalf("error listening on debug interface: %v", err) } }(config.HTTP.Debug.Addr) } registry, err := NewRegistry(ctx, config) if err != nil { log.Fatalln(err) } if err = registry.ListenAndServe(); err != nil { log.Fatalln(err) } }, } var showVersion bool func init() { Cmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit") } // A Registry represents a complete instance of the registry. // TODO(aaronl): It might make sense for Registry to become an interface. type Registry struct { config *configuration.Configuration app *handlers.App server *http.Server } // NewRegistry creates a new registry from a context and configuration struct. func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) { var err error ctx, err = configureLogging(ctx, config) if err != nil { return nil, fmt.Errorf("error configuring logger: %v", err) } // inject a logger into the uuid library. warns us if there is a problem // with uuid generation under low entropy. uuid.Loggerf = context.GetLogger(ctx).Warnf app := handlers.NewApp(ctx, config) // TODO(aaronl): The global scope of the health checks means NewRegistry // can only be called once per process. app.RegisterHealthChecks() handler := configureReporting(app) handler = alive("/", handler) handler = health.Handler(handler) handler = panicHandler(handler) handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler) server := &http.Server{ Handler: handler, } return &Registry{ app: app, config: config, server: server, }, nil } // ListenAndServe runs the registry's HTTP server. 
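// Illustrative sketch (an assumption, not in the original source): minimal
// programmatic use of this package, mirroring what the cobra command above
// does after resolving its configuration. The function name is hypothetical.
func runRegistryFromConfig(ctx context.Context, config *configuration.Configuration) error {
	reg, err := NewRegistry(ctx, config)
	if err != nil {
		return err
	}
	return reg.ListenAndServe()
}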
func (registry *Registry) ListenAndServe() error { config := registry.config ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr) if err != nil { return err } if config.HTTP.TLS.Certificate != "" { tlsConf := &tls.Config{ ClientAuth: tls.NoClientCert, NextProtos: []string{"http/1.1"}, Certificates: make([]tls.Certificate, 1), MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, }, } tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key) if err != nil { return err } if len(config.HTTP.TLS.ClientCAs) != 0 { pool := x509.NewCertPool() for _, ca := range config.HTTP.TLS.ClientCAs { caPem, err := ioutil.ReadFile(ca) if err != nil { return err } if ok := pool.AppendCertsFromPEM(caPem); !ok { return fmt.Errorf("Could not add CA to pool") } } for _, subj := range pool.Subjects() { context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj)) } tlsConf.ClientAuth = tls.RequireAndVerifyClientCert tlsConf.ClientCAs = pool } ln = tls.NewListener(ln, tlsConf) context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr()) } else { context.GetLogger(registry.app).Infof("listening on %v", ln.Addr()) } return registry.server.Serve(ln) } func configureReporting(app *handlers.App) http.Handler { var handler http.Handler = app if app.Config.Reporting.Bugsnag.APIKey != "" { bugsnagConfig := bugsnag.Configuration{ APIKey: app.Config.Reporting.Bugsnag.APIKey, // TODO(brianbland): provide the registry version here // AppVersion: "2.0", } if app.Config.Reporting.Bugsnag.ReleaseStage != "" { bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage } if app.Config.Reporting.Bugsnag.Endpoint != "" { bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint } bugsnag.Configure(bugsnagConfig) handler = bugsnag.Handler(handler) } if app.Config.Reporting.NewRelic.LicenseKey != "" { agent := gorelic.NewAgent() agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey if app.Config.Reporting.NewRelic.Name != "" { agent.NewrelicName = app.Config.Reporting.NewRelic.Name } agent.CollectHTTPStat = true agent.Verbose = app.Config.Reporting.NewRelic.Verbose agent.Run() handler = agent.WrapHTTPHandler(handler) } return handler } // configureLogging prepares the context with a logger using the // configuration. func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) { if config.Log.Level == "" && config.Log.Formatter == "" { // If no config for logging is set, fallback to deprecated "Loglevel". log.SetLevel(logLevel(config.Loglevel)) ctx = context.WithLogger(ctx, context.GetLogger(ctx)) return ctx, nil } log.SetLevel(logLevel(config.Log.Level)) formatter := config.Log.Formatter if formatter == "" { formatter = "text" // default formatter } switch formatter { case "json": log.SetFormatter(&log.JSONFormatter{ TimestampFormat: time.RFC3339Nano, }) case "text": log.SetFormatter(&log.TextFormatter{ TimestampFormat: time.RFC3339Nano, }) case "logstash": log.SetFormatter(&logstash.LogstashFormatter{ TimestampFormat: time.RFC3339Nano, }) default: // just let the library use default on empty string. 
if config.Log.Formatter != "" { return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter) } } if config.Log.Formatter != "" { log.Debugf("using %q logging formatter", config.Log.Formatter) } if len(config.Log.Fields) > 0 { // build up the static fields, if present. var fields []interface{} for k := range config.Log.Fields { fields = append(fields, k) } ctx = context.WithValues(ctx, config.Log.Fields) ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...)) } return ctx, nil } func logLevel(level configuration.Loglevel) log.Level { l, err := log.ParseLevel(string(level)) if err != nil { l = log.InfoLevel log.Warnf("error parsing level %q: %v, using %q ", level, err, l) } return l } // panicHandler add a HTTP handler to web app. The handler recover the happening // panic. logrus.Panic transmits panic message to pre-config log hooks, which is // defined in config.yml. func panicHandler(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer func() { if err := recover(); err != nil { log.Panic(fmt.Sprintf("%v", err)) } }() handler.ServeHTTP(w, r) }) } // alive simply wraps the handler with a route that always returns an http 200 // response when the path is matched. If the path is not matched, the request // is passed to the provided handler. There is no guarantee of anything but // that the server is up. Wrap with other handlers (such as health.Handler) // for greater affect. func alive(path string, handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == path { w.Header().Set("Cache-Control", "no-cache") w.WriteHeader(http.StatusOK) return } handler.ServeHTTP(w, r) }) } func resolveConfiguration(args []string) (*configuration.Configuration, error) { var configurationPath string if len(args) > 0 { configurationPath = args[0] } else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" { configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH") } if configurationPath == "" { return nil, fmt.Errorf("configuration path unspecified") } fp, err := os.Open(configurationPath) if err != nil { return nil, err } defer fp.Close() config, err := configuration.Parse(fp) if err != nil { return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err) } return config, nil } distribution-2.3.0/registry/storage/000077500000000000000000000000001265472114500175565ustar00rootroot00000000000000distribution-2.3.0/registry/storage/blob_test.go000066400000000000000000000367761265472114500221050ustar00rootroot00000000000000package storage import ( "bytes" "crypto/sha256" "fmt" "io" "io/ioutil" "os" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" ) // TestSimpleBlobUpload covers the blob upload process, exercising common // error paths that might be seen during an upload. 
func TestSimpleBlobUpload(t *testing.T) { randomDataReader, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random reader: %v", err) } ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } bs := repository.Blobs(ctx) h := sha256.New() rd := io.TeeReader(randomDataReader, h) blobUpload, err := bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting layer upload: %s", err) } // Cancel the upload then restart it if err := blobUpload.Cancel(ctx); err != nil { t.Fatalf("unexpected error during upload cancellation: %v", err) } // Do a resume, get unknown upload blobUpload, err = bs.Resume(ctx, blobUpload.ID()) if err != distribution.ErrBlobUploadUnknown { t.Fatalf("unexpected error resuming upload, should be unknown: %v", err) } // Restart! blobUpload, err = bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting layer upload: %s", err) } // Get the size of our random tarfile randomDataSize, err := seekerSize(randomDataReader) if err != nil { t.Fatalf("error getting seeker size of random data: %v", err) } nn, err := io.Copy(blobUpload, rd) if err != nil { t.Fatalf("unexpected error uploading layer data: %v", err) } if nn != randomDataSize { t.Fatalf("layer data write incomplete") } offset, err := blobUpload.Seek(0, os.SEEK_CUR) if err != nil { t.Fatalf("unexpected error seeking layer upload: %v", err) } if offset != nn { t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn) } blobUpload.Close() // Do a resume, for good fun blobUpload, err = bs.Resume(ctx, blobUpload.ID()) if err != nil { t.Fatalf("unexpected error resuming upload: %v", err) } sha256Digest := digest.NewDigest("sha256", h) desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) if err != nil { t.Fatalf("unexpected error finishing layer upload: %v", err) } // After finishing an upload, it should no longer exist. if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown { t.Fatalf("expected layer upload to be unknown, got %v", err) } // Test for existence. 
statDesc, err := bs.Stat(ctx, desc.Digest) if err != nil { t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } if statDesc != desc { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } rc, err := bs.Open(ctx, desc.Digest) if err != nil { t.Fatalf("unexpected error opening blob for read: %v", err) } defer rc.Close() h.Reset() nn, err = io.Copy(h, rc) if err != nil { t.Fatalf("error reading layer: %v", err) } if nn != randomDataSize { t.Fatalf("incorrect read length") } if digest.NewDigest("sha256", h) != sha256Digest { t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest) } // Delete a blob err = bs.Delete(ctx, desc.Digest) if err != nil { t.Fatalf("Unexpected error deleting blob") } d, err := bs.Stat(ctx, desc.Digest) if err == nil { t.Fatalf("unexpected non-error stating deleted blob: %v", d) } switch err { case distribution.ErrBlobUnknown: break default: t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) } _, err = bs.Open(ctx, desc.Digest) if err == nil { t.Fatalf("unexpected success opening deleted blob for read") } switch err { case distribution.ErrBlobUnknown: break default: t.Errorf("Unexpected error type getting deleted manifest: %#v", err) } // Re-upload the blob randomBlob, err := ioutil.ReadAll(randomDataReader) if err != nil { t.Fatalf("Error reading all of blob %s", err.Error()) } expectedDigest := digest.FromBytes(randomBlob) simpleUpload(t, bs, randomBlob, expectedDigest) d, err = bs.Stat(ctx, expectedDigest) if err != nil { t.Errorf("unexpected error stat-ing blob") } if d.Digest != expectedDigest { t.Errorf("Mismatching digest with restored blob") } _, err = bs.Open(ctx, expectedDigest) if err != nil { t.Errorf("Unexpected error opening blob") } // Reuse state to test delete with a delete-disabled registry registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repository, err = registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } bs = repository.Blobs(ctx) err = bs.Delete(ctx, desc.Digest) if err == nil { t.Errorf("Unexpected success deleting while disabled") } } // TestSimpleBlobRead just creates a simple blob file and ensures that basic // open, read, seek, read works. More specific edge cases should be covered in // other tests. func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } bs := repository.Blobs(ctx) randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. if err != nil { t.Fatalf("error creating random data: %v", err) } // Test for existence. 
desc, err := bs.Stat(ctx, dgst) if err != distribution.ErrBlobUnknown { t.Fatalf("expected not found error when testing for existence: %v", err) } rc, err := bs.Open(ctx, dgst) if err != distribution.ErrBlobUnknown { t.Fatalf("expected not found error when opening non-existent blob: %v", err) } randomLayerSize, err := seekerSize(randomLayerReader) if err != nil { t.Fatalf("error getting seeker size for random layer: %v", err) } descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize} t.Logf("desc: %v", descBefore) desc, err = addBlob(ctx, bs, descBefore, randomLayerReader) if err != nil { t.Fatalf("error adding blob to blobservice: %v", err) } if desc.Size != randomLayerSize { t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize) } rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest. if err != nil { t.Fatalf("error opening blob with %v: %v", dgst, err) } defer rc.Close() // Now check the sha digest and ensure its the same h := sha256.New() nn, err := io.Copy(h, rc) if err != nil { t.Fatalf("unexpected error copying to hash: %v", err) } if nn != randomLayerSize { t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize) } sha256Digest := digest.NewDigest("sha256", h) if sha256Digest != desc.Digest { t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest) } // Now seek back the blob, read the whole thing and check against randomLayerData offset, err := rc.Seek(0, os.SEEK_SET) if err != nil { t.Fatalf("error seeking blob: %v", err) } if offset != 0 { t.Fatalf("seek failed: expected 0 offset, got %d", offset) } p, err := ioutil.ReadAll(rc) if err != nil { t.Fatalf("error reading all of blob: %v", err) } if len(p) != int(randomLayerSize) { t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize) } // Reset the randomLayerReader and read back the buffer _, err = randomLayerReader.Seek(0, os.SEEK_SET) if err != nil { t.Fatalf("error resetting layer reader: %v", err) } randomLayerData, err := ioutil.ReadAll(randomLayerReader) if err != nil { t.Fatalf("random layer read failed: %v", err) } if !bytes.Equal(p, randomLayerData) { t.Fatalf("layer data not equal") } } // TestBlobMount covers the blob mount process, exercising common // error paths that might be seen during a mount. 
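// Illustrative sketch (an assumption, not part of the original tests): the
// cross-repository mount pattern that TestBlobMount below exercises in full.
// Note that a successful mount surfaces as distribution.ErrBlobMounted rather
// than a nil error from Create. The helper name is hypothetical.
func mountBlobSketch(ctx context.Context, bs distribution.BlobIngester, canonicalRef reference.Canonical) (distribution.Descriptor, error) {
	_, err := bs.Create(ctx, WithMountFrom(canonicalRef))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// The blob already existed in the source repository and was linked
		// into this one; no upload is necessary.
		return ebm.Descriptor, nil
	}
	if err != nil {
		return distribution.Descriptor{}, err
	}
	return distribution.Descriptor{}, fmt.Errorf("mount did not occur; a regular upload would proceed instead")
}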
func TestBlobMount(t *testing.T) { randomDataReader, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random reader: %v", err) } ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") sourceImageName, _ := reference.ParseNamed("foo/source") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } sourceRepository, err := registry.Repository(ctx, sourceImageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } sbs := sourceRepository.Blobs(ctx) blobUpload, err := sbs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting layer upload: %s", err) } // Get the size of our random tarfile randomDataSize, err := seekerSize(randomDataReader) if err != nil { t.Fatalf("error getting seeker size of random data: %v", err) } nn, err := io.Copy(blobUpload, randomDataReader) if err != nil { t.Fatalf("unexpected error uploading layer data: %v", err) } desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) if err != nil { t.Fatalf("unexpected error finishing layer upload: %v", err) } // Test for existence. statDesc, err := sbs.Stat(ctx, desc.Digest) if err != nil { t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) } if statDesc != desc { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } bs := repository.Blobs(ctx) // Test destination for existence. statDesc, err = bs.Stat(ctx, desc.Digest) if err == nil { t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) } canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest) if err != nil { t.Fatal(err) } bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) if bw != nil { t.Fatal("unexpected blobwriter returned from Create call, should mount instead") } ebm, ok := err.(distribution.ErrBlobMounted) if !ok { t.Fatalf("unexpected error mounting layer: %v", err) } if ebm.Descriptor != desc { t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) } // Test for existence. 
statDesc, err = bs.Stat(ctx, desc.Digest) if err != nil { t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) } if statDesc != desc { t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) } rc, err := bs.Open(ctx, desc.Digest) if err != nil { t.Fatalf("unexpected error opening blob for read: %v", err) } defer rc.Close() h := sha256.New() nn, err = io.Copy(h, rc) if err != nil { t.Fatalf("error reading layer: %v", err) } if nn != randomDataSize { t.Fatalf("incorrect read length") } if digest.NewDigest("sha256", h) != dgst { t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) } // Delete the blob from the source repo err = sbs.Delete(ctx, desc.Digest) if err != nil { t.Fatalf("Unexpected error deleting blob") } d, err := bs.Stat(ctx, desc.Digest) if err != nil { t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) } d, err = sbs.Stat(ctx, desc.Digest) if err == nil { t.Fatalf("unexpected non-error stating deleted blob: %v", d) } switch err { case distribution.ErrBlobUnknown: break default: t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) } // Delete the blob from the dest repo err = bs.Delete(ctx, desc.Digest) if err != nil { t.Fatalf("Unexpected error deleting blob") } d, err = bs.Stat(ctx, desc.Digest) if err == nil { t.Fatalf("unexpected non-error stating deleted blob: %v", d) } switch err { case distribution.ErrBlobUnknown: break default: t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) } } // TestLayerUploadZeroLength uploads zero-length func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repository, err := registry.Repository(ctx, imageName) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } bs := repository.Blobs(ctx) simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar) } func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) { ctx := context.Background() wr, err := bs.Create(ctx) if err != nil { t.Fatalf("unexpected error starting upload: %v", err) } nn, err := io.Copy(wr, bytes.NewReader(blob)) if err != nil { t.Fatalf("error copying into blob writer: %v", err) } if nn != 0 { t.Fatalf("unexpected number of bytes copied: %v > 0", nn) } dgst, err := digest.FromReader(bytes.NewReader(blob)) if err != nil { t.Fatalf("error getting digest: %v", err) } if dgst != expectedDigest { // sanity check on zero digest t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) } desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) if err != nil { t.Fatalf("unexpected error committing write: %v", err) } if desc.Digest != dgst { t.Fatalf("unexpected digest: %v != %v", desc.Digest, dgst) } } // seekerSize seeks to the end of seeker, checks the size and returns it to // the original state, returning the size. The state of the seeker should be // treated as unknown if an error is returned. 
func seekerSize(seeker io.ReadSeeker) (int64, error) { current, err := seeker.Seek(0, os.SEEK_CUR) if err != nil { return 0, err } end, err := seeker.Seek(0, os.SEEK_END) if err != nil { return 0, err } resumed, err := seeker.Seek(current, os.SEEK_SET) if err != nil { return 0, err } if resumed != current { return 0, fmt.Errorf("error returning seeker to original state, could not seek back to original location") } return end, nil } // addBlob simply consumes the reader and inserts into the blob service, // returning a descriptor on success. func addBlob(ctx context.Context, bs distribution.BlobIngester, desc distribution.Descriptor, rd io.Reader) (distribution.Descriptor, error) { wr, err := bs.Create(ctx) if err != nil { return distribution.Descriptor{}, err } defer wr.Cancel(ctx) if nn, err := io.Copy(wr, rd); err != nil { return distribution.Descriptor{}, err } else if nn != desc.Size { return distribution.Descriptor{}, fmt.Errorf("incorrect number of bytes copied: %v != %v", nn, desc.Size) } return wr.Commit(ctx, desc) } distribution-2.3.0/registry/storage/blobcachemetrics.go000066400000000000000000000030711265472114500233770ustar00rootroot00000000000000package storage import ( "expvar" "sync/atomic" "github.com/docker/distribution/registry/storage/cache" ) type blobStatCollector struct { metrics cache.Metrics } func (bsc *blobStatCollector) Hit() { atomic.AddUint64(&bsc.metrics.Requests, 1) atomic.AddUint64(&bsc.metrics.Hits, 1) } func (bsc *blobStatCollector) Miss() { atomic.AddUint64(&bsc.metrics.Requests, 1) atomic.AddUint64(&bsc.metrics.Misses, 1) } func (bsc *blobStatCollector) Metrics() cache.Metrics { return bsc.metrics } // blobStatterCacheMetrics keeps track of cache metrics for blob descriptor // cache requests. Note this is kept globally and made available via expvar. // For more detailed metrics, its recommend to instrument a particular cache // implementation. var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{} func init() { registry := expvar.Get("registry") if registry == nil { registry = expvar.NewMap("registry") } cache := registry.(*expvar.Map).Get("cache") if cache == nil { cache = &expvar.Map{} cache.(*expvar.Map).Init() registry.(*expvar.Map).Set("cache", cache) } storage := cache.(*expvar.Map).Get("storage") if storage == nil { storage = &expvar.Map{} storage.(*expvar.Map).Init() cache.(*expvar.Map).Set("storage", storage) } storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} { // no need for synchronous access: the increments are atomic and // during reading, we don't care if the data is up to date. The // numbers will always *eventually* be reported correctly. return blobStatterCacheMetrics })) } distribution-2.3.0/registry/storage/blobserver.go000066400000000000000000000041571265472114500222610ustar00rootroot00000000000000package storage import ( "fmt" "net/http" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" ) // TODO(stevvooe): This should configurable in the future. const blobCacheControlMaxAge = 365 * 24 * time.Hour // blobServer simply serves blobs from a driver instance using a path function // to identify paths and a descriptor service to fill in metadata. 
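// Illustrative sketch (an assumption, not part of the original file): one way
// a caller could adapt ServeBlob into a plain http.Handler once the requested
// digest has been resolved elsewhere. The helper name is hypothetical.
func blobHandlerSketch(ctx context.Context, bs *blobServer, dgst digest.Digest) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := bs.ServeBlob(ctx, w, r, dgst); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	})
}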
type blobServer struct { driver driver.StorageDriver statter distribution.BlobStatter pathFn func(dgst digest.Digest) (string, error) redirect bool // allows disabling URLFor redirects } func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { desc, err := bs.statter.Stat(ctx, dgst) if err != nil { return err } path, err := bs.pathFn(desc.Digest) if err != nil { return err } if bs.redirect { redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) switch err.(type) { case nil: // Redirect to storage URL. http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) return err case driver.ErrUnsupportedMethod: // Fallback to serving the content directly. default: // Some unexpected error. return err } } br, err := newFileReader(ctx, bs.driver, path, desc.Size) if err != nil { return err } defer br.Close() w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) if w.Header().Get("Docker-Content-Digest") == "" { w.Header().Set("Docker-Content-Digest", desc.Digest.String()) } if w.Header().Get("Content-Type") == "" { // Set the content type if not already set. w.Header().Set("Content-Type", desc.MediaType) } if w.Header().Get("Content-Length") == "" { // Set the content length if not already set. w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) } http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) return nil } distribution-2.3.0/registry/storage/blobstore.go000066400000000000000000000131601265472114500221010ustar00rootroot00000000000000package storage import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" ) // blobStore implements the read side of the blob store interface over a // driver without enforcing per-repository membership. This object is // intentionally a leaky abstraction, providing utility methods that support // creating and traversing backend links. type blobStore struct { driver driver.StorageDriver statter distribution.BlobStatter } var _ distribution.BlobProvider = &blobStore{} // Get implements the BlobReadService.Get call. func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { bp, err := bs.path(dgst) if err != nil { return nil, err } p, err := bs.driver.GetContent(ctx, bp) if err != nil { switch err.(type) { case driver.PathNotFoundError: return nil, distribution.ErrBlobUnknown } return nil, err } return p, err } func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { desc, err := bs.statter.Stat(ctx, dgst) if err != nil { return nil, err } path, err := bs.path(desc.Digest) if err != nil { return nil, err } return newFileReader(ctx, bs.driver, path, desc.Size) } // Put stores the content p in the blob store, calculating the digest. If the // content is already present, only the digest will be returned. This should // only be used for small objects, such as manifests. 
This is implemented as a convenience for other Put implementations. func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { dgst := digest.FromBytes(p) desc, err := bs.statter.Stat(ctx, dgst) if err == nil { // content already present return desc, nil } else if err != distribution.ErrBlobUnknown { context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err) // real error, return it return distribution.Descriptor{}, err } bp, err := bs.path(dgst) if err != nil { return distribution.Descriptor{}, err } // TODO(stevvooe): Write out mediatype here, as well. return distribution.Descriptor{ Size: int64(len(p)), // NOTE(stevvooe): The central blob store firewalls media types from // other users. The caller should look this up and override the value // for the specific repository. MediaType: "application/octet-stream", Digest: dgst, }, bs.driver.PutContent(ctx, bp, p) } // path returns the canonical path for the blob identified by digest. The blob // may or may not exist. func (bs *blobStore) path(dgst digest.Digest) (string, error) { bp, err := pathFor(blobDataPathSpec{ digest: dgst, }) if err != nil { return "", err } return bp, nil } // link links the path to the provided digest by writing the digest into the // target file. Caller must ensure that the blob actually exists. func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error { // The contents of the "link" file are the exact string contents of the // digest, which is specified in that package. return bs.driver.PutContent(ctx, path, []byte(dgst)) } // readlink returns the linked digest at path. func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) { content, err := bs.driver.GetContent(ctx, path) if err != nil { return "", err } linked, err := digest.ParseDigest(string(content)) if err != nil { return "", err } return linked, nil } // resolve reads the digest link at path and returns the blob store path. func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) { dgst, err := bs.readlink(ctx, path) if err != nil { return "", err } return bs.path(dgst) } type blobStatter struct { driver driver.StorageDriver } var _ distribution.BlobDescriptorService = &blobStatter{} // Stat implements BlobStatter.Stat by returning the descriptor for the blob // in the main blob store. If this method returns successfully, there is a // strong guarantee that the blob exists and is available. func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { path, err := pathFor(blobDataPathSpec{ digest: dgst, }) if err != nil { return distribution.Descriptor{}, err } fi, err := bs.driver.Stat(ctx, path) if err != nil { switch err := err.(type) { case driver.PathNotFoundError: return distribution.Descriptor{}, distribution.ErrBlobUnknown default: return distribution.Descriptor{}, err } } if fi.IsDir() { // NOTE(stevvooe): This represents a corruption situation. Somehow, we // calculated a blob path and then detected a directory. We log the // error and then error on the side of not knowing about the blob. context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path) return distribution.Descriptor{}, distribution.ErrBlobUnknown } // TODO(stevvooe): Add method to resolve the mediatype. We can store and // cache a "global" media type for the blob, even if a specific repo has a // mediatype that overrides the main one. 
return distribution.Descriptor{ Size: fi.Size(), // NOTE(stevvooe): The central blob store firewalls media types from // other users. The caller should look this up and override the value // for the specific repository. MediaType: "application/octet-stream", Digest: dgst, }, nil } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { return distribution.ErrUnsupported } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { return distribution.ErrUnsupported } distribution-2.3.0/registry/storage/blobwriter.go000066400000000000000000000262051265472114500222650ustar00rootroot00000000000000package storage import ( "errors" "fmt" "io" "path" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" ) var ( errResumableDigestNotAvailable = errors.New("resumable digest not available") ) // layerWriter is used to control the various aspects of resumable // layer upload. It implements the LayerUpload interface. type blobWriter struct { blobStore *linkedBlobStore id string startedAt time.Time digester digest.Digester written int64 // track the contiguous write // implementes io.WriteSeeker, io.ReaderFrom and io.Closer to satisfy // LayerUpload Interface bufferedFileWriter resumableDigestEnabled bool } var _ distribution.BlobWriter = &blobWriter{} // ID returns the identifier for this upload. func (bw *blobWriter) ID() string { return bw.id } func (bw *blobWriter) StartedAt() time.Time { return bw.startedAt } // Commit marks the upload as completed, returning a valid descriptor. The // final size and digest are checked against the first descriptor provided. func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { context.GetLogger(ctx).Debug("(*blobWriter).Commit") if err := bw.bufferedFileWriter.Close(); err != nil { return distribution.Descriptor{}, err } canonical, err := bw.validateBlob(ctx, desc) if err != nil { return distribution.Descriptor{}, err } if err := bw.moveBlob(ctx, canonical); err != nil { return distribution.Descriptor{}, err } if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil { return distribution.Descriptor{}, err } if err := bw.removeResources(ctx); err != nil { return distribution.Descriptor{}, err } err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical) if err != nil { return distribution.Descriptor{}, err } return canonical, nil } // Rollback the blob upload process, releasing any resources associated with // the writer and canceling the operation. func (bw *blobWriter) Cancel(ctx context.Context) error { context.GetLogger(ctx).Debug("(*blobWriter).Rollback") if err := bw.removeResources(ctx); err != nil { return err } bw.Close() return nil } func (bw *blobWriter) Write(p []byte) (int, error) { // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. 
if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { return 0, err } n, err := io.MultiWriter(&bw.bufferedFileWriter, bw.digester.Hash()).Write(p) bw.written += int64(n) return n, err } func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) { // Ensure that the current write offset matches how many bytes have been // written to the digester. If not, we need to update the digest state to // match the current write position. if err := bw.resumeDigestAt(bw.blobStore.ctx, bw.offset); err != nil && err != errResumableDigestNotAvailable { return 0, err } nn, err := bw.bufferedFileWriter.ReadFrom(io.TeeReader(r, bw.digester.Hash())) bw.written += nn return nn, err } func (bw *blobWriter) Close() error { if bw.err != nil { return bw.err } if err := bw.storeHashState(bw.blobStore.ctx); err != nil { return err } return bw.bufferedFileWriter.Close() } // validateBlob checks the data against the digest, returning an error if it // does not match. The canonical descriptor is returned. func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { var ( verified, fullHash bool canonical digest.Digest ) if desc.Digest == "" { // if no descriptors are provided, we have nothing to validate // against. We don't really want to support this for the registry. return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ Reason: fmt.Errorf("cannot validate against empty digest"), } } // Stat the on disk file if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // NOTE(stevvooe): We really don't care if the file is // not actually present for the reader. We now assume // that the desc length is zero. desc.Size = 0 default: // Any other error we want propagated up the stack. return distribution.Descriptor{}, err } } else { if fi.IsDir() { return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path) } bw.size = fi.Size() } if desc.Size > 0 { if desc.Size != bw.size { return distribution.Descriptor{}, distribution.ErrBlobInvalidLength } } else { // if provided 0 or negative length, we can assume caller doesn't know or // care about length. desc.Size = bw.size } // TODO(stevvooe): This section is very meandering. Need to be broken down // to be a lot more clear. if err := bw.resumeDigestAt(ctx, bw.size); err == nil { canonical = bw.digester.Digest() if canonical.Algorithm() == desc.Digest.Algorithm() { // Common case: client and server prefer the same canonical digest // algorithm - currently SHA256. verified = desc.Digest == canonical } else { // The client wants to use a different digest algorithm. They'll just // have to be patient and wait for us to download and re-hash the // uploaded content using that digest algorithm. fullHash = true } } else if err == errResumableDigestNotAvailable { // Not using resumable digests, so we need to hash the entire layer. fullHash = true } else { return distribution.Descriptor{}, err } if fullHash { // a fantastic optimization: if the the written data and the size are // the same, we don't need to read the data from the backend. This is // because we've written the entire file in the lifecycle of the // current instance. 
if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() { canonical = bw.digester.Digest() verified = desc.Digest == canonical } // If the check based on size fails, we fall back to the slowest of // paths. We may be able to make the size-based check a stronger // guarantee, so this may be defensive. if !verified { digester := digest.Canonical.New() digestVerifier, err := digest.NewDigestVerifier(desc.Digest) if err != nil { return distribution.Descriptor{}, err } // Read the file from the backend driver and validate it. fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size) if err != nil { return distribution.Descriptor{}, err } defer fr.Close() tr := io.TeeReader(fr, digester.Hash()) if _, err := io.Copy(digestVerifier, tr); err != nil { return distribution.Descriptor{}, err } canonical = digester.Digest() verified = digestVerifier.Verified() } } if !verified { context.GetLoggerWithFields(ctx, map[interface{}]interface{}{ "canonical": canonical, "provided": desc.Digest, }, "canonical", "provided"). Errorf("canonical digest does not match provided digest") return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{ Digest: desc.Digest, Reason: fmt.Errorf("content does not match digest"), } } // update desc with canonical hash desc.Digest = canonical if desc.MediaType == "" { desc.MediaType = "application/octet-stream" } return desc, nil } // moveBlob moves the data into its final, hash-qualified destination, // identified by dgst. The layer should be validated before commencing the // move. func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error { blobPath, err := pathFor(blobDataPathSpec{ digest: desc.Digest, }) if err != nil { return err } // Check for existence if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: break // ensure that it doesn't exist. default: return err } } else { // If the path exists, we can assume that the content has already // been uploaded, since the blob storage is content-addressable. // While it may be corrupted, detection of such corruption belongs // elsewhere. return nil } // If no data was received, we may not actually have a file on disk. Check // the size here and write a zero-length file to blobPath if this is the // case. For the most part, this should only ever happen with zero-length // tars. if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // HACK(stevvooe): This is slightly dangerous: if we verify above, // get a hash, then the underlying file is deleted, we risk moving // a zero-length blob into a nonzero-length blob location. To // prevent this horrid thing, we employ the hack of only allowing // this to happen for the digest of an empty tar. if desc.Digest == digest.DigestSha256EmptyTar { return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) } // We let this fail during the move below. logrus. WithField("upload.id", bw.ID()). WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest") default: return err // unrelated error } } // TODO(stevvooe): We should also write the mediatype when executing this move. return bw.blobStore.driver.Move(ctx, bw.path, blobPath) } // removeResources should clean up all resources associated with the upload // instance. An error will be returned if the clean up cannot proceed. 
If the // resources are already not present, no error will be returned. func (bw *blobWriter) removeResources(ctx context.Context) error { dataPath, err := pathFor(uploadDataPathSpec{ name: bw.blobStore.repository.Name().Name(), id: bw.id, }) if err != nil { return err } // Resolve and delete the containing directory, which should include any // upload related files. dirPath := path.Dir(dataPath) if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: break // already gone! default: // This should be uncommon enough such that returning an error // should be okay. At this point, the upload should be mostly // complete, but perhaps the backend became unaccessible. context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err) return err } } return nil } func (bw *blobWriter) Reader() (io.ReadCloser, error) { // todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4 try := 1 for try <= 5 { _, err := bw.bufferedFileWriter.driver.Stat(bw.ctx, bw.path) if err == nil { break } switch err.(type) { case storagedriver.PathNotFoundError: context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try) time.Sleep(1 * time.Second) try++ default: return nil, err } } readCloser, err := bw.bufferedFileWriter.driver.ReadStream(bw.ctx, bw.path, 0) if err != nil { return nil, err } return readCloser, nil } distribution-2.3.0/registry/storage/blobwriter_nonresumable.go000066400000000000000000000007251265472114500250360ustar00rootroot00000000000000// +build noresumabledigest package storage import ( "github.com/docker/distribution/context" ) // resumeHashAt is a noop when resumable digest support is disabled. func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { return errResumableDigestNotAvailable } // storeHashState is a noop when resumable digest support is disabled. func (bw *blobWriter) storeHashState(ctx context.Context) error { return errResumableDigestNotAvailable } distribution-2.3.0/registry/storage/blobwriter_resumable.go000066400000000000000000000113521265472114500243210ustar00rootroot00000000000000// +build !noresumabledigest package storage import ( "fmt" "io" "os" "path" "strconv" "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/stevvooe/resumable" // register resumable hashes with import _ "github.com/stevvooe/resumable/sha256" _ "github.com/stevvooe/resumable/sha512" ) // resumeDigestAt attempts to restore the state of the internal hash function // by loading the most recent saved hash state less than or equal to the given // offset. Any unhashed bytes remaining less than the given offset are hashed // from the content uploaded so far. func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { if !bw.resumableDigestEnabled { return errResumableDigestNotAvailable } if offset < 0 { return fmt.Errorf("cannot resume hash at negative offset: %d", offset) } h, ok := bw.digester.Hash().(resumable.Hash) if !ok { return errResumableDigestNotAvailable } if offset == int64(h.Len()) { // State of digester is already at the requested offset. return nil } // List hash states from storage backend. 
var hashStateMatch hashStateEntry hashStates, err := bw.getStoredHashStates(ctx) if err != nil { return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err) } // Find the highest stored hashState with offset less than or equal to // the requested offset. for _, hashState := range hashStates { if hashState.offset == offset { hashStateMatch = hashState break // Found an exact offset match. } else if hashState.offset < offset && hashState.offset > hashStateMatch.offset { // This offset is closer to the requested offset. hashStateMatch = hashState } else if hashState.offset > offset { // Remove any stored hash state with offsets higher than this one // as writes to this resumed hasher will make those invalid. This // is probably okay to skip for now since we don't expect anyone to // use the API in this way. For that reason, we don't treat // an error here as a fatal error, but only log it. if err := bw.driver.Delete(ctx, hashState.path); err != nil { logrus.Errorf("unable to delete stale hash state %q: %s", hashState.path, err) } } } if hashStateMatch.offset == 0 { // No need to load any state, just reset the hasher. h.Reset() } else { storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path) if err != nil { return err } if err = h.Restore(storedState); err != nil { return err } } // Mind the gap. if gapLen := offset - int64(h.Len()); gapLen > 0 { // Need to read content from the upload to catch up to the desired offset. fr, err := newFileReader(ctx, bw.driver, bw.path, bw.size) if err != nil { return err } defer fr.Close() if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) } if _, err := io.CopyN(h, fr, gapLen); err != nil { return err } } return nil } type hashStateEntry struct { offset int64 path string } // getStoredHashStates returns a slice of hashStateEntries for this upload. func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Name().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, }) if err != nil { return nil, err } paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix) if err != nil { if _, ok := err.(storagedriver.PathNotFoundError); !ok { return nil, err } // Treat PathNotFoundError as no entries. paths = nil } hashStateEntries := make([]hashStateEntry, 0, len(paths)) for _, p := range paths { pathSuffix := path.Base(p) // The suffix should be the offset.
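// For illustration (the exact layout comes from uploadHashStatePathSpec; the
// digits below are a made-up example): a state stored at a path ending in
// ".../hashstates/sha256/1337" yields an offset of 1337.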
offset, err := strconv.ParseInt(pathSuffix, 0, 64) if err != nil { logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err) } hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p}) } return hashStateEntries, nil } func (bw *blobWriter) storeHashState(ctx context.Context) error { if !bw.resumableDigestEnabled { return errResumableDigestNotAvailable } h, ok := bw.digester.Hash().(resumable.Hash) if !ok { return errResumableDigestNotAvailable } uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ name: bw.blobStore.repository.Name().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), }) if err != nil { return err } hashState, err := h.State() if err != nil { return err } return bw.driver.PutContent(ctx, uploadHashStatePath, hashState) } distribution-2.3.0/registry/storage/cache/000077500000000000000000000000001265472114500206215ustar00rootroot00000000000000distribution-2.3.0/registry/storage/cache/cache.go000066400000000000000000000016141265472114500222150ustar00rootroot00000000000000// Package cache provides facilities to speed up access to the storage // backend. package cache import ( "fmt" "github.com/docker/distribution" ) // BlobDescriptorCacheProvider provides repository scoped // BlobDescriptorService cache instances and a global descriptor cache. type BlobDescriptorCacheProvider interface { distribution.BlobDescriptorService RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) } // ValidateDescriptor provides a helper function to ensure that caches have // common criteria for admitting descriptors. func ValidateDescriptor(desc distribution.Descriptor) error { if err := desc.Digest.Validate(); err != nil { return err } if desc.Size < 0 { return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) } if desc.MediaType == "" { return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) } return nil } distribution-2.3.0/registry/storage/cache/cachecheck/000077500000000000000000000000001265472114500226625ustar00rootroot00000000000000distribution-2.3.0/registry/storage/cache/cachecheck/suite.go000066400000000000000000000136021265472114500243440ustar00rootroot00000000000000package cachecheck import ( "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/cache" ) // CheckBlobDescriptorCache takes a cache implementation through a common set // of operations. If adding new tests, please add them here so new // implementations get the benefit. This should be used for unit tests. 
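// A typical caller is a provider's own unit test; a minimal sketch, where
// NewMyBlobDescriptorCacheProvider stands in for the constructor of the
// implementation under test (see memory_test.go for a real instance):
//
//	func TestMyBlobDescriptorCache(t *testing.T) {
//		cachecheck.CheckBlobDescriptorCache(t, NewMyBlobDescriptorCacheProvider())
//	}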
func CheckBlobDescriptorCache(t *testing.T, provider cache.BlobDescriptorCacheProvider) { ctx := context.Background() checkBlobDescriptorCacheEmptyRepository(t, ctx, provider) checkBlobDescriptorCacheSetAndRead(t, ctx, provider) checkBlobDescriptorCacheClear(t, ctx, provider) } func checkBlobDescriptorCacheEmptyRepository(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { if _, err := provider.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty store: %v", err) } cache, err := provider.RepositoryScoped("") if err == nil { t.Fatalf("expected an error when asking for invalid repo") } cache, err = provider.RepositoryScoped("foo/bar") if err != nil { t.Fatalf("unexpected error getting repository: %v", err) } if err := cache.SetDescriptor(ctx, "", distribution.Descriptor{ Digest: "sha384:abc", Size: 10, MediaType: "application/octet-stream"}); err != digest.ErrDigestInvalidFormat { t.Fatalf("expected error with invalid digest: %v", err) } if err := cache.SetDescriptor(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", distribution.Descriptor{ Digest: "", Size: 10, MediaType: "application/octet-stream"}); err == nil { t.Fatalf("expected error setting value on invalid descriptor") } if _, err := cache.Stat(ctx, ""); err != digest.ErrDigestInvalidFormat { t.Fatalf("expected error checking for cache item with empty digest: %v", err) } if _, err := cache.Stat(ctx, "sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); err != distribution.ErrBlobUnknown { t.Fatalf("expected unknown blob error with empty repo: %v", err) } } func checkBlobDescriptorCacheSetAndRead(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:abc111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ Digest: "sha256:abc1111111111111111111111111111111111111111111111111111111111111", Size: 10, MediaType: "application/octet-stream"} cache, err := provider.RepositoryScoped("foo/bar") if err != nil { t.Fatalf("unexpected error getting scoped cache: %v", err) } if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { t.Fatalf("error setting descriptor: %v", err) } desc, err := cache.Stat(ctx, localDigest) if err != nil { t.Fatalf("unexpected error statting fake2:abc: %v", err) } if expected != desc { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } // also check that we set the canonical key ("fake:abc") desc, err = cache.Stat(ctx, localDigest) if err != nil { t.Fatalf("descriptor not returned for canonical key: %v", err) } if expected != desc { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } // ensure that global gets extra descriptor mapping desc, err = provider.Stat(ctx, localDigest) if err != nil { t.Fatalf("expected blob unknown in global cache: %v, %v", err, desc) } if desc != expected { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } // get at it through canonical descriptor desc, err = provider.Stat(ctx, expected.Digest) if err != nil { t.Fatalf("unexpected error checking glboal descriptor: %v", err) } if desc != expected { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } // now, we set the repo local mediatype to 
something else and ensure it // doesn't get changed in the provider cache. expected.MediaType = "application/json" if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { t.Fatalf("unexpected error setting descriptor: %v", err) } desc, err = cache.Stat(ctx, localDigest) if err != nil { t.Fatalf("unexpected error getting descriptor: %v", err) } if desc != expected { t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) } desc, err = provider.Stat(ctx, localDigest) if err != nil { t.Fatalf("unexpected error getting global descriptor: %v", err) } expected.MediaType = "application/octet-stream" // expect original mediatype in global if desc != expected { t.Fatalf("unexpected descriptor: %#v != %#v", desc, expected) } } func checkBlobDescriptorCacheClear(t *testing.T, ctx context.Context, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", Size: 10, MediaType: "application/octet-stream"} cache, err := provider.RepositoryScoped("foo/bar") if err != nil { t.Fatalf("unexpected error getting scoped cache: %v", err) } if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { t.Fatalf("error setting descriptor: %v", err) } desc, err := cache.Stat(ctx, localDigest) if err != nil { t.Fatalf("unexpected error statting fake2:abc: %v", err) } if expected != desc { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } err = cache.Clear(ctx, localDigest) if err != nil { t.Error(err) } desc, err = cache.Stat(ctx, localDigest) if err == nil { t.Fatalf("expected error statting deleted blob: %v", err) } } distribution-2.3.0/registry/storage/cache/cachedblobdescriptorstore.go000066400000000000000000000051241265472114500263740ustar00rootroot00000000000000package cache import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution" ) // Metrics is used to hold metric counters // related to the number of times a cache was // hit or missed. type Metrics struct { Requests uint64 Hits uint64 Misses uint64 } // MetricsTracker represents a metric tracker // which simply counts the number of hits and misses. type MetricsTracker interface { Hit() Miss() Metrics() Metrics } type cachedBlobStatter struct { cache distribution.BlobDescriptorService backend distribution.BlobDescriptorService tracker MetricsTracker } // NewCachedBlobStatter creates a new statter which prefers a cache and // falls back to a backend. func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { return &cachedBlobStatter{ cache: cache, backend: backend, } } // NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and // falls back to a backend. Hits and misses will send to the tracker. 
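// A minimal MetricsTracker sketch (not part of this package and not
// concurrency-safe; shown only to illustrate the interface contract):
//
//	type countingTracker struct{ m Metrics }
//
//	func (t *countingTracker) Hit()             { t.m.Requests++; t.m.Hits++ }
//	func (t *countingTracker) Miss()            { t.m.Requests++; t.m.Misses++ }
//	func (t *countingTracker) Metrics() Metrics { return t.m }
//
// Such a tracker can then be passed as the third argument below.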
func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { return &cachedBlobStatter{ cache: cache, backend: backend, tracker: tracker, } } func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { desc, err := cbds.cache.Stat(ctx, dgst) if err != nil { if err != distribution.ErrBlobUnknown { context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) } goto fallback } if cbds.tracker != nil { cbds.tracker.Hit() } return desc, nil fallback: if cbds.tracker != nil { cbds.tracker.Miss() } desc, err = cbds.backend.Stat(ctx, dgst) if err != nil { return desc, err } if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) } return desc, err } func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { err := cbds.cache.Clear(ctx, dgst) if err != nil { return err } err = cbds.backend.Clear(ctx, dgst) if err != nil { return err } return nil } func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) } return nil } distribution-2.3.0/registry/storage/cache/memory/000077500000000000000000000000001265472114500221315ustar00rootroot00000000000000distribution-2.3.0/registry/storage/cache/memory/memory.go000066400000000000000000000117221265472114500237730ustar00rootroot00000000000000package memory import ( "sync" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" ) type inMemoryBlobDescriptorCacheProvider struct { global *mapBlobDescriptorCache repositories map[string]*mapBlobDescriptorCache mu sync.RWMutex } // NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for // storing blob descriptor data. 
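// A minimal usage sketch (the repository name, digest and descriptor are
// illustrative values only):
//
//	provider := NewInMemoryBlobDescriptorCacheProvider()
//	repoCache, err := provider.RepositoryScoped("foo/bar")
//	if err != nil {
//		// handle invalid repository name
//	}
//	_ = repoCache.SetDescriptor(ctx, dgst, desc)
//	desc, err = repoCache.Stat(ctx, dgst)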
func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { return &inMemoryBlobDescriptorCacheProvider{ global: newMapBlobDescriptorCache(), repositories: make(map[string]*mapBlobDescriptorCache), } } func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { if _, err := reference.ParseNamed(repo); err != nil { return nil, err } imbdcp.mu.RLock() defer imbdcp.mu.RUnlock() return &repositoryScopedInMemoryBlobDescriptorCache{ repo: repo, parent: imbdcp, repository: imbdcp.repositories[repo], }, nil } func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { return imbdcp.global.Stat(ctx, dgst) } func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { return imbdcp.global.Clear(ctx, dgst) } func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { _, err := imbdcp.Stat(ctx, dgst) if err == distribution.ErrBlobUnknown { if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { // if the digests differ, set the other canonical mapping if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { return err } } // unknown, just set it return imbdcp.global.SetDescriptor(ctx, dgst, desc) } // we already know it, do nothing return err } // repositoryScopedInMemoryBlobDescriptorCache provides the request scoped // repository cache. Instances are not thread-safe but the delegated // operations are. type repositoryScopedInMemoryBlobDescriptorCache struct { repo string parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map repository *mapBlobDescriptorCache } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if rsimbdcp.repository == nil { return distribution.Descriptor{}, distribution.ErrBlobUnknown } return rsimbdcp.repository.Stat(ctx, dgst) } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { if rsimbdcp.repository == nil { return distribution.ErrBlobUnknown } return rsimbdcp.repository.Clear(ctx, dgst) } func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if rsimbdcp.repository == nil { // allocate map since we are setting it now. rsimbdcp.parent.mu.Lock() var ok bool // have to read back value since we may have allocated elsewhere. rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] if !ok { rsimbdcp.repository = newMapBlobDescriptorCache() rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository } rsimbdcp.parent.mu.Unlock() } if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { return err } return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) } // mapBlobDescriptorCache provides a simple map-based implementation of the // descriptor cache. 
type mapBlobDescriptorCache struct { descriptors map[digest.Digest]distribution.Descriptor mu sync.RWMutex } var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} func newMapBlobDescriptorCache() *mapBlobDescriptorCache { return &mapBlobDescriptorCache{ descriptors: make(map[digest.Digest]distribution.Descriptor), } } func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } mbdc.mu.RLock() defer mbdc.mu.RUnlock() desc, ok := mbdc.descriptors[dgst] if !ok { return distribution.Descriptor{}, distribution.ErrBlobUnknown } return desc, nil } func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { mbdc.mu.Lock() defer mbdc.mu.Unlock() delete(mbdc.descriptors, dgst) return nil } func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err } if err := cache.ValidateDescriptor(desc); err != nil { return err } mbdc.mu.Lock() defer mbdc.mu.Unlock() mbdc.descriptors[dgst] = desc return nil } distribution-2.3.0/registry/storage/cache/memory/memory_test.go000066400000000000000000000005111265472114500250240ustar00rootroot00000000000000package memory import ( "testing" "github.com/docker/distribution/registry/storage/cache/cachecheck" ) // TestInMemoryBlobInfoCache checks the in memory implementation is working // correctly. func TestInMemoryBlobInfoCache(t *testing.T) { cachecheck.CheckBlobDescriptorCache(t, NewInMemoryBlobDescriptorCacheProvider()) } distribution-2.3.0/registry/storage/cache/redis/000077500000000000000000000000001265472114500217275ustar00rootroot00000000000000distribution-2.3.0/registry/storage/cache/redis/redis.go000066400000000000000000000206611265472114500233710ustar00rootroot00000000000000package redis import ( "fmt" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" "github.com/garyburd/redigo/redis" ) // redisBlobStatService provides an implementation of // BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in // two parts. The first provide fast access to repository membership through a // redis set for each repo. The second is a redis hash keyed by the digest of // the layer, providing path, length and mediatype information. There is also // a per-repository redis hash of the blob descriptor, allowing override of // data. This is currently used to override the mediatype on a per-repository // basis. // // Note that there is no implied relationship between these two caches. The // layer may exist in one, both or none and the code must be written this way. type redisBlobDescriptorService struct { pool *redis.Pool // TODO(stevvooe): We use a pool because we don't have great control over // the cache lifecycle to manage connections. A new connection if fetched // for each operation. Once we have better lifecycle management of the // request objects, we can change this to a connection. } // NewRedisBlobDescriptorCacheProvider returns a new redis-based // BlobDescriptorCacheProvider using the provided redis connection pool. 
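// A minimal wiring sketch (the pool settings mirror those used in
// redis_test.go; the address is a placeholder):
//
//	pool := &redis.Pool{
//		Dial:    func() (redis.Conn, error) { return redis.Dial("tcp", "localhost:6379") },
//		MaxIdle: 1,
//	}
//	provider := NewRedisBlobDescriptorCacheProvider(pool)
//	repoCache, err := provider.RepositoryScoped("foo/bar")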
func NewRedisBlobDescriptorCacheProvider(pool *redis.Pool) cache.BlobDescriptorCacheProvider { return &redisBlobDescriptorService{ pool: pool, } } // RepositoryScoped returns the scoped cache. func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { if _, err := reference.ParseNamed(repo); err != nil { return nil, err } return &repositoryScopedRedisBlobDescriptorService{ repo: repo, upstream: rbds, }, nil } // Stat retrieves the descriptor data from the redis hash entry. func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } conn := rbds.pool.Get() defer conn.Close() return rbds.stat(ctx, conn, dgst) } func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { if err := dgst.Validate(); err != nil { return err } conn := rbds.pool.Get() defer conn.Close() // Not atomic in redis <= 2.3 reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype") if err != nil { return err } if reply == 0 { return distribution.ErrBlobUnknown } return nil } // stat provides an internal stat call that takes a connection parameter. This // allows some internal management of the connection scope. func (rbds *redisBlobDescriptorService) stat(ctx context.Context, conn redis.Conn, dgst digest.Digest) (distribution.Descriptor, error) { reply, err := redis.Values(conn.Do("HMGET", rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")) if err != nil { return distribution.Descriptor{}, err } // NOTE(stevvooe): The "size" field used to be "length". We treat a // missing "size" field here as an unknown blob, which causes a cache // miss, effectively migrating the field. if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil return distribution.Descriptor{}, distribution.ErrBlobUnknown } var desc distribution.Descriptor if _, err := redis.Scan(reply, &desc.Digest, &desc.Size, &desc.MediaType); err != nil { return distribution.Descriptor{}, err } return desc, nil } // SetDescriptor sets the descriptor data for the given digest using a redis // hash. A hash is used here since we may store unrelated fields about a layer // in the future. func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err } if err := cache.ValidateDescriptor(desc); err != nil { return err } conn := rbds.pool.Get() defer conn.Close() return rbds.setDescriptor(ctx, conn, dgst, desc) } func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { if _, err := conn.Do("HMSET", rbds.blobDescriptorHashKey(dgst), "digest", desc.Digest, "size", desc.Size); err != nil { return err } // Only set mediatype if not already set. 
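// For illustration, with blobDescriptorHashKey producing "blobs::<digest>",
// the HMSET above and the HSETNX below translate to roughly (digest and size
// are example values):
//
//	HMSET  blobs::sha256:abc... digest sha256:abc... size 1234
//	HSETNX blobs::sha256:abc... mediatype application/octet-stream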
if _, err := conn.Do("HSETNX", rbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { return err } return nil } func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { return "blobs::" + dgst.String() } type repositoryScopedRedisBlobDescriptorService struct { repo string upstream *redisBlobDescriptorService } var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{} // Stat ensures that the digest is a member of the specified repository and // forwards the descriptor request to the global blob store. If the media type // differs for the repository, we override it. func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { if err := dgst.Validate(); err != nil { return distribution.Descriptor{}, err } conn := rsrbds.upstream.pool.Get() defer conn.Close() // Check membership to repository first member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) if err != nil { return distribution.Descriptor{}, err } if !member { return distribution.Descriptor{}, distribution.ErrBlobUnknown } upstream, err := rsrbds.upstream.stat(ctx, conn, dgst) if err != nil { return distribution.Descriptor{}, err } // We allow a per repository mediatype, let's look it up here. mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype")) if err != nil { return distribution.Descriptor{}, err } if mediatype != "" { upstream.MediaType = mediatype } return upstream, nil } // Clear removes the descriptor from the cache and forwards to the upstream descriptor store func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error { if err := dgst.Validate(); err != nil { return err } conn := rsrbds.upstream.pool.Get() defer conn.Close() // Check membership to repository first member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst)) if err != nil { return err } if !member { return distribution.ErrBlobUnknown } return rsrbds.upstream.Clear(ctx, dgst) } func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { if err := dgst.Validate(); err != nil { return err } if err := cache.ValidateDescriptor(desc); err != nil { return err } if dgst != desc.Digest { if dgst.Algorithm() == desc.Digest.Algorithm() { return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest) } } conn := rsrbds.upstream.pool.Get() defer conn.Close() return rsrbds.setDescriptor(ctx, conn, dgst, desc) } func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error { if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil { return err } if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil { return err } // Override repository mediatype. if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil { return err } // Also set the values for the primary descriptor, if they differ by // algorithm (ie sha256 vs sha512). 
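// For example, a client may address a blob by a sha384 digest while the
// canonical descriptor carries a sha256 digest (as exercised in
// cachecheck/suite.go); the recursive call below stores the descriptor under
// both keys so that either digest resolves to it.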
if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() { if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil { return err } } return nil } func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string { return "repository::" + rsrbds.repo + "::blobs::" + dgst.String() } func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string { return "repository::" + rsrbds.repo + "::blobs" } distribution-2.3.0/registry/storage/cache/redis/redis_test.go000066400000000000000000000024241265472114500244250ustar00rootroot00000000000000package redis import ( "flag" "os" "testing" "time" "github.com/docker/distribution/registry/storage/cache/cachecheck" "github.com/garyburd/redigo/redis" ) var redisAddr string func init() { flag.StringVar(&redisAddr, "test.registry.storage.cache.redis.addr", "", "configure the address of a test instance of redis") } // TestRedisLayerInfoCache exercises a live redis instance using the cache // implementation. func TestRedisBlobDescriptorCacheProvider(t *testing.T) { if redisAddr == "" { // fallback to an environement variable redisAddr = os.Getenv("TEST_REGISTRY_STORAGE_CACHE_REDIS_ADDR") } if redisAddr == "" { // skip if still not set t.Skip("please set -registry.storage.cache.redis to test layer info cache against redis") } pool := &redis.Pool{ Dial: func() (redis.Conn, error) { return redis.Dial("tcp", redisAddr) }, MaxIdle: 1, MaxActive: 2, TestOnBorrow: func(c redis.Conn, t time.Time) error { _, err := c.Do("PING") return err }, Wait: false, // if a connection is not avialable, proceed without cache. } // Clear the database if _, err := pool.Get().Do("FLUSHDB"); err != nil { t.Fatalf("unexpected error flushing redis db: %v", err) } cachecheck.CheckBlobDescriptorCache(t, NewRedisBlobDescriptorCacheProvider(pool)) } distribution-2.3.0/registry/storage/catalog.go000066400000000000000000000032141265472114500215170ustar00rootroot00000000000000package storage import ( "errors" "io" "path" "strings" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" ) // ErrFinishedWalk is used when the called walk function no longer wants // to accept any more values. This is used for pagination when the // required number of repos have been found. var ErrFinishedWalk = errors.New("finished walk") // Returns a list, or partial list, of repositories in the registry. // Because it's a quite expensive operation, it should only be used when building up // an initial set of repositories. 
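// A minimal pagination sketch (mirroring the pattern in catalog_test.go; the
// page size of 100 is arbitrary):
//
//	repos := make([]string, 100)
//	last := ""
//	for {
//		n, err := reg.Repositories(ctx, repos, last)
//		// consume repos[:n]
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			// handle error
//		}
//		last = repos[n-1]
//	}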
func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { var foundRepos []string if len(repos) == 0 { return 0, errors.New("no space in slice") } root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return 0, err } err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() // lop the base path off repoPath := filePath[len(root)+1:] _, file := path.Split(repoPath) if file == "_layers" { repoPath = strings.TrimSuffix(repoPath, "/_layers") if repoPath > last { foundRepos = append(foundRepos, repoPath) } return ErrSkipDir } else if strings.HasPrefix(file, "_") { return ErrSkipDir } // if we've filled our array, no need to walk any further if len(foundRepos) == len(repos) { return ErrFinishedWalk } return nil }) n = copy(repos, foundRepos) // Signal that we have no more entries by setting EOF if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { errVal = io.EOF } return n, errVal } distribution-2.3.0/registry/storage/catalog_test.go000066400000000000000000000053031265472114500225570ustar00rootroot00000000000000package storage import ( "io" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) type setupEnv struct { ctx context.Context driver driver.StorageDriver expected []string registry distribution.Namespace } func setupFS(t *testing.T) *setupEnv { d := inmemory.New() c := []byte("") ctx := context.Background() registry, err := NewRegistry(ctx, d, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } rootpath, _ := pathFor(repositoriesRootPathSpec{}) repos := []string{ "/foo/a/_layers/1", "/foo/b/_layers/2", "/bar/c/_layers/3", "/bar/d/_layers/4", "/foo/d/in/_layers/5", "/an/invalid/repo", "/bar/d/_layers/ignored/dir/6", } for _, repo := range repos { if err := d.PutContent(ctx, rootpath+repo, c); err != nil { t.Fatalf("Unable to put to inmemory fs") } } expected := []string{ "bar/c", "bar/d", "foo/a", "foo/b", "foo/d/in", } return &setupEnv{ ctx: ctx, driver: d, expected: expected, registry: registry, } } func TestCatalog(t *testing.T) { env := setupFS(t) p := make([]string, 50) numFilled, err := env.registry.Repositories(env.ctx, p, "") if !testEq(p, env.expected, numFilled) { t.Errorf("Expected catalog repos err") } if err != io.EOF { t.Errorf("Catalog has more values which we aren't expecting") } } func TestCatalogInParts(t *testing.T) { env := setupFS(t) chunkLen := 2 p := make([]string, chunkLen) numFilled, err := env.registry.Repositories(env.ctx, p, "") if err == io.EOF || numFilled != len(p) { t.Errorf("Expected more values in catalog") } if !testEq(p, env.expected[0:chunkLen], numFilled) { t.Errorf("Expected catalog first chunk err") } lastRepo := p[len(p)-1] numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) if err == io.EOF || numFilled != len(p) { t.Errorf("Expected more values in catalog") } if !testEq(p, env.expected[chunkLen:chunkLen*2], numFilled) { t.Errorf("Expected catalog second chunk err") } lastRepo = p[len(p)-1] numFilled, err = env.registry.Repositories(env.ctx, p, lastRepo) if err != io.EOF { t.Errorf("Catalog has more values which we aren't expecting") } if !testEq(p, env.expected[chunkLen*2:chunkLen*3-1], numFilled) 
{ t.Errorf("Expected catalog third chunk err") } } func testEq(a, b []string, size int) bool { for cnt := 0; cnt < size-1; cnt++ { if a[cnt] != b[cnt] { return false } } return true } distribution-2.3.0/registry/storage/doc.go000066400000000000000000000002401265472114500206460ustar00rootroot00000000000000// Package storage contains storage services for use in the registry // application. It should be considered an internal package, as of Go 1.4. package storage distribution-2.3.0/registry/storage/driver/000077500000000000000000000000001265472114500210515ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/azure/000077500000000000000000000000001265472114500221775ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/azure/azure.go000066400000000000000000000236751265472114500236710ustar00rootroot00000000000000// Package azure provides a storagedriver.StorageDriver implementation to // store blobs in Microsoft Azure Blob Storage Service. package azure import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "strings" "time" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" azure "github.com/Azure/azure-sdk-for-go/storage" ) const driverName = "azure" const ( paramAccountName = "accountname" paramAccountKey = "accountkey" paramContainer = "container" paramRealm = "realm" ) type driver struct { client azure.BlobStorageClient container string } type baseEmbed struct{ base.Base } // Driver is a storagedriver.StorageDriver implementation backed by // Microsoft Azure Blob Storage Service. type Driver struct{ baseEmbed } func init() { factory.Register(driverName, &azureDriverFactory{}) } type azureDriverFactory struct{} func (factory *azureDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } // FromParameters constructs a new Driver with a given parameters map. func FromParameters(parameters map[string]interface{}) (*Driver, error) { accountName, ok := parameters[paramAccountName] if !ok || fmt.Sprint(accountName) == "" { return nil, fmt.Errorf("No %s parameter provided", paramAccountName) } accountKey, ok := parameters[paramAccountKey] if !ok || fmt.Sprint(accountKey) == "" { return nil, fmt.Errorf("No %s parameter provided", paramAccountKey) } container, ok := parameters[paramContainer] if !ok || fmt.Sprint(container) == "" { return nil, fmt.Errorf("No %s parameter provided", paramContainer) } realm, ok := parameters[paramRealm] if !ok || fmt.Sprint(realm) == "" { realm = azure.DefaultBaseURL } return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) } // New constructs a new Driver with the given Azure Storage Account credentials func New(accountName, accountKey, container, realm string) (*Driver, error) { api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true) if err != nil { return nil, err } blobClient := api.GetBlobService() // Create registry container if _, err = blobClient.CreateContainerIfNotExists(container, azure.ContainerAccessTypePrivate); err != nil { return nil, err } d := &driver{ client: blobClient, container: container} return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil } // Implement the storagedriver.StorageDriver interface. 
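// Usage note: a Driver is normally constructed via FromParameters with the
// storage section of the registry configuration. A minimal sketch of the
// expected keys, with placeholder values:
//
//	d, err := FromParameters(map[string]interface{}{
//		"accountname": "myaccount",
//		"accountkey":  "<base64-encoded key>",
//		"container":   "registry",
//		// "realm" is optional and defaults to azure.DefaultBaseURL
//	})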
func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { blob, err := d.client.GetBlob(d.container, path) if err != nil { if is404(err) { return nil, storagedriver.PathNotFoundError{Path: path} } return nil, err } return ioutil.ReadAll(blob) } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil { return err } if err := d.client.CreateBlockBlob(d.container, path); err != nil { return err } bs := newAzureBlockStorage(d.client) bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) _, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents)) return err } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if !ok { return nil, storagedriver.PathNotFoundError{Path: path} } info, err := d.client.GetBlobProperties(d.container, path) if err != nil { return nil, err } size := int64(info.ContentLength) if offset >= size { return ioutil.NopCloser(bytes.NewReader(nil)), nil } bytesRange := fmt.Sprintf("%v-", offset) resp, err := d.client.GetBlobRange(d.container, path, bytesRange) if err != nil { return nil, err } return resp, nil } // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { if blobExists, err := d.client.BlobExists(d.container, path); err != nil { return 0, err } else if !blobExists { err := d.client.CreateBlockBlob(d.container, path) if err != nil { return 0, err } } if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } bs := newAzureBlockStorage(d.client) bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize) zw := newZeroFillWriter(&bw) return zw.Write(d.container, path, offset, reader) } // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. 
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { // Check if the path is a blob if ok, err := d.client.BlobExists(d.container, path); err != nil { return nil, err } else if ok { blob, err := d.client.GetBlobProperties(d.container, path) if err != nil { return nil, err } mtim, err := time.Parse(http.TimeFormat, blob.LastModified) if err != nil { return nil, err } return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ Path: path, Size: int64(blob.ContentLength), ModTime: mtim, IsDir: false, }}, nil } // Check if path is a virtual container virtContainerPath := path if !strings.HasSuffix(virtContainerPath, "/") { virtContainerPath += "/" } blobs, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ Prefix: virtContainerPath, MaxResults: 1, }) if err != nil { return nil, err } if len(blobs.Blobs) > 0 { // path is a virtual container return storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{ Path: path, IsDir: true, }}, nil } // path is not a blob or virtual container return nil, storagedriver.PathNotFoundError{Path: path} } // List returns a list of the objects that are direct descendants of the given // path. func (d *driver) List(ctx context.Context, path string) ([]string, error) { if path == "/" { path = "" } blobs, err := d.listBlobs(d.container, path) if err != nil { return blobs, err } list := directDescendants(blobs, path) return list, nil } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath) err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) if err != nil { if is404(err) { return storagedriver.PathNotFoundError{Path: sourcePath} } return err } return d.client.DeleteBlob(d.container, sourcePath) } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { ok, err := d.client.DeleteBlobIfExists(d.container, path) if err != nil { return err } if ok { return nil // was a blob and deleted, return } // Not a blob, see if path is a virtual container with blobs blobs, err := d.listBlobs(d.container, path) if err != nil { return err } for _, b := range blobs { if err = d.client.DeleteBlob(d.container, b); err != nil { return err } } if len(blobs) == 0 { return storagedriver.PathNotFoundError{Path: path} } return nil } // URLFor returns a publicly accessible URL for the blob stored at given path // for specified duration by making use of Azure Storage Shared Access Signatures (SAS). // See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx for more info. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { expiresTime := time.Now().UTC().Add(20 * time.Minute) // default expiration expires, ok := options["expiry"] if ok { t, ok := expires.(time.Time) if ok { expiresTime = t } } return d.client.GetBlobSASURI(d.container, path, expiresTime, "r") } // directDescendants will find direct descendants (blobs or virtual containers) // of from list of blob paths and will return their full paths. 
Elements in blobs // list must be prefixed with a "/" and // // Example: direct descendants of "/" in {"/foo", "/bar/1", "/bar/2"} is // {"/foo", "/bar"} and direct descendants of "bar" is {"/bar/1", "/bar/2"} func directDescendants(blobs []string, prefix string) []string { if !strings.HasPrefix(prefix, "/") { // add trailing '/' prefix = "/" + prefix } if !strings.HasSuffix(prefix, "/") { // containerify the path prefix += "/" } out := make(map[string]bool) for _, b := range blobs { if strings.HasPrefix(b, prefix) { rel := b[len(prefix):] c := strings.Count(rel, "/") if c == 0 { out[b] = true } else { out[prefix+rel[:strings.Index(rel, "/")]] = true } } } var keys []string for k := range out { keys = append(keys, k) } return keys } func (d *driver) listBlobs(container, virtPath string) ([]string, error) { if virtPath != "" && !strings.HasSuffix(virtPath, "/") { // containerify the path virtPath += "/" } out := []string{} marker := "" for { resp, err := d.client.ListBlobs(d.container, azure.ListBlobsParameters{ Marker: marker, Prefix: virtPath, }) if err != nil { return out, err } for _, b := range resp.Blobs { out = append(out, b.Name) } if len(resp.Blobs) == 0 || resp.NextMarker == "" { break } marker = resp.NextMarker } return out, nil } func is404(err error) bool { e, ok := err.(azure.AzureStorageServiceError) return ok && e.StatusCode == http.StatusNotFound } distribution-2.3.0/registry/storage/driver/azure/azure_test.go000066400000000000000000000025721265472114500247210ustar00rootroot00000000000000package azure import ( "fmt" "os" "strings" "testing" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" . "gopkg.in/check.v1" ) const ( envAccountName = "AZURE_STORAGE_ACCOUNT_NAME" envAccountKey = "AZURE_STORAGE_ACCOUNT_KEY" envContainer = "AZURE_STORAGE_CONTAINER" envRealm = "AZURE_STORAGE_REALM" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { TestingT(t) } func init() { var ( accountName string accountKey string container string realm string ) config := []struct { env string value *string }{ {envAccountName, &accountName}, {envAccountKey, &accountKey}, {envContainer, &container}, {envRealm, &realm}, } missing := []string{} for _, v := range config { *v.value = os.Getenv(v.env) if *v.value == "" { missing = append(missing, v.env) } } azureDriverConstructor := func() (storagedriver.StorageDriver, error) { return New(accountName, accountKey, container, realm) } // Skip Azure storage driver tests if environment variable parameters are not provided skipCheck := func() string { if len(missing) > 0 { return fmt.Sprintf("Must set %s environment variables to run Azure tests", strings.Join(missing, ", ")) } return "" } testsuites.RegisterSuite(azureDriverConstructor, skipCheck) } distribution-2.3.0/registry/storage/driver/azure/blockblob.go000066400000000000000000000011341265472114500244560ustar00rootroot00000000000000package azure import ( "fmt" "io" azure "github.com/Azure/azure-sdk-for-go/storage" ) // azureBlockStorage is adaptor between azure.BlobStorageClient and // blockStorage interface. 
type azureBlockStorage struct { azure.BlobStorageClient } func (b *azureBlockStorage) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { return b.BlobStorageClient.GetBlobRange(container, blob, fmt.Sprintf("%v-%v", start, start+length-1)) } func newAzureBlockStorage(b azure.BlobStorageClient) azureBlockStorage { a := azureBlockStorage{} a.BlobStorageClient = b return a } distribution-2.3.0/registry/storage/driver/azure/blockblob_test.go000066400000000000000000000071151265472114500255220ustar00rootroot00000000000000package azure import ( "bytes" "fmt" "io" "io/ioutil" azure "github.com/Azure/azure-sdk-for-go/storage" ) type StorageSimulator struct { blobs map[string]*BlockBlob } type BlockBlob struct { blocks map[string]*DataBlock blockList []string } type DataBlock struct { data []byte committed bool } func (s *StorageSimulator) path(container, blob string) string { return fmt.Sprintf("%s/%s", container, blob) } func (s *StorageSimulator) BlobExists(container, blob string) (bool, error) { _, ok := s.blobs[s.path(container, blob)] return ok, nil } func (s *StorageSimulator) GetBlob(container, blob string) (io.ReadCloser, error) { bb, ok := s.blobs[s.path(container, blob)] if !ok { return nil, fmt.Errorf("blob not found") } var readers []io.Reader for _, bID := range bb.blockList { readers = append(readers, bytes.NewReader(bb.blocks[bID].data)) } return ioutil.NopCloser(io.MultiReader(readers...)), nil } func (s *StorageSimulator) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) { r, err := s.GetBlob(container, blob) if err != nil { return nil, err } b, err := ioutil.ReadAll(r) if err != nil { return nil, err } return ioutil.NopCloser(bytes.NewReader(b[start : start+length])), nil } func (s *StorageSimulator) CreateBlockBlob(container, blob string) error { path := s.path(container, blob) bb := &BlockBlob{ blocks: make(map[string]*DataBlock), blockList: []string{}, } s.blobs[path] = bb return nil } func (s *StorageSimulator) PutBlock(container, blob, blockID string, chunk []byte) error { path := s.path(container, blob) bb, ok := s.blobs[path] if !ok { return fmt.Errorf("blob not found") } data := make([]byte, len(chunk)) copy(data, chunk) bb.blocks[blockID] = &DataBlock{data: data, committed: false} // add block to blob return nil } func (s *StorageSimulator) GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) { resp := azure.BlockListResponse{} bb, ok := s.blobs[s.path(container, blob)] if !ok { return resp, fmt.Errorf("blob not found") } // Iterate committed blocks (in order) if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { for _, blockID := range bb.blockList { b := bb.blocks[blockID] block := azure.BlockResponse{ Name: blockID, Size: int64(len(b.data)), } resp.CommittedBlocks = append(resp.CommittedBlocks, block) } } // Iterate uncommitted blocks (in no order) if blockType == azure.BlockListTypeAll || blockType == azure.BlockListTypeCommitted { for blockID, b := range bb.blocks { block := azure.BlockResponse{ Name: blockID, Size: int64(len(b.data)), } if !b.committed { resp.UncommittedBlocks = append(resp.UncommittedBlocks, block) } } } return resp, nil } func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.Block) error { bb, ok := s.blobs[s.path(container, blob)] if !ok { return fmt.Errorf("blob not found") } var blockIDs []string for _, v := range blocks { bl, ok := bb.blocks[v.ID] if !ok { // check if 
block ID exists return fmt.Errorf("Block id '%s' not found", v.ID) } bl.committed = true blockIDs = append(blockIDs, v.ID) } // Mark all other blocks uncommitted for k, b := range bb.blocks { inList := false for _, v := range blockIDs { if k == v { inList = true break } } if !inList { b.committed = false } } bb.blockList = blockIDs return nil } func NewStorageSimulator() StorageSimulator { return StorageSimulator{ blobs: make(map[string]*BlockBlob), } } distribution-2.3.0/registry/storage/driver/azure/blockid.go000066400000000000000000000023771265472114500241460ustar00rootroot00000000000000package azure import ( "encoding/base64" "fmt" "math/rand" "sync" "time" azure "github.com/Azure/azure-sdk-for-go/storage" ) type blockIDGenerator struct { pool map[string]bool r *rand.Rand m sync.Mutex } // Generate returns an unused random block id and adds the generated ID // to list of used IDs so that the same block name is not used again. func (b *blockIDGenerator) Generate() string { b.m.Lock() defer b.m.Unlock() var id string for { id = toBlockID(int(b.r.Int())) if !b.exists(id) { break } } b.pool[id] = true return id } func (b *blockIDGenerator) exists(id string) bool { _, used := b.pool[id] return used } func (b *blockIDGenerator) Feed(blocks azure.BlockListResponse) { b.m.Lock() defer b.m.Unlock() for _, bl := range append(blocks.CommittedBlocks, blocks.UncommittedBlocks...) { b.pool[bl.Name] = true } } func newBlockIDGenerator() *blockIDGenerator { return &blockIDGenerator{ pool: make(map[string]bool), r: rand.New(rand.NewSource(time.Now().UnixNano()))} } // toBlockId converts given integer to base64-encoded block ID of a fixed length. func toBlockID(i int) string { s := fmt.Sprintf("%029d", i) // add zero padding for same length-blobs return base64.StdEncoding.EncodeToString([]byte(s)) } distribution-2.3.0/registry/storage/driver/azure/blockid_test.go000066400000000000000000000035341265472114500252010ustar00rootroot00000000000000package azure import ( "math" "testing" azure "github.com/Azure/azure-sdk-for-go/storage" ) func Test_blockIdGenerator(t *testing.T) { r := newBlockIDGenerator() for i := 1; i <= 10; i++ { if expected := i - 1; len(r.pool) != expected { t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) } if id := r.Generate(); id == "" { t.Fatal("returned empty id") } if expected := i; len(r.pool) != expected { t.Fatalf("rand pool has wrong number of items: %d, expected:%d", len(r.pool), expected) } } } func Test_blockIdGenerator_Feed(t *testing.T) { r := newBlockIDGenerator() if expected := 0; len(r.pool) != expected { t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) } // feed empty list blocks := azure.BlockListResponse{} r.Feed(blocks) if expected := 0; len(r.pool) != expected { t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) } // feed blocks blocks = azure.BlockListResponse{ CommittedBlocks: []azure.BlockResponse{ {"1", 1}, {"2", 2}, }, UncommittedBlocks: []azure.BlockResponse{ {"3", 3}, }} r.Feed(blocks) if expected := 3; len(r.pool) != expected { t.Fatalf("rand pool had wrong number of items: %d, expected:%d", len(r.pool), expected) } // feed same block IDs with committed/uncommitted place changed blocks = azure.BlockListResponse{ CommittedBlocks: []azure.BlockResponse{ {"3", 3}, }, UncommittedBlocks: []azure.BlockResponse{ {"1", 1}, }} r.Feed(blocks) if expected := 3; len(r.pool) != expected { t.Fatalf("rand pool had wrong number of items: %d, expected:%d", 
len(r.pool), expected) } } func Test_toBlockId(t *testing.T) { min := 0 max := math.MaxInt64 if len(toBlockID(min)) != len(toBlockID(max)) { t.Fatalf("different-sized blockIDs are returned") } } distribution-2.3.0/registry/storage/driver/azure/randomwriter.go000066400000000000000000000153751265472114500252560ustar00rootroot00000000000000package azure import ( "fmt" "io" "io/ioutil" azure "github.com/Azure/azure-sdk-for-go/storage" ) // blockStorage is the interface required from a block storage service // client implementation type blockStorage interface { CreateBlockBlob(container, blob string) error GetBlob(container, blob string) (io.ReadCloser, error) GetSectionReader(container, blob string, start, length int64) (io.ReadCloser, error) PutBlock(container, blob, blockID string, chunk []byte) error GetBlockList(container, blob string, blockType azure.BlockListType) (azure.BlockListResponse, error) PutBlockList(container, blob string, blocks []azure.Block) error } // randomBlobWriter enables random access semantics on Azure block blobs // by enabling writing arbitrary length of chunks to arbitrary write offsets // within the blob. Normally, Azure Blob Storage does not support random // access semantics on block blobs; however, this writer can download, split and // reupload the overlapping blocks and discards those being overwritten entirely. type randomBlobWriter struct { bs blockStorage blockSize int } func newRandomBlobWriter(bs blockStorage, blockSize int) randomBlobWriter { return randomBlobWriter{bs: bs, blockSize: blockSize} } // WriteBlobAt writes the given chunk to the specified position of an existing blob. // The offset must be equals to size of the blob or smaller than it. func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) { rand := newBlockIDGenerator() blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) if err != nil { return 0, err } rand.Feed(blocks) // load existing block IDs // Check for write offset for existing blob size := getBlobSize(blocks) if offset < 0 || offset > size { return 0, fmt.Errorf("wrong offset for Write: %v", offset) } // Upload the new chunk as blocks blockList, nn, err := r.writeChunkToBlocks(container, blob, chunk, rand) if err != nil { return 0, err } // For non-append operations, existing blocks may need to be splitted if offset != size { // Split the block on the left end (if any) leftBlocks, err := r.blocksLeftSide(container, blob, offset, rand) if err != nil { return 0, err } blockList = append(leftBlocks, blockList...) // Split the block on the right end (if any) rightBlocks, err := r.blocksRightSide(container, blob, offset, nn, rand) if err != nil { return 0, err } blockList = append(blockList, rightBlocks...) } else { // Use existing block list var existingBlocks []azure.Block for _, v := range blocks.CommittedBlocks { existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) } blockList = append(existingBlocks, blockList...) } // Put block list return nn, r.bs.PutBlockList(container, blob, blockList) } func (r *randomBlobWriter) GetSize(container, blob string) (int64, error) { blocks, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeCommitted) if err != nil { return 0, err } return getBlobSize(blocks), nil } // writeChunkToBlocks writes given chunk to one or multiple blocks within specified // blob and returns their block representations. 
Those blocks are not committed, yet func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.Reader, rand *blockIDGenerator) ([]azure.Block, int64, error) { var newBlocks []azure.Block var nn int64 // Read chunks of at most size N except the last chunk to // maximize block size and minimize block count. buf := make([]byte, r.blockSize) for { n, err := io.ReadFull(chunk, buf) if err == io.EOF { break } nn += int64(n) data := buf[:n] blockID := rand.Generate() if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { return newBlocks, nn, err } newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted}) } return newBlocks, nn, nil } // blocksLeftSide returns the blocks that are going to be at the left side of // the writeOffset: [0, writeOffset) by identifying blocks that will remain // the same and splitting blocks and reuploading them as needed. func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset int64, rand *blockIDGenerator) ([]azure.Block, error) { var left []azure.Block bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) if err != nil { return left, err } o := writeOffset elapsed := int64(0) for _, v := range bx.CommittedBlocks { blkSize := int64(v.Size) if o >= blkSize { // use existing block left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) o -= blkSize elapsed += blkSize } else if o > 0 { // current block needs to be splitted start := elapsed size := o part, err := r.bs.GetSectionReader(container, blob, start, size) if err != nil { return left, err } newBlockID := rand.Generate() data, err := ioutil.ReadAll(part) if err != nil { return left, err } if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { return left, err } left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) break } } return left, nil } // blocksRightSide returns the blocks that are going to be at the right side of // the written chunk: [writeOffset+size, +inf) by identifying blocks that will remain // the same and splitting blocks and reuploading them as needed. 
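// For example (mirroring a case in randomwriter_test.go): with committed
// blocks "AAAAA", "BBBBB", "CCC" and a chunk written at offset 3 with size 8,
// the write covers bytes [3, 10], so only the tail of the last block ("CC")
// is split off and reuploaded as the right side.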
func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset int64, chunkSize int64, rand *blockIDGenerator) ([]azure.Block, error) { var right []azure.Block bx, err := r.bs.GetBlockList(container, blob, azure.BlockListTypeAll) if err != nil { return nil, err } re := writeOffset + chunkSize - 1 // right end of written chunk var elapsed int64 for _, v := range bx.CommittedBlocks { var ( bs = elapsed // left end of current block be = elapsed + int64(v.Size) - 1 // right end of current block ) if bs > re { // take the block as is right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted}) } else if be > re { // current block needs to be splitted part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) if err != nil { return right, err } newBlockID := rand.Generate() data, err := ioutil.ReadAll(part) if err != nil { return right, err } if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { return right, err } right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted}) } elapsed += int64(v.Size) } return right, nil } func getBlobSize(blocks azure.BlockListResponse) int64 { var n int64 for _, v := range blocks.CommittedBlocks { n += int64(v.Size) } return n } distribution-2.3.0/registry/storage/driver/azure/randomwriter_test.go000066400000000000000000000266321265472114500263130ustar00rootroot00000000000000package azure import ( "bytes" "io" "io/ioutil" "math/rand" "reflect" "strings" "testing" azure "github.com/Azure/azure-sdk-for-go/storage" ) func TestRandomWriter_writeChunkToBlocks(t *testing.T) { s := NewStorageSimulator() rw := newRandomBlobWriter(&s, 3) rand := newBlockIDGenerator() c := []byte("AAABBBCCCD") if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } bw, nn, err := rw.writeChunkToBlocks("a", "b", bytes.NewReader(c), rand) if err != nil { t.Fatal(err) } if expected := int64(len(c)); nn != expected { t.Fatalf("wrong nn:%v, expected:%v", nn, expected) } if expected := 4; len(bw) != expected { t.Fatal("unexpected written block count") } bx, err := s.GetBlockList("a", "b", azure.BlockListTypeAll) if err != nil { t.Fatal(err) } if expected := 0; len(bx.CommittedBlocks) != expected { t.Fatal("unexpected committed block count") } if expected := 4; len(bx.UncommittedBlocks) != expected { t.Fatalf("unexpected uncommitted block count: %d -- %#v", len(bx.UncommittedBlocks), bx) } if err := rw.bs.PutBlockList("a", "b", bw); err != nil { t.Fatal(err) } r, err := rw.bs.GetBlob("a", "b") if err != nil { t.Fatal(err) } assertBlobContents(t, r, c) } func TestRandomWriter_blocksLeftSide(t *testing.T) { blob := "AAAAABBBBBCCC" cases := []struct { offset int64 expectedBlob string expectedPattern []azure.BlockStatus }{ {0, "", []azure.BlockStatus{}}, // write to beginning, discard all {13, blob, []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to end, no change {1, "A", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // write at 1 {5, "AAAAA", []azure.BlockStatus{azure.BlockStatusCommitted}}, // write just after first block {6, "AAAAAB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // split the second block {9, "AAAAABBBB", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusUncommitted}}, // write just after first block } for _, c := range cases { s := NewStorageSimulator() rw := newRandomBlobWriter(&s, 5) rand := newBlockIDGenerator() if err := 
rw.bs.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) if err != nil { t.Fatal(err) } if err := rw.bs.PutBlockList("a", "b", bw); err != nil { t.Fatal(err) } bx, err := rw.blocksLeftSide("a", "b", c.offset, rand) if err != nil { t.Fatal(err) } bs := []azure.BlockStatus{} for _, v := range bx { bs = append(bs, v.Status) } if !reflect.DeepEqual(bs, c.expectedPattern) { t.Logf("Committed blocks %v", bw) t.Fatalf("For offset %v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.expectedPattern, bs, bx) } if rw.bs.PutBlockList("a", "b", bx); err != nil { t.Fatal(err) } r, err := rw.bs.GetBlob("a", "b") if err != nil { t.Fatal(err) } cout, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } outBlob := string(cout) if outBlob != c.expectedBlob { t.Fatalf("wrong blob contents: %v, expected: %v", outBlob, c.expectedBlob) } } } func TestRandomWriter_blocksRightSide(t *testing.T) { blob := "AAAAABBBBBCCC" cases := []struct { offset int64 size int64 expectedBlob string expectedPattern []azure.BlockStatus }{ {0, 100, "", []azure.BlockStatus{}}, // overwrite the entire blob {0, 3, "AABBBBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // split first block {4, 1, "BBBBBCCC", []azure.BlockStatus{azure.BlockStatusCommitted, azure.BlockStatusCommitted}}, // write to last char of first block {1, 6, "BBBCCC", []azure.BlockStatus{azure.BlockStatusUncommitted, azure.BlockStatusCommitted}}, // overwrite splits first and second block, last block remains {3, 8, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite a block in middle block, split end block {10, 1, "CC", []azure.BlockStatus{azure.BlockStatusUncommitted}}, // overwrite first byte of rightmost block {11, 2, "", []azure.BlockStatus{}}, // overwrite the rightmost index {13, 20, "", []azure.BlockStatus{}}, // append to the end } for _, c := range cases { s := NewStorageSimulator() rw := newRandomBlobWriter(&s, 5) rand := newBlockIDGenerator() if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } bw, _, err := rw.writeChunkToBlocks("a", "b", strings.NewReader(blob), rand) if err != nil { t.Fatal(err) } if err := rw.bs.PutBlockList("a", "b", bw); err != nil { t.Fatal(err) } bx, err := rw.blocksRightSide("a", "b", c.offset, c.size, rand) if err != nil { t.Fatal(err) } bs := []azure.BlockStatus{} for _, v := range bx { bs = append(bs, v.Status) } if !reflect.DeepEqual(bs, c.expectedPattern) { t.Logf("Committed blocks %v", bw) t.Fatalf("For offset %v-size:%v: Expected pattern: %v, Got: %v\n(Returned: %v)", c.offset, c.size, c.expectedPattern, bs, bx) } if rw.bs.PutBlockList("a", "b", bx); err != nil { t.Fatal(err) } r, err := rw.bs.GetBlob("a", "b") if err != nil { t.Fatal(err) } cout, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } outBlob := string(cout) if outBlob != c.expectedBlob { t.Fatalf("For offset %v-size:%v: wrong blob contents: %v, expected: %v", c.offset, c.size, outBlob, c.expectedBlob) } } } func TestRandomWriter_Write_NewBlob(t *testing.T) { var ( s = NewStorageSimulator() rw = newRandomBlobWriter(&s, 1024*3) // 3 KB blocks blob = randomContents(1024 * 7) // 7 KB blob ) if err := rw.bs.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } if _, err := rw.WriteBlobAt("a", "b", 10, bytes.NewReader(blob)); err == nil { t.Fatal("expected error, got nil") } if _, err := rw.WriteBlobAt("a", "b", 100000, bytes.NewReader(blob)); err == nil { 
t.Fatal("expected error, got nil") } if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(blob)); err != nil { t.Fatal(err) } else if expected := int64(len(blob)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := rw.bs.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, blob) } if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { t.Fatal(err) } else if len(bx.CommittedBlocks) != 3 { t.Fatalf("got wrong number of committed blocks: %v", len(bx.CommittedBlocks)) } // Replace first 512 bytes leftChunk := randomContents(512) blob = append(leftChunk, blob[512:]...) if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(leftChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(leftChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := rw.bs.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, blob) } if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { t.Fatal(err) } else if expected := 4; len(bx.CommittedBlocks) != expected { t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) } // Replace last 512 bytes with 1024 bytes rightChunk := randomContents(1024) offset := int64(len(blob) - 512) blob = append(blob[:offset], rightChunk...) if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(rightChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(rightChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := rw.bs.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, blob) } if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { t.Fatal(err) } else if expected := 5; len(bx.CommittedBlocks) != expected { t.Fatalf("got wrong number of committed blocks: %v, expected: %v", len(bx.CommittedBlocks), expected) } // Replace 2K-4K (overlaps 2 blocks from L/R) newChunk := randomContents(1024 * 2) offset = 1024 * 2 blob = append(append(blob[:offset], newChunk...), blob[offset+int64(len(newChunk)):]...) 
if nn, err := rw.WriteBlobAt("a", "b", offset, bytes.NewReader(newChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(newChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := rw.bs.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, blob) } if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { t.Fatal(err) } else if expected := 6; len(bx.CommittedBlocks) != expected { t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) } // Replace the entire blob newBlob := randomContents(1024 * 30) if nn, err := rw.WriteBlobAt("a", "b", 0, bytes.NewReader(newBlob)); err != nil { t.Fatal(err) } else if expected := int64(len(newBlob)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := rw.bs.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, newBlob) } if bx, err := rw.bs.GetBlockList("a", "b", azure.BlockListTypeCommitted); err != nil { t.Fatal(err) } else if expected := 10; len(bx.CommittedBlocks) != expected { t.Fatalf("got wrong number of committed blocks: %v, expected: %v\n%v", len(bx.CommittedBlocks), expected, bx.CommittedBlocks) } else if expected, size := int64(1024*30), getBlobSize(bx); size != expected { t.Fatalf("committed block size does not indicate blob size") } } func Test_getBlobSize(t *testing.T) { // with some committed blocks if expected, size := int64(151), getBlobSize(azure.BlockListResponse{ CommittedBlocks: []azure.BlockResponse{ {"A", 100}, {"B", 50}, {"C", 1}, }, UncommittedBlocks: []azure.BlockResponse{ {"D", 200}, }}); expected != size { t.Fatalf("wrong blob size: %v, expected: %v", size, expected) } // with no committed blocks if expected, size := int64(0), getBlobSize(azure.BlockListResponse{ UncommittedBlocks: []azure.BlockResponse{ {"A", 100}, {"B", 50}, {"C", 1}, {"D", 200}, }}); expected != size { t.Fatalf("wrong blob size: %v, expected: %v", size, expected) } } func assertBlobContents(t *testing.T, r io.Reader, expected []byte) { out, err := ioutil.ReadAll(r) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(out, expected) { t.Fatalf("wrong blob contents. size: %v, expected: %v", len(out), len(expected)) } } func randomContents(length int64) []byte { b := make([]byte, length) for i := range b { b[i] = byte(rand.Intn(2 << 8)) } return b } distribution-2.3.0/registry/storage/driver/azure/zerofillwriter.go000066400000000000000000000025321265472114500256130ustar00rootroot00000000000000package azure import ( "bytes" "io" ) type blockBlobWriter interface { GetSize(container, blob string) (int64, error) WriteBlobAt(container, blob string, offset int64, chunk io.Reader) (int64, error) } // zeroFillWriter enables writing to an offset outside a block blob's size // by offering the chunk to the underlying writer as a contiguous data with // the gap in between filled with NUL (zero) bytes. type zeroFillWriter struct { blockBlobWriter } func newZeroFillWriter(b blockBlobWriter) zeroFillWriter { w := zeroFillWriter{} w.blockBlobWriter = b return w } // Write writes the given chunk to the specified existing blob even though // offset is out of blob's size. The gaps are filled with zeros. Returned // written number count does not include zeros written. 
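// A minimal illustrative sketch of the gap arithmetic (sizes are hypothetical):
// with an existing blob of 100 bytes, writing a 10-byte chunk at offset 150
// prepends 50 zero bytes, grows the blob to 160 bytes, and still reports only
// the 10 chunk bytes as written:
//
//	nn, err := zw.Write(container, blob, 150, bytes.NewReader(chunk)) // len(chunk) == 10
//	// nn == 10; the 50 padding zeros are not counted
//
// container, blob and chunk are placeholders for the caller's values.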
func (z *zeroFillWriter) Write(container, blob string, offset int64, chunk io.Reader) (int64, error) { size, err := z.blockBlobWriter.GetSize(container, blob) if err != nil { return 0, err } var reader io.Reader var zeroPadding int64 if offset <= size { reader = chunk } else { zeroPadding = offset - size offset = size // adjust offset to be the append index zeros := bytes.NewReader(make([]byte, zeroPadding)) reader = io.MultiReader(zeros, chunk) } nn, err := z.blockBlobWriter.WriteBlobAt(container, blob, offset, reader) nn -= zeroPadding return nn, err } distribution-2.3.0/registry/storage/driver/azure/zerofillwriter_test.go000066400000000000000000000072171265472114500266570ustar00rootroot00000000000000package azure import ( "bytes" "testing" ) func Test_zeroFillWrite_AppendNoGap(t *testing.T) { s := NewStorageSimulator() bw := newRandomBlobWriter(&s, 1024*1) zw := newZeroFillWriter(&bw) if err := s.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } firstChunk := randomContents(1024*3 + 512) if nn, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(firstChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, firstChunk) } secondChunk := randomContents(256) if nn, err := zw.Write("a", "b", int64(len(firstChunk)), bytes.NewReader(secondChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(secondChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, append(firstChunk, secondChunk...)) } } func Test_zeroFillWrite_StartWithGap(t *testing.T) { s := NewStorageSimulator() bw := newRandomBlobWriter(&s, 1024*2) zw := newZeroFillWriter(&bw) if err := s.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } chunk := randomContents(1024 * 5) padding := int64(1024*2 + 256) if nn, err := zw.Write("a", "b", padding, bytes.NewReader(chunk)); err != nil { t.Fatal(err) } else if expected := int64(len(chunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, append(make([]byte, padding), chunk...)) } } func Test_zeroFillWrite_AppendWithGap(t *testing.T) { s := NewStorageSimulator() bw := newRandomBlobWriter(&s, 1024*2) zw := newZeroFillWriter(&bw) if err := s.CreateBlockBlob("a", "b"); err != nil { t.Fatal(err) } firstChunk := randomContents(1024*3 + 512) if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { t.Fatal(err) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, firstChunk) } secondChunk := randomContents(256) padding := int64(1024 * 4) if nn, err := zw.Write("a", "b", int64(len(firstChunk))+padding, bytes.NewReader(secondChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(secondChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, append(firstChunk, append(make([]byte, padding), secondChunk...)...)) } } func Test_zeroFillWrite_LiesWithinSize(t *testing.T) { s := NewStorageSimulator() bw := newRandomBlobWriter(&s, 1024*2) zw := newZeroFillWriter(&bw) if err := s.CreateBlockBlob("a", 
"b"); err != nil { t.Fatal(err) } firstChunk := randomContents(1024 * 3) if _, err := zw.Write("a", "b", 0, bytes.NewReader(firstChunk)); err != nil { t.Fatal(err) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, firstChunk) } // in this case, zerofill won't be used secondChunk := randomContents(256) if nn, err := zw.Write("a", "b", 0, bytes.NewReader(secondChunk)); err != nil { t.Fatal(err) } else if expected := int64(len(secondChunk)); expected != nn { t.Fatalf("wrong written bytes count: %v, expected: %v", nn, expected) } if out, err := s.GetBlob("a", "b"); err != nil { t.Fatal(err) } else { assertBlobContents(t, out, append(secondChunk, firstChunk[len(secondChunk):]...)) } } distribution-2.3.0/registry/storage/driver/base/000077500000000000000000000000001265472114500217635ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/base/base.go000066400000000000000000000155571265472114500232410ustar00rootroot00000000000000// Package base provides a base implementation of the storage driver that can // be used to implement common checks. The goal is to increase the amount of // code sharing. // // The canonical approach to use this class is to embed in the exported driver // struct such that calls are proxied through this implementation. First, // declare the internal driver, as follows: // // type driver struct { ... internal ...} // // The resulting type should implement StorageDriver such that it can be the // target of a Base struct. The exported type can then be declared as follows: // // type Driver struct { // Base // } // // Because Driver embeds Base, it effectively implements Base. If the driver // needs to intercept a call, before going to base, Driver should implement // that method. Effectively, Driver can intercept calls before coming in and // driver implements the actual logic. // // To further shield the embed from other packages, it is recommended to // employ a private embed struct: // // type baseEmbed struct { // base.Base // } // // Then, declare driver to embed baseEmbed, rather than Base directly: // // type Driver struct { // baseEmbed // } // // The type now implements StorageDriver, proxying through Base, without // exporting an unnecessary field. package base import ( "io" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) // Base provides a wrapper around a storagedriver implementation that provides // common path and bounds checking. type Base struct { storagedriver.StorageDriver } // Format errors received from the storage driver func (base *Base) setDriverName(e error) error { switch actual := e.(type) { case nil: return nil case storagedriver.ErrUnsupportedMethod: actual.DriverName = base.StorageDriver.Name() return actual case storagedriver.PathNotFoundError: actual.DriverName = base.StorageDriver.Name() return actual case storagedriver.InvalidPathError: actual.DriverName = base.StorageDriver.Name() return actual case storagedriver.InvalidOffsetError: actual.DriverName = base.StorageDriver.Name() return actual default: storageError := storagedriver.Error{ DriverName: base.StorageDriver.Name(), Enclosed: e, } return storageError } } // GetContent wraps GetContent of underlying storage driver. 
func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) { ctx, done := context.WithTrace(ctx) defer done("%s.GetContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } b, e := base.StorageDriver.GetContent(ctx, path) return b, base.setDriverName(e) } // PutContent wraps PutContent of underlying storage driver. func (base *Base) PutContent(ctx context.Context, path string, content []byte) error { ctx, done := context.WithTrace(ctx) defer done("%s.PutContent(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } return base.setDriverName(base.StorageDriver.PutContent(ctx, path, content)) } // ReadStream wraps ReadStream of underlying storage driver. func (base *Base) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { ctx, done := context.WithTrace(ctx) defer done("%s.ReadStream(%q, %d)", base.Name(), path, offset) if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} } if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } rc, e := base.StorageDriver.ReadStream(ctx, path, offset) return rc, base.setDriverName(e) } // WriteStream wraps WriteStream of underlying storage driver. func (base *Base) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { ctx, done := context.WithTrace(ctx) defer done("%s.WriteStream(%q, %d)", base.Name(), path, offset) if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()} } if !storagedriver.PathRegexp.MatchString(path) { return 0, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } i64, e := base.StorageDriver.WriteStream(ctx, path, offset, reader) return i64, base.setDriverName(e) } // Stat wraps Stat of underlying storage driver. func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { ctx, done := context.WithTrace(ctx) defer done("%s.Stat(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } fi, e := base.StorageDriver.Stat(ctx, path) return fi, base.setDriverName(e) } // List wraps List of underlying storage driver. func (base *Base) List(ctx context.Context, path string) ([]string, error) { ctx, done := context.WithTrace(ctx) defer done("%s.List(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) && path != "/" { return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } str, e := base.StorageDriver.List(ctx, path) return str, base.setDriverName(e) } // Move wraps Move of underlying storage driver. 
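// Like every wrapper in this file, it validates paths against
// storagedriver.PathRegexp before delegating and passes any failure through
// setDriverName so the error identifies the backend. A minimal illustrative
// sketch (the driver name and paths are hypothetical):
//
//	err := d.Move(ctx, "relative/source", "/valid/dest")
//	// err is storagedriver.InvalidPathError{Path: "relative/source", DriverName: "filesystem"}
//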
func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error { ctx, done := context.WithTrace(ctx) defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath) if !storagedriver.PathRegexp.MatchString(sourcePath) { return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()} } else if !storagedriver.PathRegexp.MatchString(destPath) { return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()} } return base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath)) } // Delete wraps Delete of underlying storage driver. func (base *Base) Delete(ctx context.Context, path string) error { ctx, done := context.WithTrace(ctx) defer done("%s.Delete(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } return base.setDriverName(base.StorageDriver.Delete(ctx, path)) } // URLFor wraps URLFor of underlying storage driver. func (base *Base) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { ctx, done := context.WithTrace(ctx) defer done("%s.URLFor(%q)", base.Name(), path) if !storagedriver.PathRegexp.MatchString(path) { return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()} } str, e := base.StorageDriver.URLFor(ctx, path, options) return str, base.setDriverName(e) } distribution-2.3.0/registry/storage/driver/factory/000077500000000000000000000000001265472114500225205ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/factory/factory.go000066400000000000000000000037541265472114500245270ustar00rootroot00000000000000package factory import ( "fmt" storagedriver "github.com/docker/distribution/registry/storage/driver" ) // driverFactories stores an internal mapping between storage driver names and their respective // factories var driverFactories = make(map[string]StorageDriverFactory) // StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces // Storage drivers should call Register() with a factory to make the driver available by name type StorageDriverFactory interface { // Create returns a new storagedriver.StorageDriver with the given parameters // Parameters will vary by driver and may be ignored // Each parameter key must only consist of lowercase letters and numbers Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) } // Register makes a storage driver available by the provided name. // If Register is called twice with the same name or if driver factory is nil, it panics. func Register(name string, factory StorageDriverFactory) { if factory == nil { panic("Must not provide nil StorageDriverFactory") } _, registered := driverFactories[name] if registered { panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name)) } driverFactories[name] = factory } // Create a new storagedriver.StorageDriver with the given name and // parameters. To use a driver, the StorageDriverFactory must first be // registered with the given name. 
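// A minimal illustrative sketch (the driver name, factory type and parameters
// are hypothetical):
//
//	func init() {
//		factory.Register("mydriver", &myDriverFactory{})
//	}
//
//	d, err := factory.Create("mydriver", map[string]interface{}{
//		"rootdirectory": "/var/lib/registry",
//	})
//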
If no drivers are found, an // InvalidStorageDriverError is returned func Create(name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) { driverFactory, ok := driverFactories[name] if !ok { return nil, InvalidStorageDriverError{name} } return driverFactory.Create(parameters) } // InvalidStorageDriverError records an attempt to construct an unregistered storage driver type InvalidStorageDriverError struct { Name string } func (err InvalidStorageDriverError) Error() string { return fmt.Sprintf("StorageDriver not registered: %s", err.Name) } distribution-2.3.0/registry/storage/driver/fileinfo.go000066400000000000000000000051341265472114500231760ustar00rootroot00000000000000package driver import "time" // FileInfo returns information about a given path. Inspired by os.FileInfo, // it elides the base name method for a full path instead. type FileInfo interface { // Path provides the full path of the target of this file info. Path() string // Size returns current length in bytes of the file. The return value can // be used to write to the end of the file at path. The value is // meaningless if IsDir returns true. Size() int64 // ModTime returns the modification time for the file. For backends that // don't have a modification time, the creation time should be returned. ModTime() time.Time // IsDir returns true if the path is a directory. IsDir() bool } // NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal // should only be used by storagedriver implementations. They should moved to // a "driver" package, similar to database/sql. // FileInfoFields provides the exported fields for implementing FileInfo // interface in storagedriver implementations. It should be used with // InternalFileInfo. type FileInfoFields struct { // Path provides the full path of the target of this file info. Path string // Size is current length in bytes of the file. The value of this field // can be used to write to the end of the file at path. The value is // meaningless if IsDir is set to true. Size int64 // ModTime returns the modification time for the file. For backends that // don't have a modification time, the creation time should be returned. ModTime time.Time // IsDir returns true if the path is a directory. IsDir bool } // FileInfoInternal implements the FileInfo interface. This should only be // used by storagedriver implementations that don't have a specialized // FileInfo type. type FileInfoInternal struct { FileInfoFields } var _ FileInfo = FileInfoInternal{} var _ FileInfo = &FileInfoInternal{} // Path provides the full path of the target of this file info. func (fi FileInfoInternal) Path() string { return fi.FileInfoFields.Path } // Size returns current length in bytes of the file. The return value can // be used to write to the end of the file at path. The value is // meaningless if IsDir returns true. func (fi FileInfoInternal) Size() int64 { return fi.FileInfoFields.Size } // ModTime returns the modification time for the file. For backends that // don't have a modification time, the creation time should be returned. func (fi FileInfoInternal) ModTime() time.Time { return fi.FileInfoFields.ModTime } // IsDir returns true if the path is a directory. 
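// A minimal illustrative sketch of how drivers without a specialized FileInfo
// type build one of these (field values are hypothetical), as the in-memory
// and gcs drivers in this repository do:
//
//	fi := storagedriver.FileInfoInternal{FileInfoFields: storagedriver.FileInfoFields{
//		Path:    "/docker/registry/v2/blobs",
//		ModTime: time.Now(),
//		IsDir:   true, // Size is left zero because it is meaningless for directories
//	}}
//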
func (fi FileInfoInternal) IsDir() bool { return fi.FileInfoFields.IsDir } distribution-2.3.0/registry/storage/driver/filesystem/000077500000000000000000000000001265472114500232355ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/filesystem/driver.go000066400000000000000000000164511265472114500250660ustar00rootroot00000000000000package filesystem import ( "bytes" "fmt" "io" "io/ioutil" "os" "path" "time" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) const driverName = "filesystem" const defaultRootDirectory = "/var/lib/registry" func init() { factory.Register(driverName, &filesystemDriverFactory{}) } // filesystemDriverFactory implements the factory.StorageDriverFactory interface type filesystemDriverFactory struct{} func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters), nil } type driver struct { rootDirectory string } type baseEmbed struct { base.Base } // Driver is a storagedriver.StorageDriver implementation backed by a local // filesystem. All provided paths will be subpaths of the RootDirectory. type Driver struct { baseEmbed } // FromParameters constructs a new Driver with a given parameters map // Optional Parameters: // - rootdirectory func FromParameters(parameters map[string]interface{}) *Driver { var rootDirectory = defaultRootDirectory if parameters != nil { rootDir, ok := parameters["rootdirectory"] if ok { rootDirectory = fmt.Sprint(rootDir) } } return New(rootDirectory) } // New constructs a new Driver with a given rootDirectory func New(rootDirectory string) *Driver { return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: &driver{ rootDirectory: rootDirectory, }, }, }, } } // Implement the storagedriver.StorageDriver interface func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } defer rc.Close() p, err := ioutil.ReadAll(rc) if err != nil { return nil, err } return p, nil } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error { if _, err := d.WriteStream(ctx, subPath, 0, bytes.NewReader(contents)); err != nil { return err } return os.Truncate(d.fullPath(subPath), int64(len(contents))) } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0644) if err != nil { if os.IsNotExist(err) { return nil, storagedriver.PathNotFoundError{Path: path} } return nil, err } seekPos, err := file.Seek(int64(offset), os.SEEK_SET) if err != nil { file.Close() return nil, err } else if seekPos < int64(offset) { file.Close() return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } return file, nil } // WriteStream stores the contents of the provided io.Reader at a location // designated by the given path. 
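// A minimal illustrative sketch of resuming an interrupted write (the path and
// reader are hypothetical): callers stat the file and continue writing at the
// reported size:
//
//	fi, _ := d.Stat(ctx, "/uploads/abc")
//	nn, err := d.WriteStream(ctx, "/uploads/abc", fi.Size(), remaining)
//
// Offsets beyond the current size are also accepted here; the seek below
// simply leaves a gap, which becomes a sparse region on most filesystems.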
func (d *driver) WriteStream(ctx context.Context, subPath string, offset int64, reader io.Reader) (nn int64, err error) { // TODO(stevvooe): This needs to be a requirement. // if !path.IsAbs(subPath) { // return fmt.Errorf("absolute path required: %q", subPath) // } fullPath := d.fullPath(subPath) parentDir := path.Dir(fullPath) if err := os.MkdirAll(parentDir, 0777); err != nil { return 0, err } fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0666) if err != nil { // TODO(stevvooe): A few missing conditions in storage driver: // 1. What if the path is already a directory? // 2. Should number 1 be exposed explicitly in storagedriver? // 2. Can this path not exist, even if we create above? return 0, err } defer fp.Close() nn, err = fp.Seek(offset, os.SEEK_SET) if err != nil { return 0, err } if nn != offset { return 0, fmt.Errorf("bad seek to %v, expected %v in fp=%v", offset, nn, fp) } return io.Copy(fp, reader) } // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. func (d *driver) Stat(ctx context.Context, subPath string) (storagedriver.FileInfo, error) { fullPath := d.fullPath(subPath) fi, err := os.Stat(fullPath) if err != nil { if os.IsNotExist(err) { return nil, storagedriver.PathNotFoundError{Path: subPath} } return nil, err } return fileInfo{ path: subPath, FileInfo: fi, }, nil } // List returns a list of the objects that are direct descendants of the given // path. func (d *driver) List(ctx context.Context, subPath string) ([]string, error) { fullPath := d.fullPath(subPath) dir, err := os.Open(fullPath) if err != nil { if os.IsNotExist(err) { return nil, storagedriver.PathNotFoundError{Path: subPath} } return nil, err } defer dir.Close() fileNames, err := dir.Readdirnames(0) if err != nil { return nil, err } keys := make([]string, 0, len(fileNames)) for _, fileName := range fileNames { keys = append(keys, path.Join(subPath, fileName)) } return keys, nil } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { source := d.fullPath(sourcePath) dest := d.fullPath(destPath) if _, err := os.Stat(source); os.IsNotExist(err) { return storagedriver.PathNotFoundError{Path: sourcePath} } if err := os.MkdirAll(path.Dir(dest), 0755); err != nil { return err } err := os.Rename(source, dest) return err } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, subPath string) error { fullPath := d.fullPath(subPath) _, err := os.Stat(fullPath) if err != nil && !os.IsNotExist(err) { return err } else if err != nil { return storagedriver.PathNotFoundError{Path: subPath} } err = os.RemoveAll(fullPath) return err } // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod{} } // fullPath returns the absolute path of a key within the Driver's storage. func (d *driver) fullPath(subPath string) string { return path.Join(d.rootDirectory, subPath) } type fileInfo struct { os.FileInfo path string } var _ storagedriver.FileInfo = fileInfo{} // Path provides the full path of the target of this file info. 
func (fi fileInfo) Path() string { return fi.path } // Size returns current length in bytes of the file. The return value can // be used to write to the end of the file at path. The value is // meaningless if IsDir returns true. func (fi fileInfo) Size() int64 { if fi.IsDir() { return 0 } return fi.FileInfo.Size() } // ModTime returns the modification time for the file. For backends that // don't have a modification time, the creation time should be returned. func (fi fileInfo) ModTime() time.Time { return fi.FileInfo.ModTime() } // IsDir returns true if the path is a directory. func (fi fileInfo) IsDir() bool { return fi.FileInfo.IsDir() } distribution-2.3.0/registry/storage/driver/filesystem/driver_test.go000066400000000000000000000010511265472114500261130ustar00rootroot00000000000000package filesystem import ( "io/ioutil" "os" "testing" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" . "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { TestingT(t) } func init() { root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(root) testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return New(root), nil }, testsuites.NeverSkip) } distribution-2.3.0/registry/storage/driver/gcs/000077500000000000000000000000001265472114500216255ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/gcs/doc.go000066400000000000000000000002231265472114500227160ustar00rootroot00000000000000// Package gcs implements the Google Cloud Storage driver backend. Support can be // enabled by including the "include_gcs" build tag. package gcs distribution-2.3.0/registry/storage/driver/gcs/gcs.go000066400000000000000000000445111265472114500227350ustar00rootroot00000000000000// Package gcs provides a storagedriver.StorageDriver implementation to // store blobs in Google cloud storage. // // This package leverages the google.golang.org/cloud/storage client library //for interfacing with gcs. // // Because gcs is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // // Keep in mind that gcs guarantees only eventual consistency, so do not assume // that a successful write will mean immediate access to the data written (although // in most regions a new object put has guaranteed read after write). The only true // guarantee is that once you call Stat and receive a certain file size, that much of // the file is already accessible. 
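// A minimal illustrative sketch of relying on that guarantee (the path is
// hypothetical):
//
//	fi, err := d.Stat(ctx, "/some/blob")
//	if err == nil {
//		rc, _ := d.ReadStream(ctx, "/some/blob", 0)
//		// at least fi.Size() bytes are readable from rc at this point
//	}
//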
// // +build include_gcs package gcs import ( "bytes" "fmt" "io" "io/ioutil" "math/rand" "net/http" "net/url" "sort" "strings" "time" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/googleapi" storageapi "google.golang.org/api/storage/v1" "google.golang.org/cloud" "google.golang.org/cloud/storage" ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) const driverName = "gcs" const dummyProjectID = "" // driverParameters is a struct that encapsulates all of the driver parameters after all values have been set type driverParameters struct { bucket string config *jwt.Config email string privateKey []byte client *http.Client rootDirectory string } func init() { factory.Register(driverName, &gcsDriverFactory{}) } // gcsDriverFactory implements the factory.StorageDriverFactory interface type gcsDriverFactory struct{} // Create StorageDriver from parameters func (factory *gcsDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } // driver is a storagedriver.StorageDriver implementation backed by GCS // Objects are stored at absolute keys in the provided bucket. type driver struct { client *http.Client bucket string email string privateKey []byte rootDirectory string } // FromParameters constructs a new Driver with a given parameters map // Required parameters: // - bucket func FromParameters(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { bucket, ok := parameters["bucket"] if !ok || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } rootDirectory, ok := parameters["rootdirectory"] if !ok { rootDirectory = "" } var ts oauth2.TokenSource jwtConf := new(jwt.Config) if keyfile, ok := parameters["keyfile"]; ok { jsonKey, err := ioutil.ReadFile(fmt.Sprint(keyfile)) if err != nil { return nil, err } jwtConf, err = google.JWTConfigFromJSON(jsonKey, storage.ScopeFullControl) if err != nil { return nil, err } ts = jwtConf.TokenSource(context.Background()) } else { var err error ts, err = google.DefaultTokenSource(context.Background(), storage.ScopeFullControl) if err != nil { return nil, err } } params := driverParameters{ bucket: fmt.Sprint(bucket), rootDirectory: fmt.Sprint(rootDirectory), email: jwtConf.Email, privateKey: jwtConf.PrivateKey, client: oauth2.NewClient(context.Background(), ts), } return New(params) } // New constructs a new driver func New(params driverParameters) (storagedriver.StorageDriver, error) { rootDirectory := strings.Trim(params.rootDirectory, "/") if rootDirectory != "" { rootDirectory += "/" } d := &driver{ bucket: params.bucket, rootDirectory: rootDirectory, email: params.email, privateKey: params.privateKey, client: params.client, } return &base.Base{ StorageDriver: d, }, nil } // Implement the storagedriver.StorageDriver interface func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. 
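// A minimal illustrative sketch (the key and contents are hypothetical):
// GetContent and PutContent buffer whole objects in memory, so they are suited
// to small metadata such as link files rather than layer data:
//
//	if err := d.PutContent(ctx, "/repositories/foo/_layers/link", []byte("sha256:...")); err != nil {
//		return err
//	}
//	link, err := d.GetContent(ctx, "/repositories/foo/_layers/link")
//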
func (d *driver) GetContent(context ctx.Context, path string) ([]byte, error) { rc, err := d.ReadStream(context, path, 0) if err != nil { return nil, err } defer rc.Close() p, err := ioutil.ReadAll(rc) if err != nil { return nil, err } return p, nil } // PutContent stores the []byte content at a location designated by "path". // This should primarily be used for small objects. func (d *driver) PutContent(context ctx.Context, path string, contents []byte) error { wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) wc.ContentType = "application/octet-stream" defer wc.Close() _, err := wc.Write(contents) return err } // ReadStream retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. func (d *driver) ReadStream(context ctx.Context, path string, offset int64) (io.ReadCloser, error) { name := d.pathToKey(path) // copied from google.golang.org/cloud/storage#NewReader : // to set the additional "Range" header u := &url.URL{ Scheme: "https", Host: "storage.googleapis.com", Path: fmt.Sprintf("/%s/%s", d.bucket, name), } req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return nil, err } if offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%v-", offset)) } res, err := d.client.Do(req) if err != nil { return nil, err } if res.StatusCode == http.StatusNotFound { res.Body.Close() return nil, storagedriver.PathNotFoundError{Path: path} } if res.StatusCode == http.StatusRequestedRangeNotSatisfiable { res.Body.Close() obj, err := storageStatObject(d.context(context), d.bucket, name) if err != nil { return nil, err } if offset == int64(obj.Size) { return ioutil.NopCloser(bytes.NewReader([]byte{})), nil } return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } if res.StatusCode < 200 || res.StatusCode > 299 { res.Body.Close() return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", d.bucket, name, res.Status) } return res.Body, nil } // WriteStream stores the contents of the provided io.ReadCloser at a // location designated by the given path. // May be used to resume writing a stream by providing a nonzero offset. // The offset must be no larger than the CurrentSize for this path. 
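// A minimal illustrative sketch of a resumed write (the path and sizes are
// hypothetical):
//
//	// existing object is 4096 bytes; continue the upload at its current size
//	nn, err := d.WriteStream(ctx, "/uploads/abc", 4096, reader)
//
// Because GCS objects cannot be modified in place, the implementation below
// uploads the new bytes as a temporary part and composes it onto the existing
// object, falling back to rewriting the whole object once the composite's
// component count approaches the compose limit (see the ComponentCount check).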
func (d *driver) WriteStream(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } if offset == 0 { return d.writeCompletely(context, path, 0, reader) } service, err := storageapi.New(d.client) if err != nil { return 0, err } objService := storageapi.NewObjectsService(service) var obj *storageapi.Object err = retry(5, func() error { o, err := objService.Get(d.bucket, d.pathToKey(path)).Do() obj = o return err }) // obj, err := retry(5, objService.Get(d.bucket, d.pathToKey(path)).Do) if err != nil { return 0, err } // cannot append more chunks, so redo from scratch if obj.ComponentCount >= 1023 { return d.writeCompletely(context, path, offset, reader) } // skip from reader objSize := int64(obj.Size) nn, err := skip(reader, objSize-offset) if err != nil { return nn, err } // Size <= offset partName := fmt.Sprintf("%v#part-%d#", d.pathToKey(path), obj.ComponentCount) gcsContext := d.context(context) wc := storage.NewWriter(gcsContext, d.bucket, partName) wc.ContentType = "application/octet-stream" if objSize < offset { err = writeZeros(wc, offset-objSize) if err != nil { wc.CloseWithError(err) return nn, err } } n, err := io.Copy(wc, reader) if err != nil { wc.CloseWithError(err) return nn, err } err = wc.Close() if err != nil { return nn, err } // wc was closed succesfully, so the temporary part exists, schedule it for deletion at the end // of the function defer storageDeleteObject(gcsContext, d.bucket, partName) req := &storageapi.ComposeRequest{ Destination: &storageapi.Object{Bucket: obj.Bucket, Name: obj.Name, ContentType: obj.ContentType}, SourceObjects: []*storageapi.ComposeRequestSourceObjects{ { Name: obj.Name, Generation: obj.Generation, }, { Name: partName, Generation: wc.Object().Generation, }}, } err = retry(5, func() error { _, err := objService.Compose(d.bucket, obj.Name, req).Do(); return err }) if err == nil { nn = nn + n } return nn, err } type request func() error func retry(maxTries int, req request) error { backoff := time.Second var err error for i := 0; i < maxTries; i++ { err = req() if err == nil { return nil } status, ok := err.(*googleapi.Error) if !ok || (status.Code != 429 && status.Code < http.StatusInternalServerError) { return err } time.Sleep(backoff - time.Second + (time.Duration(rand.Int31n(1000)) * time.Millisecond)) if i <= 4 { backoff = backoff * 2 } } return err } func (d *driver) writeCompletely(context ctx.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { wc := storage.NewWriter(d.context(context), d.bucket, d.pathToKey(path)) wc.ContentType = "application/octet-stream" defer wc.Close() // Copy the first offset bytes of the existing contents // (padded with zeros if needed) into the writer if offset > 0 { existing, err := d.ReadStream(context, path, 0) if err != nil { return 0, err } defer existing.Close() n, err := io.CopyN(wc, existing, offset) if err == io.EOF { err = writeZeros(wc, offset-n) } if err != nil { return 0, err } } return io.Copy(wc, reader) } func skip(reader io.Reader, count int64) (int64, error) { if count <= 0 { return 0, nil } return io.CopyN(ioutil.Discard, reader, count) } func writeZeros(wc io.Writer, count int64) error { buf := make([]byte, 32*1024) for count > 0 { size := cap(buf) if int64(size) > count { size = int(count) } n, err := wc.Write(buf[0:size]) if err != nil { return err } count = count - int64(n) } return nil } // Stat retrieves the FileInfo for the 
given path, including the current // size in bytes and the creation time. func (d *driver) Stat(context ctx.Context, path string) (storagedriver.FileInfo, error) { var fi storagedriver.FileInfoFields //try to get as file gcsContext := d.context(context) obj, err := storageStatObject(gcsContext, d.bucket, d.pathToKey(path)) if err == nil { fi = storagedriver.FileInfoFields{ Path: path, Size: obj.Size, ModTime: obj.Updated, IsDir: false, } return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } //try to get as folder dirpath := d.pathToDirKey(path) var query *storage.Query query = &storage.Query{} query.Prefix = dirpath query.MaxResults = 1 objects, err := storageListObjects(gcsContext, d.bucket, query) if err != nil { return nil, err } if len(objects.Results) < 1 { return nil, storagedriver.PathNotFoundError{Path: path} } fi = storagedriver.FileInfoFields{ Path: path, IsDir: true, } obj = objects.Results[0] if obj.Name == dirpath { fi.Size = obj.Size fi.ModTime = obj.Updated } return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the //given path. func (d *driver) List(context ctx.Context, path string) ([]string, error) { var query *storage.Query query = &storage.Query{} query.Delimiter = "/" query.Prefix = d.pathToDirKey(path) list := make([]string, 0, 64) for { objects, err := storageListObjects(d.context(context), d.bucket, query) if err != nil { return nil, err } for _, object := range objects.Results { // GCS does not guarantee strong consistency between // DELETE and LIST operationsCheck that the object is not deleted, // so filter out any objects with a non-zero time-deleted if object.Deleted.IsZero() { name := object.Name // Ignore objects with names that end with '#' (these are uploaded parts) if name[len(name)-1] != '#' { name = d.keyToPath(name) list = append(list, name) } } } for _, subpath := range objects.Prefixes { subpath = d.keyToPath(subpath) list = append(list, subpath) } query = objects.Next if query == nil { break } } if path != "/" && len(list) == 0 { // Treat empty response as missing directory, since we don't actually // have directories in Google Cloud Storage. return nil, storagedriver.PathNotFoundError{Path: path} } return list, nil } // Move moves an object stored at sourcePath to destPath, removing the // original object. 
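// A minimal illustrative sketch (paths are hypothetical):
//
//	err := d.Move(ctx, "/uploads/abc/data", "/blobs/sha256/ab/abcd/data")
//
// GCS has no rename, so the implementation below copies every key under the
// source prefix to the destination, deletes the originals afterwards, and if
// any copy fails it removes the copies already made so the destination is not
// left half-populated.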
func (d *driver) Move(context ctx.Context, sourcePath string, destPath string) error { prefix := d.pathToDirKey(sourcePath) gcsContext := d.context(context) keys, err := d.listAll(gcsContext, prefix) if err != nil { return err } if len(keys) > 0 { destPrefix := d.pathToDirKey(destPath) copies := make([]string, 0, len(keys)) sort.Strings(keys) var err error for _, key := range keys { dest := destPrefix + key[len(prefix):] _, err = storageCopyObject(gcsContext, d.bucket, key, d.bucket, dest, nil) if err == nil { copies = append(copies, dest) } else { break } } // if an error occurred, attempt to cleanup the copies made if err != nil { for i := len(copies) - 1; i >= 0; i-- { _ = storageDeleteObject(gcsContext, d.bucket, copies[i]) } return err } // delete originals for i := len(keys) - 1; i >= 0; i-- { err2 := storageDeleteObject(gcsContext, d.bucket, keys[i]) if err2 != nil { err = err2 } } return err } _, err = storageCopyObject(gcsContext, d.bucket, d.pathToKey(sourcePath), d.bucket, d.pathToKey(destPath), nil) if err != nil { if status := err.(*googleapi.Error); status != nil { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: sourcePath} } } return err } return storageDeleteObject(gcsContext, d.bucket, d.pathToKey(sourcePath)) } // listAll recursively lists all names of objects stored at "prefix" and its subpaths. func (d *driver) listAll(context context.Context, prefix string) ([]string, error) { list := make([]string, 0, 64) query := &storage.Query{} query.Prefix = prefix query.Versions = false for { objects, err := storageListObjects(d.context(context), d.bucket, query) if err != nil { return nil, err } for _, obj := range objects.Results { // GCS does not guarantee strong consistency between // DELETE and LIST operationsCheck that the object is not deleted, // so filter out any objects with a non-zero time-deleted if obj.Deleted.IsZero() { list = append(list, obj.Name) } } query = objects.Next if query == nil { break } } return list, nil } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(context ctx.Context, path string) error { prefix := d.pathToDirKey(path) gcsContext := d.context(context) keys, err := d.listAll(gcsContext, prefix) if err != nil { return err } if len(keys) > 0 { sort.Sort(sort.Reverse(sort.StringSlice(keys))) for _, key := range keys { err := storageDeleteObject(gcsContext, d.bucket, key) // GCS only guarantees eventual consistency, so listAll might return // paths that no longer exist. 
If this happens, just ignore any not // found error if status, ok := err.(*googleapi.Error); ok { if status.Code == http.StatusNotFound { err = nil } } if err != nil { return err } } return nil } err = storageDeleteObject(gcsContext, d.bucket, d.pathToKey(path)) if err != nil { if status := err.(*googleapi.Error); status != nil { if status.Code == http.StatusNotFound { return storagedriver.PathNotFoundError{Path: path} } } } return err } func storageDeleteObject(context context.Context, bucket string, name string) error { return retry(5, func() error { return storage.DeleteObject(context, bucket, name) }) } func storageStatObject(context context.Context, bucket string, name string) (*storage.Object, error) { var obj *storage.Object err := retry(5, func() error { var err error obj, err = storage.StatObject(context, bucket, name) return err }) return obj, err } func storageListObjects(context context.Context, bucket string, q *storage.Query) (*storage.Objects, error) { var objs *storage.Objects err := retry(5, func() error { var err error objs, err = storage.ListObjects(context, bucket, q) return err }) return objs, err } func storageCopyObject(context context.Context, srcBucket, srcName string, destBucket, destName string, attrs *storage.ObjectAttrs) (*storage.Object, error) { var obj *storage.Object err := retry(5, func() error { var err error obj, err = storage.CopyObject(context, srcBucket, srcName, destBucket, destName, attrs) return err }) return obj, err } // URLFor returns a URL which may be used to retrieve the content stored at // the given path, possibly using the given options. // Returns ErrUnsupportedMethod if this driver has no privateKey func (d *driver) URLFor(context ctx.Context, path string, options map[string]interface{}) (string, error) { if d.privateKey == nil { return "", storagedriver.ErrUnsupportedMethod{} } name := d.pathToKey(path) methodString := "GET" method, ok := options["method"] if ok { methodString, ok = method.(string) if !ok || (methodString != "GET" && methodString != "HEAD") { return "", storagedriver.ErrUnsupportedMethod{} } } expiresTime := time.Now().Add(20 * time.Minute) expires, ok := options["expiry"] if ok { et, ok := expires.(time.Time) if ok { expiresTime = et } } opts := &storage.SignedURLOptions{ GoogleAccessID: d.email, PrivateKey: d.privateKey, Method: methodString, Expires: expiresTime, } return storage.SignedURL(d.bucket, name, opts) } func (d *driver) context(context ctx.Context) context.Context { return cloud.WithContext(context, dummyProjectID, d.client) } func (d *driver) pathToKey(path string) string { return strings.TrimRight(d.rootDirectory+strings.TrimLeft(path, "/"), "/") } func (d *driver) pathToDirKey(path string) string { return d.pathToKey(path) + "/" } func (d *driver) keyToPath(key string) string { return "/" + strings.Trim(strings.TrimPrefix(key, d.rootDirectory), "/") } distribution-2.3.0/registry/storage/driver/gcs/gcs_test.go000066400000000000000000000110701265472114500237660ustar00rootroot00000000000000// +build include_gcs package gcs import ( "io/ioutil" "os" "testing" "fmt" ctx "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "google.golang.org/cloud/storage" "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } var gcsDriverConstructor func(rootDirectory string) (storagedriver.StorageDriver, error) var skipGCS func() string func init() { bucket := os.Getenv("REGISTRY_STORAGE_GCS_BUCKET") credentials := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") // Skip GCS storage driver tests if environment variable parameters are not provided skipGCS = func() string { if bucket == "" || credentials == "" { return "The following environment variables must be set to enable these tests: REGISTRY_STORAGE_GCS_BUCKET, GOOGLE_APPLICATION_CREDENTIALS" } return "" } if skipGCS() != "" { return } root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(root) var ts oauth2.TokenSource var email string var privateKey []byte ts, err = google.DefaultTokenSource(ctx.Background(), storage.ScopeFullControl) if err != nil { // Assume that the file contents are within the environment variable since it exists // but does not contain a valid file path jwtConfig, err := google.JWTConfigFromJSON([]byte(credentials), storage.ScopeFullControl) if err != nil { panic(fmt.Sprintf("Error reading JWT config : %s", err)) } email = jwtConfig.Email privateKey = []byte(jwtConfig.PrivateKey) if len(privateKey) == 0 { panic("Error reading JWT config : missing private_key property") } if email == "" { panic("Error reading JWT config : missing client_email property") } ts = jwtConfig.TokenSource(ctx.Background()) } gcsDriverConstructor = func(rootDirectory string) (storagedriver.StorageDriver, error) { parameters := driverParameters{ bucket: bucket, rootDirectory: root, email: email, privateKey: privateKey, client: oauth2.NewClient(ctx.Background(), ts), } return New(parameters) } testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return gcsDriverConstructor(root) }, skipGCS) } func TestRetry(t *testing.T) { if skipGCS() != "" { t.Skip(skipGCS()) } assertError := func(expected string, observed error) { observedMsg := "" if observed != nil { observedMsg = observed.Error() } if observedMsg != expected { t.Fatalf("expected %v, observed %v\n", expected, observedMsg) } } err := retry(2, func() error { return &googleapi.Error{ Code: 503, Message: "google api error", } }) assertError("googleapi: Error 503: google api error", err) err = retry(2, func() error { return &googleapi.Error{ Code: 404, Message: "google api error", } }) assertError("googleapi: Error 404: google api error", err) err = retry(2, func() error { return fmt.Errorf("error") }) assertError("error", err) } func TestEmptyRootList(t *testing.T) { if skipGCS() != "" { t.Skip(skipGCS()) } validRoot, err := ioutil.TempDir("", "driver-") if err != nil { t.Fatalf("unexpected error creating temporary directory: %v", err) } defer os.Remove(validRoot) rootedDriver, err := gcsDriverConstructor(validRoot) if err != nil { t.Fatalf("unexpected error creating rooted driver: %v", err) } emptyRootDriver, err := gcsDriverConstructor("") if err != nil { t.Fatalf("unexpected error creating empty root driver: %v", err) } slashRootDriver, err := gcsDriverConstructor("/") if err != nil { t.Fatalf("unexpected error creating slash root driver: %v", err) } filename := "/test" contents := []byte("contents") ctx := ctx.Background() err = rootedDriver.PutContent(ctx, filename, contents) if err != nil { t.Fatalf("unexpected error creating content: %v", err) } defer func() { err := rootedDriver.Delete(ctx, filename) if err != nil { t.Fatalf("failed to remove %v due to %v\n", filename, err) } }() keys, err := 
emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } } distribution-2.3.0/registry/storage/driver/inmemory/000077500000000000000000000000001265472114500227105ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/inmemory/driver.go000066400000000000000000000145621265472114500245420ustar00rootroot00000000000000package inmemory import ( "bytes" "fmt" "io" "io/ioutil" "sync" "time" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) const driverName = "inmemory" func init() { factory.Register(driverName, &inMemoryDriverFactory{}) } // inMemoryDriverFacotry implements the factory.StorageDriverFactory interface. type inMemoryDriverFactory struct{} func (factory *inMemoryDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return New(), nil } type driver struct { root *dir mutex sync.RWMutex } // baseEmbed allows us to hide the Base embed. type baseEmbed struct { base.Base } // Driver is a storagedriver.StorageDriver implementation backed by a local map. // Intended solely for example and testing purposes. type Driver struct { baseEmbed // embedded, hidden base driver. } var _ storagedriver.StorageDriver = &Driver{} // New constructs a new Driver. func New() *Driver { return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: &driver{ root: &dir{ common: common{ p: "/", mod: time.Now(), }, }, }, }, }, } } // Implement the storagedriver.StorageDriver interface. func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { d.mutex.RLock() defer d.mutex.RUnlock() rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } defer rc.Close() return ioutil.ReadAll(rc) } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error { d.mutex.Lock() defer d.mutex.Unlock() f, err := d.root.mkfile(p) if err != nil { // TODO(stevvooe): Again, we need to clarify when this is not a // directory in StorageDriver API. return fmt.Errorf("not a file") } f.truncate() f.WriteAt(contents, 0) return nil } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { d.mutex.RLock() defer d.mutex.RUnlock() if offset < 0 { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } path = normalize(path) found := d.root.find(path) if found.path() != path { return nil, storagedriver.PathNotFoundError{Path: path} } if found.isdir() { return nil, fmt.Errorf("%q is a directory", path) } return ioutil.NopCloser(found.(*file).sectionReader(offset)), nil } // WriteStream stores the contents of the provided io.ReadCloser at a location // designated by the given path. 
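// Illustrative usage sketch (not part of the original source): a round trip
// through the in-memory driver using the exported constructor and the
// StorageDriver methods defined above. The path is an arbitrary example.
func exampleInMemoryRoundTrip(ctx context.Context) ([]byte, error) {
	d := New() // fresh, empty in-memory tree rooted at "/"
	if err := d.PutContent(ctx, "/example/blob", []byte("hello")); err != nil {
		return nil, err
	}
	return d.GetContent(ctx, "/example/blob") // returns []byte("hello")
}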
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) { d.mutex.Lock() defer d.mutex.Unlock() if offset < 0 { return 0, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } normalized := normalize(path) f, err := d.root.mkfile(normalized) if err != nil { return 0, fmt.Errorf("not a file") } // Unlock while we are reading from the source, in case we are reading // from the same mfs instance. This can be fixed by a more granular // locking model. d.mutex.Unlock() d.mutex.RLock() // Take the readlock to block other writers. var buf bytes.Buffer nn, err = buf.ReadFrom(reader) if err != nil { // TODO(stevvooe): This condition is odd and we may need to clarify: // we've read nn bytes from reader but have written nothing to the // backend. What is the correct return value? Really, the caller needs // to know that the reader has been advanced and reattempting the // operation is incorrect. d.mutex.RUnlock() d.mutex.Lock() return nn, err } d.mutex.RUnlock() d.mutex.Lock() f.WriteAt(buf.Bytes(), offset) return nn, err } // Stat returns info about the provided path. func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { d.mutex.RLock() defer d.mutex.RUnlock() normalized := normalize(path) found := d.root.find(path) if found.path() != normalized { return nil, storagedriver.PathNotFoundError{Path: path} } fi := storagedriver.FileInfoFields{ Path: path, IsDir: found.isdir(), ModTime: found.modtime(), } if !fi.IsDir { fi.Size = int64(len(found.(*file).data)) } return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the given // path. func (d *driver) List(ctx context.Context, path string) ([]string, error) { d.mutex.RLock() defer d.mutex.RUnlock() normalized := normalize(path) found := d.root.find(normalized) if !found.isdir() { return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this... } entries, err := found.(*dir).list(normalized) if err != nil { switch err { case errNotExists: return nil, storagedriver.PathNotFoundError{Path: path} case errIsNotDir: return nil, fmt.Errorf("not a directory") default: return nil, err } } return entries, nil } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { d.mutex.Lock() defer d.mutex.Unlock() normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath) err := d.root.move(normalizedSrc, normalizedDst) switch err { case errNotExists: return storagedriver.PathNotFoundError{Path: destPath} default: return err } } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { d.mutex.Lock() defer d.mutex.Unlock() normalized := normalize(path) err := d.root.delete(normalized) switch err { case errNotExists: return storagedriver.PathNotFoundError{Path: path} default: return err } } // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. 
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod{} } distribution-2.3.0/registry/storage/driver/inmemory/driver_test.go000066400000000000000000000007471265472114500256010ustar00rootroot00000000000000package inmemory import ( "testing" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } func init() { inmemoryDriverConstructor := func() (storagedriver.StorageDriver, error) { return New(), nil } testsuites.RegisterSuite(inmemoryDriverConstructor, testsuites.NeverSkip) } distribution-2.3.0/registry/storage/driver/inmemory/mfs.go000066400000000000000000000137501265472114500240320ustar00rootroot00000000000000package inmemory import ( "fmt" "io" "path" "sort" "strings" "time" ) var ( errExists = fmt.Errorf("exists") errNotExists = fmt.Errorf("notexists") errIsNotDir = fmt.Errorf("notdir") errIsDir = fmt.Errorf("isdir") ) type node interface { name() string path() string isdir() bool modtime() time.Time } // dir is the central type for the memory-based storagedriver. All operations // are dispatched from a root dir. type dir struct { common // TODO(stevvooe): Use sorted slice + search. children map[string]node } var _ node = &dir{} func (d *dir) isdir() bool { return true } // add places the node n into dir d. func (d *dir) add(n node) { if d.children == nil { d.children = make(map[string]node) } d.children[n.name()] = n d.mod = time.Now() } // find searches for the node, given path q in dir. If the node is found, it // will be returned. If the node is not found, the closet existing parent. If // the node is found, the returned (node).path() will match q. func (d *dir) find(q string) node { q = strings.Trim(q, "/") i := strings.Index(q, "/") if q == "" { return d } if i == 0 { panic("shouldn't happen, no root paths") } var component string if i < 0 { // No more path components component = q } else { component = q[:i] } child, ok := d.children[component] if !ok { // Node was not found. Return p and the current node. return d } if child.isdir() { // traverse down! q = q[i+1:] return child.(*dir).find(q) } return child } func (d *dir) list(p string) ([]string, error) { n := d.find(p) if n.path() != p { return nil, errNotExists } if !n.isdir() { return nil, errIsNotDir } var children []string for _, child := range n.(*dir).children { children = append(children, child.path()) } sort.Strings(children) return children, nil } // mkfile or return the existing one. returns an error if it exists and is a // directory. Essentially, this is open or create. func (d *dir) mkfile(p string) (*file, error) { n := d.find(p) if n.path() == p { if n.isdir() { return nil, errIsDir } return n.(*file), nil } dirpath, filename := path.Split(p) // Make any non-existent directories n, err := d.mkdirs(dirpath) if err != nil { return nil, err } dd := n.(*dir) n = &file{ common: common{ p: path.Join(dd.path(), filename), mod: time.Now(), }, } dd.add(n) return n.(*file), nil } // mkdirs creates any missing directory entries in p and returns the result. 
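// Illustrative sketch (not part of the original source): the lookup invariant
// the driver methods rely on. find never fails; when the exact node does not
// exist it returns the closest existing ancestor, so callers detect "not found"
// by comparing the returned node's path with the path they asked for.
func exampleFindSemantics(root *dir, p string) bool {
	n := root.find(normalize(p))
	return n.path() == normalize(p) // true only if the node actually exists
}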
func (d *dir) mkdirs(p string) (*dir, error) { p = normalize(p) n := d.find(p) if !n.isdir() { // Found something there return nil, errIsNotDir } if n.path() == p { return n.(*dir), nil } dd := n.(*dir) relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/") if relative == "" { return dd, nil } components := strings.Split(relative, "/") for _, component := range components { d, err := dd.mkdir(component) if err != nil { // This should actually never happen, since there are no children. return nil, err } dd = d } return dd, nil } // mkdir creates a child directory under d with the given name. func (d *dir) mkdir(name string) (*dir, error) { if name == "" { return nil, fmt.Errorf("invalid dirname") } _, ok := d.children[name] if ok { return nil, errExists } child := &dir{ common: common{ p: path.Join(d.path(), name), mod: time.Now(), }, } d.add(child) d.mod = time.Now() return child, nil } func (d *dir) move(src, dst string) error { dstDirname, _ := path.Split(dst) dp, err := d.mkdirs(dstDirname) if err != nil { return err } srcDirname, srcFilename := path.Split(src) sp := d.find(srcDirname) if normalize(srcDirname) != normalize(sp.path()) { return errNotExists } spd, ok := sp.(*dir) if !ok { return errIsNotDir // paranoid. } s, ok := spd.children[srcFilename] if !ok { return errNotExists } delete(spd.children, srcFilename) switch n := s.(type) { case *dir: n.p = dst case *file: n.p = dst } dp.add(s) return nil } func (d *dir) delete(p string) error { dirname, filename := path.Split(p) parent := d.find(dirname) if normalize(dirname) != normalize(parent.path()) { return errNotExists } if _, ok := parent.(*dir).children[filename]; !ok { return errNotExists } delete(parent.(*dir).children, filename) return nil } // dump outputs a primitive directory structure to stdout. func (d *dir) dump(indent string) { fmt.Println(indent, d.name()+"/") for _, child := range d.children { if child.isdir() { child.(*dir).dump(indent + "\t") } else { fmt.Println(indent, child.name()) } } } func (d *dir) String() string { return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children) } // file stores actual data in the fs tree. It acts like an open, seekable file // where operations are conducted through ReadAt and WriteAt. Use it with // SectionReader for the best effect. type file struct { common data []byte } var _ node = &file{} func (f *file) isdir() bool { return false } func (f *file) truncate() { f.data = f.data[:0] } func (f *file) sectionReader(offset int64) io.Reader { return io.NewSectionReader(f, offset, int64(len(f.data))-offset) } func (f *file) ReadAt(p []byte, offset int64) (n int, err error) { return copy(p, f.data[offset:]), nil } func (f *file) WriteAt(p []byte, offset int64) (n int, err error) { off := int(offset) if cap(f.data) < off+len(p) { data := make([]byte, len(f.data), off+len(p)) copy(data, f.data) f.data = data } f.mod = time.Now() f.data = f.data[:off+len(p)] return copy(f.data[off:off+len(p)], p), nil } func (f *file) String() string { return fmt.Sprintf("&file{path: %q}", f.p) } // common provides shared fields and methods for node implementations. 
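// Illustrative sketch (not part of the original source): file is driven through
// ReadAt/WriteAt, and sectionReader wraps it in an io.SectionReader starting at
// a byte offset, which is how ReadStream serves reads from an offset.
func exampleFileReadBack() string {
	f := &file{common: common{p: "/demo", mod: time.Now()}}
	f.WriteAt([]byte("hello world"), 0)
	buf := make([]byte, 5)
	n, _ := f.sectionReader(6).Read(buf) // bytes 6.. of the file, i.e. "world"
	return string(buf[:n])
}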
type common struct { p string mod time.Time } func (c *common) name() string { _, name := path.Split(c.p) return name } func (c *common) path() string { return c.p } func (c *common) modtime() time.Time { return c.mod } func normalize(p string) string { return "/" + strings.Trim(p, "/") } distribution-2.3.0/registry/storage/driver/middleware/000077500000000000000000000000001265472114500231665ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/middleware/cloudfront/000077500000000000000000000000001265472114500253455ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/middleware/cloudfront/middleware.go000066400000000000000000000071141265472114500300140ustar00rootroot00000000000000// Package middleware - cloudfront wrapper for storage libs // N.B. currently only works with S3, not arbitrary sites // package middleware import ( "crypto/x509" "encoding/pem" "fmt" "io/ioutil" "time" "github.com/AdRoll/goamz/cloudfront" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" ) // cloudFrontStorageMiddleware provides an simple implementation of layerHandler that // constructs temporary signed CloudFront URLs from the storagedriver layer URL, // then issues HTTP Temporary Redirects to this CloudFront content URL. type cloudFrontStorageMiddleware struct { storagedriver.StorageDriver cloudfront *cloudfront.CloudFront duration time.Duration } var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} // newCloudFrontLayerHandler constructs and returns a new CloudFront // LayerHandler implementation. // Required options: baseurl, privatekey, keypairid func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { base, ok := options["baseurl"] if !ok { return nil, fmt.Errorf("No baseurl provided") } baseURL, ok := base.(string) if !ok { return nil, fmt.Errorf("baseurl must be a string") } pk, ok := options["privatekey"] if !ok { return nil, fmt.Errorf("No privatekey provided") } pkPath, ok := pk.(string) if !ok { return nil, fmt.Errorf("privatekey must be a string") } kpid, ok := options["keypairid"] if !ok { return nil, fmt.Errorf("No keypairid provided") } keypairID, ok := kpid.(string) if !ok { return nil, fmt.Errorf("keypairid must be a string") } pkBytes, err := ioutil.ReadFile(pkPath) if err != nil { return nil, fmt.Errorf("Failed to read privatekey file: %s", err) } block, _ := pem.Decode([]byte(pkBytes)) if block == nil { return nil, fmt.Errorf("Failed to decode private key as an rsa private key") } privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return nil, err } cf := cloudfront.New(baseURL, privateKey, keypairID) duration := 20 * time.Minute d, ok := options["duration"] if ok { switch d := d.(type) { case time.Duration: duration = d case string: dur, err := time.ParseDuration(d) if err != nil { return nil, fmt.Errorf("Invalid duration: %s", err) } duration = dur } } return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil } // S3BucketKeyer is any type that is capable of returning the S3 bucket key // which should be cached by AWS CloudFront. type S3BucketKeyer interface { S3BucketKey(path string) string } // Resolve returns an http.Handler which can serve the contents of the given // Layer, or an error if not supported by the storagedriver. 
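// Illustrative sketch (not part of the original source): wiring a backend
// driver through this middleware by hand, with the option keys the constructor
// above requires (baseurl, privatekey, keypairid) plus the optional duration.
// The URL, key path and key pair ID are made-up placeholders.
func exampleWrapWithCloudFront(backend storagedriver.StorageDriver) (storagedriver.StorageDriver, error) {
	return newCloudFrontStorageMiddleware(backend, map[string]interface{}{
		"baseurl":    "https://d111111abcdef8.cloudfront.net/",
		"privatekey": "/etc/docker/cloudfront/pk.pem",
		"keypairid":  "APKAEXAMPLEKEYPAIRID",
		"duration":   "20m", // a string is parsed with time.ParseDuration; a time.Duration also works
	})
}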
func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { // TODO(endophage): currently only supports S3 keyer, ok := lh.StorageDriver.(S3BucketKeyer) if !ok { context.GetLogger(ctx).Warn("the CloudFront middleware does not support this backend storage driver") return lh.StorageDriver.URLFor(ctx, path, options) } cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) if err != nil { return "", err } return cfURL, nil } // init registers the cloudfront layerHandler backend. func init() { storagemiddleware.Register("cloudfront", storagemiddleware.InitFunc(newCloudFrontStorageMiddleware)) } distribution-2.3.0/registry/storage/driver/middleware/storagemiddleware.go000066400000000000000000000024171265472114500272230ustar00rootroot00000000000000package storagemiddleware import ( "fmt" storagedriver "github.com/docker/distribution/registry/storage/driver" ) // InitFunc is the type of a StorageMiddleware factory function and is // used to register the constructor for different StorageMiddleware backends. type InitFunc func(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) var storageMiddlewares map[string]InitFunc // Register is used to register an InitFunc for // a StorageMiddleware backend with the given name. func Register(name string, initFunc InitFunc) error { if storageMiddlewares == nil { storageMiddlewares = make(map[string]InitFunc) } if _, exists := storageMiddlewares[name]; exists { return fmt.Errorf("name already registered: %s", name) } storageMiddlewares[name] = initFunc return nil } // Get constructs a StorageMiddleware with the given options using the named backend. func Get(name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) { if storageMiddlewares != nil { if initFunc, exists := storageMiddlewares[name]; exists { return initFunc(storageDriver, options) } } return nil, fmt.Errorf("no storage middleware registered with name: %s", name) } distribution-2.3.0/registry/storage/driver/oss/000077500000000000000000000000001265472114500216555ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/oss/doc.go000066400000000000000000000002211265472114500227440ustar00rootroot00000000000000// Package oss implements the Aliyun OSS Storage driver backend. Support can be // enabled by including the "include_oss" build tag. package oss distribution-2.3.0/registry/storage/driver/oss/oss.go000066400000000000000000000524221265472114500230150ustar00rootroot00000000000000// Package oss provides a storagedriver.StorageDriver implementation to // store blobs in Aliyun OSS cloud storage. // // This package leverages the denverdino/aliyungo client library for interfacing with // oss. 
// // Because OSS is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // // +build include_oss package oss import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "reflect" "strconv" "strings" "sync" "time" "github.com/docker/distribution/context" "github.com/Sirupsen/logrus" "github.com/denverdino/aliyungo/oss" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) const driverName = "oss" // minChunkSize defines the minimum multipart upload chunk size // OSS API requires multipart upload chunks to be at least 5MB const minChunkSize = 5 << 20 const defaultChunkSize = 2 * minChunkSize const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk // listMax is the largest amount of objects you can request from OSS in a list call const listMax = 1000 //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { AccessKeyID string AccessKeySecret string Bucket string Region oss.Region Internal bool Encrypt bool Secure bool ChunkSize int64 RootDirectory string Endpoint string } func init() { factory.Register(driverName, &ossDriverFactory{}) } // ossDriverFactory implements the factory.StorageDriverFactory interface type ossDriverFactory struct{} func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } type driver struct { Client *oss.Client Bucket *oss.Bucket ChunkSize int64 Encrypt bool RootDirectory string pool sync.Pool // pool []byte buffers used for WriteStream zeros []byte // shared, zero-valued buffer used for WriteStream } type baseEmbed struct { base.Base } // Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS // Objects are stored at absolute keys in the provided bucket. 
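// Illustrative sketch (not part of the original source): a parameters map as
// consumed by FromParameters below. All values here are placeholders; only
// accesskeyid, accesskeysecret, region and bucket are required, and chunksize
// must be at least minChunkSize (5 MB).
func exampleOSSFromParameters() (*Driver, error) {
	return FromParameters(map[string]interface{}{
		"accesskeyid":     "my-access-key-id",
		"accesskeysecret": "my-access-key-secret",
		"region":          "oss-cn-hangzhou",
		"bucket":          "my-registry-blobs",
		"secure":          true,
		"internal":        false,
		"chunksize":       int64(2 * minChunkSize), // the default when omitted
		"rootdirectory":   "/registry",
	})
}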
type Driver struct { baseEmbed } // FromParameters constructs a new Driver with a given parameters map // Required parameters: // - accesskey // - secretkey // - region // - bucket // - encrypt func FromParameters(parameters map[string]interface{}) (*Driver, error) { // Providing no values for these is valid in case the user is authenticating // with an IAM on an ec2 instance (in which case the instance credentials will // be summoned when GetAuth is called) accessKey, ok := parameters["accesskeyid"] if !ok { return nil, fmt.Errorf("No accesskeyid parameter provided") } secretKey, ok := parameters["accesskeysecret"] if !ok { return nil, fmt.Errorf("No accesskeysecret parameter provided") } regionName, ok := parameters["region"] if !ok || fmt.Sprint(regionName) == "" { return nil, fmt.Errorf("No region parameter provided") } bucket, ok := parameters["bucket"] if !ok || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } internalBool := false internal, ok := parameters["internal"] if ok { internalBool, ok = internal.(bool) if !ok { return nil, fmt.Errorf("The internal parameter should be a boolean") } } encryptBool := false encrypt, ok := parameters["encrypt"] if ok { encryptBool, ok = encrypt.(bool) if !ok { return nil, fmt.Errorf("The encrypt parameter should be a boolean") } } secureBool := true secure, ok := parameters["secure"] if ok { secureBool, ok = secure.(bool) if !ok { return nil, fmt.Errorf("The secure parameter should be a boolean") } } chunkSize := int64(defaultChunkSize) chunkSizeParam, ok := parameters["chunksize"] if ok { switch v := chunkSizeParam.(type) { case string: vv, err := strconv.ParseInt(v, 0, 64) if err != nil { return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam) } chunkSize = vv case int64: chunkSize = v case int, uint, int32, uint32, uint64: chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() default: return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) } if chunkSize < minChunkSize { return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) } } rootDirectory, ok := parameters["rootdirectory"] if !ok { rootDirectory = "" } endpoint, ok := parameters["endpoint"] if !ok { endpoint = "" } params := DriverParameters{ AccessKeyID: fmt.Sprint(accessKey), AccessKeySecret: fmt.Sprint(secretKey), Bucket: fmt.Sprint(bucket), Region: oss.Region(fmt.Sprint(regionName)), ChunkSize: chunkSize, RootDirectory: fmt.Sprint(rootDirectory), Encrypt: encryptBool, Secure: secureBool, Internal: internalBool, Endpoint: fmt.Sprint(endpoint), } return New(params) } // New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and // bucketName func New(params DriverParameters) (*Driver, error) { client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure) client.SetEndpoint(params.Endpoint) bucket := client.Bucket(params.Bucket) client.SetDebug(false) // Validate that the given credentials have at least read permissions in the // given bucket scope. if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil { return nil, err } // TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new OSS client while another one is running on the same bucket. 
d := &driver{ Client: client, Bucket: bucket, ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, zeros: make([]byte, params.ChunkSize), } d.pool.New = func() interface{} { return make([]byte, d.ChunkSize) } return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: d, }, }, }, nil } // Implement the storagedriver.StorageDriver interface func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Bucket.Get(d.ossPath(path)) if err != nil { return nil, parseError(path, err) } return content, nil } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers) if err != nil { return nil, parseError(path, err) } // Due to Aliyun OSS API, status 200 and whole object will be return instead of an // InvalidRange error when range is invalid. // // OSS sever will always return http.StatusPartialContent if range is acceptable. if resp.StatusCode != http.StatusPartialContent { resp.Body.Close() return ioutil.NopCloser(bytes.NewReader(nil)), nil } return resp.Body, nil } // WriteStream stores the contents of the provided io.Reader at a // location designated by the given path. The driver will know it has // received the full contents when the reader returns io.EOF. The number // of successfully READ bytes will be returned, even if an error is // returned. May be used to resume writing a stream by providing a nonzero // offset. Offsets past the current size will write from the position // beyond the end of the file. func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { partNumber := 1 bytesRead := 0 var putErrChan chan error parts := []oss.Part{} var part oss.Part done := make(chan struct{}) // stopgap to free up waiting goroutines multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions()) if err != nil { return 0, err } buf := d.getbuf() // We never want to leave a dangling multipart upload, our only consistent state is // when there is a whole object at path. This is in order to remain consistent with // the stat call. // // Note that if the machine dies before executing the defer, we will be left with a dangling // multipart upload, which will eventually be cleaned up, but we will lose all of the progress // made prior to the machine crashing. 
defer func() { if putErrChan != nil { if putErr := <-putErrChan; putErr != nil { err = putErr } } if len(parts) > 0 { if multi == nil { // Parts should be empty if the multi is not initialized panic("Unreachable") } else { if multi.Complete(parts) != nil { multi.Abort() } } } d.putbuf(buf) // needs to be here to pick up new buf value close(done) // free up any waiting goroutines }() // Fills from 0 to total from current fromSmallCurrent := func(total int64) error { current, err := d.ReadStream(ctx, path, 0) if err != nil { return err } bytesRead = 0 for int64(bytesRead) < total { //The loop should very rarely enter a second iteration nn, err := current.Read(buf[bytesRead:total]) bytesRead += nn if err != nil { if err != io.EOF { return err } break } } return nil } // Fills from parameter to chunkSize from reader fromReader := func(from int64) error { bytesRead = 0 for from+int64(bytesRead) < d.ChunkSize { nn, err := reader.Read(buf[from+int64(bytesRead):]) totalRead += int64(nn) bytesRead += nn if err != nil { if err != io.EOF { return err } break } } if putErrChan == nil { putErrChan = make(chan error) } else { if putErr := <-putErrChan; putErr != nil { putErrChan = nil return putErr } } go func(bytesRead int, from int64, buf []byte) { defer d.putbuf(buf) // this buffer gets dropped after this call // DRAGONS(stevvooe): There are few things one might want to know // about this section. First, the putErrChan is expecting an error // and a nil or just a nil to come through the channel. This is // covered by the silly defer below. The other aspect is the OSS // retry backoff to deal with RequestTimeout errors. Even though // the underlying OSS library should handle it, it doesn't seem to // be part of the shouldRetry function (see denverdino/aliyungo/oss). defer func() { select { case putErrChan <- nil: // for some reason, we do this no matter what. case <-done: return // ensure we don't leak the goroutine } }() if bytesRead <= 0 { return } var err error var part oss.Part part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout) if err != nil { logrus.Errorf("error putting part, aborting: %v", err) select { case putErrChan <- err: case <-done: return // don't leak the goroutine } } // parts and partNumber are safe, because this function is the // only one modifying them and we force it to be executed // serially. 
parts = append(parts, part) partNumber++ }(bytesRead, from, buf) buf = d.getbuf() // use a new buffer for the next call return nil } if offset > 0 { resp, err := d.Bucket.Head(d.ossPath(path), nil) if err != nil { if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != http.StatusNotFound { return 0, err } } currentLength := int64(0) if err == nil { currentLength = resp.ContentLength } if currentLength >= offset { if offset < d.ChunkSize { // chunkSize > currentLength >= offset if err = fromSmallCurrent(offset); err != nil { return totalRead, err } if err = fromReader(offset); err != nil { return totalRead, err } if totalRead+offset < d.ChunkSize { return totalRead, nil } } else { // currentLength >= offset >= chunkSize _, part, err = multi.PutPartCopy(partNumber, oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, d.Bucket.Path(d.ossPath(path))) if err != nil { return 0, err } parts = append(parts, part) partNumber++ } } else { // Fills between parameters with 0s but only when to - from <= chunkSize fromZeroFillSmall := func(from, to int64) error { bytesRead = 0 for from+int64(bytesRead) < to { nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) bytesRead += nn if err != nil { return err } } return nil } // Fills between parameters with 0s, making new parts fromZeroFillLarge := func(from, to int64) error { bytesRead64 := int64(0) for to-(from+bytesRead64) >= d.ChunkSize { part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout) if err != nil { return err } bytesRead64 += d.ChunkSize parts = append(parts, part) partNumber++ } return fromZeroFillSmall(0, (to-from)%d.ChunkSize) } // currentLength < offset if currentLength < d.ChunkSize { if offset < d.ChunkSize { // chunkSize > offset > currentLength if err = fromSmallCurrent(currentLength); err != nil { return totalRead, err } if err = fromZeroFillSmall(currentLength, offset); err != nil { return totalRead, err } if err = fromReader(offset); err != nil { return totalRead, err } if totalRead+offset < d.ChunkSize { return totalRead, nil } } else { // offset >= chunkSize > currentLength if err = fromSmallCurrent(currentLength); err != nil { return totalRead, err } if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { return totalRead, err } part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout) if err != nil { return totalRead, err } parts = append(parts, part) partNumber++ //Zero fill from chunkSize up to offset, then some reader if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { return totalRead, err } if err = fromReader(offset % d.ChunkSize); err != nil { return totalRead, err } if totalRead+(offset%d.ChunkSize) < d.ChunkSize { return totalRead, nil } } } else { // offset > currentLength >= chunkSize _, part, err = multi.PutPartCopy(partNumber, oss.CopyOptions{}, d.Bucket.Path(d.ossPath(path))) if err != nil { return 0, err } parts = append(parts, part) partNumber++ //Zero fill from currentLength up to offset, then some reader if err = fromZeroFillLarge(currentLength, offset); err != nil { return totalRead, err } if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { return totalRead, err } if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { return totalRead, nil } } } } for { if err = fromReader(0); err != nil { return totalRead, err } if int64(bytesRead) < d.ChunkSize { break } } return totalRead, nil } // Stat retrieves the FileInfo for the 
given path, including the current size // in bytes and the creation time. func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1) if err != nil { return nil, err } fi := storagedriver.FileInfoFields{ Path: path, } if len(listResponse.Contents) == 1 { if listResponse.Contents[0].Key != d.ossPath(path) { fi.IsDir = true } else { fi.IsDir = false fi.Size = listResponse.Contents[0].Size timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) if err != nil { return nil, err } fi.ModTime = timestamp } } else if len(listResponse.CommonPrefixes) == 1 { fi.IsDir = true } else { return nil, storagedriver.PathNotFoundError{Path: path} } return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the given path. func (d *driver) List(ctx context.Context, opath string) ([]string, error) { path := opath if path != "/" && opath[len(path)-1] != '/' { path = path + "/" } // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". // In those cases, there is no root prefix to replace and we must actually add a "/" to all // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp prefix := "" if d.ossPath("") == "" { prefix = "/" } listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax) if err != nil { return nil, parseError(opath, err) } files := []string{} directories := []string{} for { for _, key := range listResponse.Contents { files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1)) } for _, commonPrefix := range listResponse.CommonPrefixes { directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1)) } if listResponse.IsTruncated { listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax) if err != nil { return nil, err } } else { break } } if opath != "/" { if len(files) == 0 && len(directories) == 0 { // Treat empty response as missing directory, since we don't actually // have directories in s3. return nil, storagedriver.PathNotFoundError{Path: opath} } } return append(files, directories...), nil } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath)) err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath), d.getContentType(), getPermissions(), oss.Options{}) if err != nil { logrus.Errorf("Failed for move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err) return parseError(sourcePath, err) } return d.Delete(ctx, sourcePath) } // Delete recursively deletes all objects stored at "path" and its subpaths. 
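// Illustrative sketch (not part of the original source): asking the driver for
// a signed URL via URLFor (defined below). "method" and "expiry" are the only
// option keys it inspects, and only GET is accepted; the blob path is an
// arbitrary example.
func exampleOSSSignedURL(ctx context.Context, d *Driver) (string, error) {
	return d.URLFor(ctx, "/example/blob", map[string]interface{}{
		"method": "GET",
		"expiry": time.Now().Add(15 * time.Minute),
	})
}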
func (d *driver) Delete(ctx context.Context, path string) error { listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax) if err != nil || len(listResponse.Contents) == 0 { return storagedriver.PathNotFoundError{Path: path} } ossObjects := make([]oss.Object, listMax) for len(listResponse.Contents) > 0 { for index, key := range listResponse.Contents { ossObjects[index].Key = key.Key } err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]}) if err != nil { return nil } listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax) if err != nil { return err } } return nil } // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { methodString := "GET" method, ok := options["method"] if ok { methodString, ok = method.(string) if !ok || (methodString != "GET") { return "", storagedriver.ErrUnsupportedMethod{} } } expiresTime := time.Now().Add(20 * time.Minute) expires, ok := options["expiry"] if ok { et, ok := expires.(time.Time) if ok { expiresTime = et } } logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime) testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil) logrus.Infof("testURL: %s", testURL) return testURL, nil } func (d *driver) ossPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") } func parseError(path string, err error) error { if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") { return storagedriver.PathNotFoundError{Path: path} } return err } func hasCode(err error, code string) bool { ossErr, ok := err.(*oss.Error) return ok && ossErr.Code == code } func (d *driver) getOptions() oss.Options { return oss.Options{ServerSideEncryption: d.Encrypt} } func getPermissions() oss.ACL { return oss.Private } func (d *driver) getContentType() string { return "application/octet-stream" } // getbuf returns a buffer from the driver's pool with length d.ChunkSize. func (d *driver) getbuf() []byte { return d.pool.Get().([]byte) } func (d *driver) putbuf(p []byte) { copy(p, d.zeros) d.pool.Put(p) } distribution-2.3.0/registry/storage/driver/oss/oss_test.go000066400000000000000000000071761265472114500240620ustar00rootroot00000000000000// +build include_oss package oss import ( "io/ioutil" alioss "github.com/denverdino/aliyungo/oss" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" //"log" "os" "strconv" "testing" "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. 
func Test(t *testing.T) { check.TestingT(t) } var ossDriverConstructor func(rootDirectory string) (*Driver, error) var skipCheck func() string func init() { accessKey := os.Getenv("ALIYUN_ACCESS_KEY_ID") secretKey := os.Getenv("ALIYUN_ACCESS_KEY_SECRET") bucket := os.Getenv("OSS_BUCKET") region := os.Getenv("OSS_REGION") internal := os.Getenv("OSS_INTERNAL") encrypt := os.Getenv("OSS_ENCRYPT") secure := os.Getenv("OSS_SECURE") endpoint := os.Getenv("OSS_ENDPOINT") root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(root) ossDriverConstructor = func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) if err != nil { return nil, err } } secureBool := false if secure != "" { secureBool, err = strconv.ParseBool(secure) if err != nil { return nil, err } } internalBool := false if internal != "" { internalBool, err = strconv.ParseBool(internal) if err != nil { return nil, err } } parameters := DriverParameters{ AccessKeyID: accessKey, AccessKeySecret: secretKey, Bucket: bucket, Region: alioss.Region(region), Internal: internalBool, ChunkSize: minChunkSize, RootDirectory: rootDirectory, Encrypt: encryptBool, Secure: secureBool, Endpoint: endpoint, } return New(parameters) } // Skip OSS storage driver tests if environment variable parameters are not provided skipCheck = func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { return "Must set ALIYUN_ACCESS_KEY_ID, ALIYUN_ACCESS_KEY_SECRET, OSS_REGION, OSS_BUCKET, and OSS_ENCRYPT to run OSS tests" } return "" } testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return ossDriverConstructor(root) }, skipCheck) } func TestEmptyRootList(t *testing.T) { if skipCheck() != "" { t.Skip(skipCheck()) } validRoot, err := ioutil.TempDir("", "driver-") if err != nil { t.Fatalf("unexpected error creating temporary directory: %v", err) } defer os.Remove(validRoot) rootedDriver, err := ossDriverConstructor(validRoot) if err != nil { t.Fatalf("unexpected error creating rooted driver: %v", err) } emptyRootDriver, err := ossDriverConstructor("") if err != nil { t.Fatalf("unexpected error creating empty root driver: %v", err) } slashRootDriver, err := ossDriverConstructor("/") if err != nil { t.Fatalf("unexpected error creating slash root driver: %v", err) } filename := "/test" contents := []byte("contents") ctx := context.Background() err = rootedDriver.PutContent(ctx, filename, contents) if err != nil { t.Fatalf("unexpected error creating content: %v", err) } defer rootedDriver.Delete(ctx, filename) keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } } distribution-2.3.0/registry/storage/driver/rados/000077500000000000000000000000001265472114500221615ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/rados/doc.go000066400000000000000000000002221265472114500232510ustar00rootroot00000000000000// Package rados implements the rados storage driver backend. Support can be // enabled by including the "include_rados" build tag. 
package rados distribution-2.3.0/registry/storage/driver/rados/rados.go000066400000000000000000000342241265472114500236250ustar00rootroot00000000000000// +build include_rados package rados import ( "bytes" "encoding/binary" "fmt" "io" "io/ioutil" "path" "strconv" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/uuid" "github.com/noahdesu/go-ceph/rados" ) const driverName = "rados" // Prefix all the stored blob const objectBlobPrefix = "blob:" // Stripes objects size to 4M const defaultChunkSize = 4 << 20 const defaultXattrTotalSizeName = "total-size" // Max number of keys fetched from omap at each read operation const defaultKeysFetched = 1 //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { poolname string username string chunksize uint64 } func init() { factory.Register(driverName, &radosDriverFactory{}) } // radosDriverFactory implements the factory.StorageDriverFactory interface type radosDriverFactory struct{} func (factory *radosDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } type driver struct { Conn *rados.Conn Ioctx *rados.IOContext chunksize uint64 } type baseEmbed struct { base.Base } // Driver is a storagedriver.StorageDriver implementation backed by Ceph RADOS // Objects are stored at absolute keys in the provided bucket. type Driver struct { baseEmbed } // FromParameters constructs a new Driver with a given parameters map // Required parameters: // - poolname: the ceph pool name func FromParameters(parameters map[string]interface{}) (*Driver, error) { pool, ok := parameters["poolname"] if !ok { return nil, fmt.Errorf("No poolname parameter provided") } username, ok := parameters["username"] if !ok { username = "" } chunksize := uint64(defaultChunkSize) chunksizeParam, ok := parameters["chunksize"] if ok { chunksize, ok = chunksizeParam.(uint64) if !ok { return nil, fmt.Errorf("The chunksize parameter should be a number") } } params := DriverParameters{ fmt.Sprint(pool), fmt.Sprint(username), chunksize, } return New(params) } // New constructs a new Driver func New(params DriverParameters) (*Driver, error) { var conn *rados.Conn var err error if params.username != "" { log.Infof("Opening connection to pool %s using user %s", params.poolname, params.username) conn, err = rados.NewConnWithUser(params.username) } else { log.Infof("Opening connection to pool %s", params.poolname) conn, err = rados.NewConn() } if err != nil { return nil, err } err = conn.ReadDefaultConfigFile() if err != nil { return nil, err } err = conn.Connect() if err != nil { return nil, err } log.Infof("Connected") ioctx, err := conn.OpenIOContext(params.poolname) log.Infof("Connected to pool %s", params.poolname) if err != nil { return nil, err } d := &driver{ Ioctx: ioctx, Conn: conn, chunksize: params.chunksize, } return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: d, }, }, }, nil } // Implement the storagedriver.StorageDriver interface func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. 
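// Illustrative sketch (not part of the original source): the parameters map the
// rados driver accepts, per FromParameters above. Only poolname is required;
// username is optional and chunksize, if given, must be a uint64 (it defaults
// to the 4 MB defaultChunkSize). The pool and user names are placeholders.
func exampleRadosFromParameters() (*Driver, error) {
	return FromParameters(map[string]interface{}{
		"poolname":  "docker-registry",
		"username":  "admin",
		"chunksize": uint64(defaultChunkSize),
	})
}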
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { rc, err := d.ReadStream(ctx, path, 0) if err != nil { return nil, err } defer rc.Close() p, err := ioutil.ReadAll(rc) if err != nil { return nil, err } return p, nil } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { if _, err := d.WriteStream(ctx, path, 0, bytes.NewReader(contents)); err != nil { return err } return nil } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. type readStreamReader struct { driver *driver oid string size uint64 offset uint64 } func (r *readStreamReader) Read(b []byte) (n int, err error) { // Determine the part available to read bufferOffset := uint64(0) bufferSize := uint64(len(b)) // End of the object, read less than the buffer size if bufferSize > r.size-r.offset { bufferSize = r.size - r.offset } // Fill `b` for bufferOffset < bufferSize { // Get the offset in the object chunk chunkedOid, chunkedOffset := r.driver.getChunkNameFromOffset(r.oid, r.offset) // Determine the best size to read bufferEndOffset := bufferSize if bufferEndOffset-bufferOffset > r.driver.chunksize-chunkedOffset { bufferEndOffset = bufferOffset + (r.driver.chunksize - chunkedOffset) } // Read the chunk n, err = r.driver.Ioctx.Read(chunkedOid, b[bufferOffset:bufferEndOffset], chunkedOffset) if err != nil { return int(bufferOffset), err } bufferOffset += uint64(n) r.offset += uint64(n) } // EOF if the offset is at the end of the object if r.offset == r.size { return int(bufferOffset), io.EOF } return int(bufferOffset), nil } func (r *readStreamReader) Close() error { return nil } func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { // get oid from filename oid, err := d.getOid(path) if err != nil { return nil, err } // get object stat stat, err := d.Stat(ctx, path) if err != nil { return nil, err } if offset > stat.Size() { return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset} } return &readStreamReader{ driver: d, oid: oid, size: uint64(stat.Size()), offset: uint64(offset), }, nil } func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { buf := make([]byte, d.chunksize) totalRead = 0 oid, err := d.getOid(path) if err != nil { switch err.(type) { // Trying to write new object, generate new blob identifier for it case storagedriver.PathNotFoundError: oid = d.generateOid() err = d.putOid(path, oid) if err != nil { return 0, err } default: return 0, err } } else { // Check total object size only for existing ones totalSize, err := d.getXattrTotalSize(ctx, oid) if err != nil { return 0, err } // If offset if after the current object size, fill the gap with zeros for totalSize < uint64(offset) { sizeToWrite := d.chunksize if totalSize-uint64(offset) < sizeToWrite { sizeToWrite = totalSize - uint64(offset) } chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(totalSize)) err = d.Ioctx.Write(chunkName, buf[:sizeToWrite], uint64(chunkOffset)) if err != nil { return totalRead, err } totalSize += sizeToWrite } } // Writer for { // Align to chunk size sizeRead := uint64(0) sizeToRead := uint64(offset+totalRead) % d.chunksize if sizeToRead == 0 { sizeToRead = d.chunksize } // Read from `reader` for sizeRead < sizeToRead { nn, err := reader.Read(buf[sizeRead:sizeToRead]) sizeRead += uint64(nn) if err != nil { if 
err != io.EOF { return totalRead, err } break } } // End of file and nothing was read if sizeRead == 0 { break } // Write chunk object chunkName, chunkOffset := d.getChunkNameFromOffset(oid, uint64(offset+totalRead)) err = d.Ioctx.Write(chunkName, buf[:sizeRead], uint64(chunkOffset)) if err != nil { return totalRead, err } // Update total object size as xattr in the first chunk of the object err = d.setXattrTotalSize(oid, uint64(offset+totalRead)+sizeRead) if err != nil { return totalRead, err } totalRead += int64(sizeRead) // End of file if sizeRead < sizeToRead { break } } return totalRead, nil } // Stat retrieves the FileInfo for the given path, including the current size func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { // get oid from filename oid, err := d.getOid(path) if err != nil { return nil, err } // the path is a virtual directory? if oid == "" { return storagedriver.FileInfoInternal{ FileInfoFields: storagedriver.FileInfoFields{ Path: path, Size: 0, IsDir: true, }, }, nil } // stat first chunk stat, err := d.Ioctx.Stat(oid + "-0") if err != nil { return nil, err } // get total size of chunked object totalSize, err := d.getXattrTotalSize(ctx, oid) if err != nil { return nil, err } return storagedriver.FileInfoInternal{ FileInfoFields: storagedriver.FileInfoFields{ Path: path, Size: int64(totalSize), ModTime: stat.ModTime, }, }, nil } // List returns a list of the objects that are direct descendants of the given path. func (d *driver) List(ctx context.Context, dirPath string) ([]string, error) { files, err := d.listDirectoryOid(dirPath) if err != nil { return nil, storagedriver.PathNotFoundError{Path: dirPath} } keys := make([]string, 0, len(files)) for k := range files { if k != dirPath { keys = append(keys, path.Join(dirPath, k)) } } return keys, nil } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { // Get oid oid, err := d.getOid(sourcePath) if err != nil { return err } // Move reference err = d.putOid(destPath, oid) if err != nil { return err } // Delete old reference err = d.deleteOid(sourcePath) if err != nil { return err } return nil } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, objectPath string) error { // Get oid oid, err := d.getOid(objectPath) if err != nil { return err } // Deleting virtual directory if oid == "" { objects, err := d.listDirectoryOid(objectPath) if err != nil { return err } for object := range objects { err = d.Delete(ctx, path.Join(objectPath, object)) if err != nil { return err } } } else { // Delete object chunks totalSize, err := d.getXattrTotalSize(ctx, oid) if err != nil { return err } for offset := uint64(0); offset < totalSize; offset += d.chunksize { chunkName, _ := d.getChunkNameFromOffset(oid, offset) err = d.Ioctx.Delete(chunkName) if err != nil { return err } } // Delete reference err = d.deleteOid(objectPath) if err != nil { return err } } return nil } // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. 
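// Illustrative sketch (not part of the original source): how a blob is laid out
// across RADOS objects. getChunkNameFromOffset (defined below) maps an absolute
// byte offset to the chunk object "<oid>-<n>" holding it plus the offset inside
// that chunk. The example assumes the driver was built with defaultChunkSize.
func exampleChunkMapping(d *driver) (string, uint64) {
	// With the 4 MB defaultChunkSize, byte offset 9 MB of blob "blob:example"
	// lives 1 MB into the object "blob:example-2".
	return d.getChunkNameFromOffset("blob:example", 9<<20)
}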
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { return "", storagedriver.ErrUnsupportedMethod{} } // Generate a blob identifier func (d *driver) generateOid() string { return objectBlobPrefix + uuid.Generate().String() } // Reference a object and its hierarchy func (d *driver) putOid(objectPath string, oid string) error { directory := path.Dir(objectPath) base := path.Base(objectPath) createParentReference := true // After creating this reference, skip the parents referencing since the // hierarchy already exists if oid == "" { firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) if (err == nil) && (len(firstReference) > 0) { createParentReference = false } } oids := map[string][]byte{ base: []byte(oid), } // Reference object err := d.Ioctx.SetOmap(directory, oids) if err != nil { return err } // Esure parent virtual directories if createParentReference { return d.putOid(directory, "") } return nil } // Get the object identifier from an object name func (d *driver) getOid(objectPath string) (string, error) { directory := path.Dir(objectPath) base := path.Base(objectPath) files, err := d.Ioctx.GetOmapValues(directory, "", base, 1) if (err != nil) || (files[base] == nil) { return "", storagedriver.PathNotFoundError{Path: objectPath} } return string(files[base]), nil } // List the objects of a virtual directory func (d *driver) listDirectoryOid(path string) (list map[string][]byte, err error) { return d.Ioctx.GetAllOmapValues(path, "", "", defaultKeysFetched) } // Remove a file from the files hierarchy func (d *driver) deleteOid(objectPath string) error { // Remove object reference directory := path.Dir(objectPath) base := path.Base(objectPath) err := d.Ioctx.RmOmapKeys(directory, []string{base}) if err != nil { return err } // Remove virtual directory if empty (no more references) firstReference, err := d.Ioctx.GetOmapValues(directory, "", "", 1) if err != nil { return err } if len(firstReference) == 0 { // Delete omap err := d.Ioctx.Delete(directory) if err != nil { return err } // Remove reference on parent omaps if directory != "" { return d.deleteOid(directory) } } return nil } // Takes an offset in an chunked object and return the chunk name and a new // offset in this chunk object func (d *driver) getChunkNameFromOffset(oid string, offset uint64) (string, uint64) { chunkID := offset / d.chunksize chunkedOid := oid + "-" + strconv.FormatInt(int64(chunkID), 10) chunkedOffset := offset % d.chunksize return chunkedOid, chunkedOffset } // Set the total size of a chunked object `oid` func (d *driver) setXattrTotalSize(oid string, size uint64) error { // Convert uint64 `size` to []byte xattr := make([]byte, binary.MaxVarintLen64) binary.LittleEndian.PutUint64(xattr, size) // Save the total size as a xattr in the first chunk return d.Ioctx.SetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) } // Get the total size of the chunked object `oid` stored as xattr func (d *driver) getXattrTotalSize(ctx context.Context, oid string) (uint64, error) { // Fetch xattr as []byte xattr := make([]byte, binary.MaxVarintLen64) xattrLength, err := d.Ioctx.GetXattr(oid+"-0", defaultXattrTotalSizeName, xattr) if err != nil { return 0, err } if xattrLength != len(xattr) { context.GetLogger(ctx).Errorf("object %s xattr length mismatch: %d != %d", oid, xattrLength, len(xattr)) return 0, storagedriver.PathNotFoundError{Path: oid} } // Convert []byte as uint64 totalSize := binary.LittleEndian.Uint64(xattr) return totalSize, nil } 
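// Illustrative sketch (not part of the original source): the size bookkeeping
// used by setXattrTotalSize/getXattrTotalSize above. The blob's total length is
// written little-endian into a fixed-width xattr on the first chunk ("<oid>-0"),
// so Stat can report a size without summing the individual chunk objects.
func exampleTotalSizeRoundTrip(size uint64) uint64 {
	xattr := make([]byte, binary.MaxVarintLen64)
	binary.LittleEndian.PutUint64(xattr, size) // encode, as setXattrTotalSize does
	return binary.LittleEndian.Uint64(xattr)   // decode, as getXattrTotalSize does
}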
distribution-2.3.0/registry/storage/driver/rados/rados_test.go000066400000000000000000000014151265472114500246600ustar00rootroot00000000000000// +build include_rados package rados import ( "os" "testing" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } func init() { poolname := os.Getenv("RADOS_POOL") username := os.Getenv("RADOS_USER") driverConstructor := func() (storagedriver.StorageDriver, error) { parameters := DriverParameters{ poolname, username, defaultChunkSize, } return New(parameters) } skipCheck := func() string { if poolname == "" { return "RADOS_POOL must be set to run Rado tests" } return "" } testsuites.RegisterSuite(driverConstructor, skipCheck) } distribution-2.3.0/registry/storage/driver/s3/000077500000000000000000000000001265472114500213765ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/s3/s3.go000066400000000000000000000543531265472114500222640ustar00rootroot00000000000000// Package s3 provides a storagedriver.StorageDriver implementation to // store blobs in Amazon S3 cloud storage. // // This package leverages the AdRoll/goamz client library for interfacing with // s3. // // Because s3 is a key, value store the Stat call does not support last modification // time for directories (directories are an abstraction for key, value stores) // // Keep in mind that s3 guarantees only eventual consistency, so do not assume // that a successful write will mean immediate access to the data written (although // in most regions a new object put has guaranteed read after write). The only true // guarantee is that once you call Stat and receive a certain file size, that much of // the file is already accessible. 
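// A minimal construction sketch (all values below are placeholders; the parameter
// names are the ones accepted by FromParameters in this package):
//
//	driver, err := FromParameters(map[string]interface{}{
//		"accesskey":     "AKIAEXAMPLE",
//		"secretkey":     "example-secret",
//		"region":        "us-east-1",
//		"bucket":        "my-registry-bucket",
//		"rootdirectory": "/registry",
//	})
//	if err != nil {
//		// handle configuration error
//	}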
package s3 import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "reflect" "strconv" "strings" "sync" "time" "github.com/AdRoll/goamz/aws" "github.com/AdRoll/goamz/s3" "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" ) const driverName = "s3" // minChunkSize defines the minimum multipart upload chunk size // S3 API requires multipart upload chunks to be at least 5MB const minChunkSize = 5 << 20 const defaultChunkSize = 2 * minChunkSize // listMax is the largest amount of objects you can request from S3 in a list call const listMax = 1000 //DriverParameters A struct that encapsulates all of the driver parameters after all values have been set type DriverParameters struct { AccessKey string SecretKey string Bucket string Region aws.Region Encrypt bool Secure bool V4Auth bool ChunkSize int64 RootDirectory string } func init() { factory.Register(driverName, &s3DriverFactory{}) } // s3DriverFactory implements the factory.StorageDriverFactory interface type s3DriverFactory struct{} func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } type driver struct { S3 *s3.S3 Bucket *s3.Bucket ChunkSize int64 Encrypt bool RootDirectory string pool sync.Pool // pool []byte buffers used for WriteStream zeros []byte // shared, zero-valued buffer used for WriteStream } type baseEmbed struct { base.Base } // Driver is a storagedriver.StorageDriver implementation backed by Amazon S3 // Objects are stored at absolute keys in the provided bucket. type Driver struct { baseEmbed } // FromParameters constructs a new Driver with a given parameters map // Required parameters: // - accesskey // - secretkey // - region // - bucket // - encrypt func FromParameters(parameters map[string]interface{}) (*Driver, error) { // Providing no values for these is valid in case the user is authenticating // with an IAM on an ec2 instance (in which case the instance credentials will // be summoned when GetAuth is called) accessKey, ok := parameters["accesskey"] if !ok { accessKey = "" } secretKey, ok := parameters["secretkey"] if !ok { secretKey = "" } regionName, ok := parameters["region"] if !ok || fmt.Sprint(regionName) == "" { return nil, fmt.Errorf("No region parameter provided") } region := aws.GetRegion(fmt.Sprint(regionName)) if region.Name == "" { return nil, fmt.Errorf("Invalid region provided: %v", region) } bucket, ok := parameters["bucket"] if !ok || fmt.Sprint(bucket) == "" { return nil, fmt.Errorf("No bucket parameter provided") } encryptBool := false encrypt, ok := parameters["encrypt"] if ok { encryptBool, ok = encrypt.(bool) if !ok { return nil, fmt.Errorf("The encrypt parameter should be a boolean") } } secureBool := true secure, ok := parameters["secure"] if ok { secureBool, ok = secure.(bool) if !ok { return nil, fmt.Errorf("The secure parameter should be a boolean") } } v4AuthBool := false v4Auth, ok := parameters["v4auth"] if ok { v4AuthBool, ok = v4Auth.(bool) if !ok { return nil, fmt.Errorf("The v4auth parameter should be a boolean") } } chunkSize := int64(defaultChunkSize) chunkSizeParam, ok := parameters["chunksize"] if ok { switch v := chunkSizeParam.(type) { case string: vv, err := strconv.ParseInt(v, 0, 64) if err != nil { return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", 
chunkSizeParam) } chunkSize = vv case int64: chunkSize = v case int, uint, int32, uint32, uint64: chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int() default: return nil, fmt.Errorf("invalid valud for chunksize: %#v", chunkSizeParam) } if chunkSize < minChunkSize { return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize) } } rootDirectory, ok := parameters["rootdirectory"] if !ok { rootDirectory = "" } params := DriverParameters{ fmt.Sprint(accessKey), fmt.Sprint(secretKey), fmt.Sprint(bucket), region, encryptBool, secureBool, v4AuthBool, chunkSize, fmt.Sprint(rootDirectory), } return New(params) } // New constructs a new Driver with the given AWS credentials, region, encryption flag, and // bucketName func New(params DriverParameters) (*Driver, error) { auth, err := aws.GetAuth(params.AccessKey, params.SecretKey, "", time.Time{}) if err != nil { return nil, fmt.Errorf("unable to resolve aws credentials, please ensure that 'accesskey' and 'secretkey' are properly set or the credentials are available in $HOME/.aws/credentials: %v", err) } if !params.Secure { params.Region.S3Endpoint = strings.Replace(params.Region.S3Endpoint, "https", "http", 1) } s3obj := s3.New(auth, params.Region) bucket := s3obj.Bucket(params.Bucket) if params.V4Auth { s3obj.Signature = aws.V4Signature } else { if params.Region.Name == "eu-central-1" { return nil, fmt.Errorf("The eu-central-1 region only works with v4 authentication") } } // TODO Currently multipart uploads have no timestamps, so this would be unwise // if you initiated a new s3driver while another one is running on the same bucket. // multis, _, err := bucket.ListMulti("", "") // if err != nil { // return nil, err // } // for _, multi := range multis { // err := multi.Abort() // //TODO appropriate to do this error checking? // if err != nil { // return nil, err // } // } d := &driver{ S3: s3obj, Bucket: bucket, ChunkSize: params.ChunkSize, Encrypt: params.Encrypt, RootDirectory: params.RootDirectory, zeros: make([]byte, params.ChunkSize), } d.pool.New = func() interface{} { return make([]byte, d.ChunkSize) } return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: d, }, }, }, nil } // Implement the storagedriver.StorageDriver interface func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Bucket.Get(d.s3Path(path)) if err != nil { return nil, parseError(path, err) } return content, nil } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { return parseError(path, d.Bucket.Put(d.s3Path(path), contents, d.getContentType(), getPermissions(), d.getOptions())) } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. 
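// The read below is issued as an HTTP Range request ("bytes=<offset>-"); if S3 answers
// with an InvalidRange error because the offset is at or beyond the end of the object,
// an empty reader is returned instead of an error.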
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(http.Header) headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-") resp, err := d.Bucket.GetResponseWithHeaders(d.s3Path(path), headers) if err != nil { if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "InvalidRange" { return ioutil.NopCloser(bytes.NewReader(nil)), nil } return nil, parseError(path, err) } return resp.Body, nil } // WriteStream stores the contents of the provided io.Reader at a // location designated by the given path. The driver will know it has // received the full contents when the reader returns io.EOF. The number // of successfully READ bytes will be returned, even if an error is // returned. May be used to resume writing a stream by providing a nonzero // offset. Offsets past the current size will write from the position // beyond the end of the file. func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { partNumber := 1 bytesRead := 0 var putErrChan chan error parts := []s3.Part{} var part s3.Part done := make(chan struct{}) // stopgap to free up waiting goroutines multi, err := d.Bucket.InitMulti(d.s3Path(path), d.getContentType(), getPermissions(), d.getOptions()) if err != nil { return 0, err } buf := d.getbuf() // We never want to leave a dangling multipart upload, our only consistent state is // when there is a whole object at path. This is in order to remain consistent with // the stat call. // // Note that if the machine dies before executing the defer, we will be left with a dangling // multipart upload, which will eventually be cleaned up, but we will lose all of the progress // made prior to the machine crashing. defer func() { if putErrChan != nil { if putErr := <-putErrChan; putErr != nil { err = putErr } } if len(parts) > 0 { if multi == nil { // Parts should be empty if the multi is not initialized panic("Unreachable") } else { if multi.Complete(parts) != nil { multi.Abort() } } } d.putbuf(buf) // needs to be here to pick up new buf value close(done) // free up any waiting goroutines }() // Fills from 0 to total from current fromSmallCurrent := func(total int64) error { current, err := d.ReadStream(ctx, path, 0) if err != nil { return err } bytesRead = 0 for int64(bytesRead) < total { //The loop should very rarely enter a second iteration nn, err := current.Read(buf[bytesRead:total]) bytesRead += nn if err != nil { if err != io.EOF { return err } break } } return nil } // Fills from parameter to chunkSize from reader fromReader := func(from int64) error { bytesRead = 0 for from+int64(bytesRead) < d.ChunkSize { nn, err := reader.Read(buf[from+int64(bytesRead):]) totalRead += int64(nn) bytesRead += nn if err != nil { if err != io.EOF { return err } break } } if putErrChan == nil { putErrChan = make(chan error) } else { if putErr := <-putErrChan; putErr != nil { putErrChan = nil return putErr } } go func(bytesRead int, from int64, buf []byte) { defer d.putbuf(buf) // this buffer gets dropped after this call // DRAGONS(stevvooe): There are few things one might want to know // about this section. First, the putErrChan is expecting an error // and a nil or just a nil to come through the channel. This is // covered by the silly defer below. The other aspect is the s3 // retry backoff to deal with RequestTimeout errors. Even though // the underlying s3 library should handle it, it doesn't seem to // be part of the shouldRetry function (see AdRoll/goamz/s3). 
defer func() { select { case putErrChan <- nil: // for some reason, we do this no matter what. case <-done: return // ensure we don't leak the goroutine } }() if bytesRead <= 0 { return } var err error var part s3.Part loop: for retries := 0; retries < 5; retries++ { part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from])) if err == nil { break // success! } // NOTE(stevvooe): This retry code tries to only retry under // conditions where the s3 package does not. We may add s3 // error codes to the below if we see others bubble up in the // application. Right now, the most troubling is // RequestTimeout, which seems to only triggered when a tcp // connection to s3 slows to a crawl. If the RequestTimeout // ends up getting added to the s3 library and we don't see // other errors, this retry loop can be removed. switch err := err.(type) { case *s3.Error: switch err.Code { case "RequestTimeout": // allow retries on only this error. default: break loop } } backoff := 100 * time.Millisecond * time.Duration(retries+1) logrus.Errorf("error putting part, retrying after %v: %v", err, backoff.String()) time.Sleep(backoff) } if err != nil { logrus.Errorf("error putting part, aborting: %v", err) select { case putErrChan <- err: case <-done: return // don't leak the goroutine } } // parts and partNumber are safe, because this function is the // only one modifying them and we force it to be executed // serially. parts = append(parts, part) partNumber++ }(bytesRead, from, buf) buf = d.getbuf() // use a new buffer for the next call return nil } if offset > 0 { resp, err := d.Bucket.Head(d.s3Path(path), nil) if err != nil { if s3Err, ok := err.(*s3.Error); !ok || s3Err.Code != "NoSuchKey" { return 0, err } } currentLength := int64(0) if err == nil { currentLength = resp.ContentLength } if currentLength >= offset { if offset < d.ChunkSize { // chunkSize > currentLength >= offset if err = fromSmallCurrent(offset); err != nil { return totalRead, err } if err = fromReader(offset); err != nil { return totalRead, err } if totalRead+offset < d.ChunkSize { return totalRead, nil } } else { // currentLength >= offset >= chunkSize _, part, err = multi.PutPartCopy(partNumber, s3.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)}, d.Bucket.Name+"/"+d.s3Path(path)) if err != nil { return 0, err } parts = append(parts, part) partNumber++ } } else { // Fills between parameters with 0s but only when to - from <= chunkSize fromZeroFillSmall := func(from, to int64) error { bytesRead = 0 for from+int64(bytesRead) < to { nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to]) bytesRead += nn if err != nil { return err } } return nil } // Fills between parameters with 0s, making new parts fromZeroFillLarge := func(from, to int64) error { bytesRead64 := int64(0) for to-(from+bytesRead64) >= d.ChunkSize { part, err := multi.PutPart(int(partNumber), bytes.NewReader(d.zeros)) if err != nil { return err } bytesRead64 += d.ChunkSize parts = append(parts, part) partNumber++ } return fromZeroFillSmall(0, (to-from)%d.ChunkSize) } // currentLength < offset if currentLength < d.ChunkSize { if offset < d.ChunkSize { // chunkSize > offset > currentLength if err = fromSmallCurrent(currentLength); err != nil { return totalRead, err } if err = fromZeroFillSmall(currentLength, offset); err != nil { return totalRead, err } if err = fromReader(offset); err != nil { return totalRead, err } if totalRead+offset < d.ChunkSize { return totalRead, nil } } else { // offset >= 
chunkSize > currentLength if err = fromSmallCurrent(currentLength); err != nil { return totalRead, err } if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { return totalRead, err } part, err = multi.PutPart(int(partNumber), bytes.NewReader(buf)) if err != nil { return totalRead, err } parts = append(parts, part) partNumber++ //Zero fill from chunkSize up to offset, then some reader if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { return totalRead, err } if err = fromReader(offset % d.ChunkSize); err != nil { return totalRead, err } if totalRead+(offset%d.ChunkSize) < d.ChunkSize { return totalRead, nil } } } else { // offset > currentLength >= chunkSize _, part, err = multi.PutPartCopy(partNumber, s3.CopyOptions{}, d.Bucket.Name+"/"+d.s3Path(path)) if err != nil { return 0, err } parts = append(parts, part) partNumber++ //Zero fill from currentLength up to offset, then some reader if err = fromZeroFillLarge(currentLength, offset); err != nil { return totalRead, err } if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { return totalRead, err } if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { return totalRead, nil } } } } for { if err = fromReader(0); err != nil { return totalRead, err } if int64(bytesRead) < d.ChunkSize { break } } return totalRead, nil } // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { listResponse, err := d.Bucket.List(d.s3Path(path), "", "", 1) if err != nil { return nil, err } fi := storagedriver.FileInfoFields{ Path: path, } if len(listResponse.Contents) == 1 { if listResponse.Contents[0].Key != d.s3Path(path) { fi.IsDir = true } else { fi.IsDir = false fi.Size = listResponse.Contents[0].Size timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified) if err != nil { return nil, err } fi.ModTime = timestamp } } else if len(listResponse.CommonPrefixes) == 1 { fi.IsDir = true } else { return nil, storagedriver.PathNotFoundError{Path: path} } return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } // List returns a list of the objects that are direct descendants of the given path. func (d *driver) List(ctx context.Context, opath string) ([]string, error) { path := opath if path != "/" && path[len(path)-1] != '/' { path = path + "/" } // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
// In those cases, there is no root prefix to replace and we must actually add a "/" to all // results in order to keep them as valid paths as recognized by storagedriver.PathRegexp prefix := "" if d.s3Path("") == "" { prefix = "/" } listResponse, err := d.Bucket.List(d.s3Path(path), "/", "", listMax) if err != nil { return nil, parseError(opath, err) } files := []string{} directories := []string{} for { for _, key := range listResponse.Contents { files = append(files, strings.Replace(key.Key, d.s3Path(""), prefix, 1)) } for _, commonPrefix := range listResponse.CommonPrefixes { directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1)) } if listResponse.IsTruncated { listResponse, err = d.Bucket.List(d.s3Path(path), "/", listResponse.NextMarker, listMax) if err != nil { return nil, err } } else { break } } if opath != "/" { if len(files) == 0 && len(directories) == 0 { // Treat empty response as missing directory, since we don't actually // have directories in s3. return nil, storagedriver.PathNotFoundError{Path: opath} } } return append(files, directories...), nil } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { /* This is terrible, but aws doesn't have an actual move. */ _, err := d.Bucket.PutCopy(d.s3Path(destPath), getPermissions(), s3.CopyOptions{Options: d.getOptions(), ContentType: d.getContentType()}, d.Bucket.Name+"/"+d.s3Path(sourcePath)) if err != nil { return parseError(sourcePath, err) } return d.Delete(ctx, sourcePath) } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { listResponse, err := d.Bucket.List(d.s3Path(path), "", "", listMax) if err != nil || len(listResponse.Contents) == 0 { return storagedriver.PathNotFoundError{Path: path} } s3Objects := make([]s3.Object, listMax) for len(listResponse.Contents) > 0 { for index, key := range listResponse.Contents { s3Objects[index].Key = key.Key } err := d.Bucket.DelMulti(s3.Delete{Quiet: false, Objects: s3Objects[0:len(listResponse.Contents)]}) if err != nil { return nil } listResponse, err = d.Bucket.List(d.s3Path(path), "", "", listMax) if err != nil { return err } } return nil } // URLFor returns a URL which may be used to retrieve the content stored at the given path. // May return an UnsupportedMethodErr in certain StorageDriver implementations. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { methodString := "GET" method, ok := options["method"] if ok { methodString, ok = method.(string) if !ok || (methodString != "GET" && methodString != "HEAD") { return "", storagedriver.ErrUnsupportedMethod{} } } expiresTime := time.Now().Add(20 * time.Minute) expires, ok := options["expiry"] if ok { et, ok := expires.(time.Time) if ok { expiresTime = et } } return d.Bucket.SignedURLWithMethod(methodString, d.s3Path(path), expiresTime, nil, nil), nil } func (d *driver) s3Path(path string) string { return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") } // S3BucketKey returns the s3 bucket key for the given storage driver path. 
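// For example (an illustrative root directory, per s3Path above): with RootDirectory
// "registry/prod", the driver path "/docker/registry/v2/blobs" maps to the bucket key
// "registry/prod/docker/registry/v2/blobs"; with an empty RootDirectory it maps to
// "docker/registry/v2/blobs", i.e. the leading slash is stripped.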
func (d *Driver) S3BucketKey(path string) string { return d.StorageDriver.(*driver).s3Path(path) } func parseError(path string, err error) error { if s3Err, ok := err.(*s3.Error); ok && s3Err.Code == "NoSuchKey" { return storagedriver.PathNotFoundError{Path: path} } return err } func hasCode(err error, code string) bool { s3err, ok := err.(*aws.Error) return ok && s3err.Code == code } func (d *driver) getOptions() s3.Options { return s3.Options{SSE: d.Encrypt} } func getPermissions() s3.ACL { return s3.Private } func (d *driver) getContentType() string { return "application/octet-stream" } // getbuf returns a buffer from the driver's pool with length d.ChunkSize. func (d *driver) getbuf() []byte { return d.pool.Get().([]byte) } func (d *driver) putbuf(p []byte) { copy(p, d.zeros) d.pool.Put(p) } distribution-2.3.0/registry/storage/driver/s3/s3_test.go000066400000000000000000000064511265472114500233170ustar00rootroot00000000000000package s3 import ( "io/ioutil" "os" "strconv" "testing" "github.com/AdRoll/goamz/aws" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } var s3DriverConstructor func(rootDirectory string) (*Driver, error) var skipS3 func() string func init() { accessKey := os.Getenv("AWS_ACCESS_KEY") secretKey := os.Getenv("AWS_SECRET_KEY") bucket := os.Getenv("S3_BUCKET") encrypt := os.Getenv("S3_ENCRYPT") secure := os.Getenv("S3_SECURE") v4auth := os.Getenv("S3_USE_V4_AUTH") region := os.Getenv("AWS_REGION") root, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(root) s3DriverConstructor = func(rootDirectory string) (*Driver, error) { encryptBool := false if encrypt != "" { encryptBool, err = strconv.ParseBool(encrypt) if err != nil { return nil, err } } secureBool := true if secure != "" { secureBool, err = strconv.ParseBool(secure) if err != nil { return nil, err } } v4AuthBool := false if v4auth != "" { v4AuthBool, err = strconv.ParseBool(v4auth) if err != nil { return nil, err } } parameters := DriverParameters{ accessKey, secretKey, bucket, aws.GetRegion(region), encryptBool, secureBool, v4AuthBool, minChunkSize, rootDirectory, } return New(parameters) } // Skip S3 storage driver tests if environment variable parameters are not provided skipS3 = func() string { if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" { return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests" } return "" } testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) { return s3DriverConstructor(root) }, skipS3) } func TestEmptyRootList(t *testing.T) { if skipS3() != "" { t.Skip(skipS3()) } validRoot, err := ioutil.TempDir("", "driver-") if err != nil { t.Fatalf("unexpected error creating temporary directory: %v", err) } defer os.Remove(validRoot) rootedDriver, err := s3DriverConstructor(validRoot) if err != nil { t.Fatalf("unexpected error creating rooted driver: %v", err) } emptyRootDriver, err := s3DriverConstructor("") if err != nil { t.Fatalf("unexpected error creating empty root driver: %v", err) } slashRootDriver, err := s3DriverConstructor("/") if err != nil { t.Fatalf("unexpected error creating slash root driver: %v", err) } filename := "/test" contents := []byte("contents") ctx := context.Background() err = 
rootedDriver.PutContent(ctx, filename, contents) if err != nil { t.Fatalf("unexpected error creating content: %v", err) } defer rootedDriver.Delete(ctx, filename) keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } } distribution-2.3.0/registry/storage/driver/storagedriver.go000066400000000000000000000120061265472114500242570ustar00rootroot00000000000000package driver import ( "fmt" "io" "regexp" "strconv" "strings" "github.com/docker/distribution/context" ) // Version is a string representing the storage driver version, of the form // Major.Minor. // The registry must accept storage drivers with equal major version and greater // minor version, but may not be compatible with older storage driver versions. type Version string // Major returns the major (primary) component of a version. func (version Version) Major() uint { majorPart := strings.Split(string(version), ".")[0] major, _ := strconv.ParseUint(majorPart, 10, 0) return uint(major) } // Minor returns the minor (secondary) component of a version. func (version Version) Minor() uint { minorPart := strings.Split(string(version), ".")[1] minor, _ := strconv.ParseUint(minorPart, 10, 0) return uint(minor) } // CurrentVersion is the current storage driver Version. const CurrentVersion Version = "0.1" // StorageDriver defines methods that a Storage Driver must implement for a // filesystem-like key/value object storage. type StorageDriver interface { // Name returns the human-readable "name" of the driver, useful in error // messages and logging. By convention, this will just be the registration // name, but drivers may provide other information here. Name() string // GetContent retrieves the content stored at "path" as a []byte. // This should primarily be used for small objects. GetContent(ctx context.Context, path string) ([]byte, error) // PutContent stores the []byte content at a location designated by "path". // This should primarily be used for small objects. PutContent(ctx context.Context, path string, content []byte) error // ReadStream retrieves an io.ReadCloser for the content stored at "path" // with a given byte offset. // May be used to resume reading a stream by providing a nonzero offset. ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) // WriteStream stores the contents of the provided io.ReadCloser at a // location designated by the given path. // May be used to resume writing a stream by providing a nonzero offset. // The offset must be no larger than the CurrentSize for this path. WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (nn int64, err error) // Stat retrieves the FileInfo for the given path, including the current // size in bytes and the creation time. Stat(ctx context.Context, path string) (FileInfo, error) // List returns a list of the objects that are direct descendants of the //given path. List(ctx context.Context, path string) ([]string, error) // Move moves an object stored at sourcePath to destPath, removing the // original object. // Note: This may be no more efficient than a copy followed by a delete for // many implementations. 
Move(ctx context.Context, sourcePath string, destPath string) error // Delete recursively deletes all objects stored at "path" and its subpaths. Delete(ctx context.Context, path string) error // URLFor returns a URL which may be used to retrieve the content stored at // the given path, possibly using the given options. // May return an ErrUnsupportedMethod in certain StorageDriver // implementations. URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) } // PathRegexp is the regular expression which each file path must match. A // file path is absolute, beginning with a slash and containing a positive // number of path components separated by slashes, where each component is // restricted to alphanumeric characters or a period, underscore, or // hyphen. var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`) // ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method. type ErrUnsupportedMethod struct { DriverName string } func (err ErrUnsupportedMethod) Error() string { return fmt.Sprintf("%s: unsupported method", err.DriverName) } // PathNotFoundError is returned when operating on a nonexistent path. type PathNotFoundError struct { Path string DriverName string } func (err PathNotFoundError) Error() string { return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path) } // InvalidPathError is returned when the provided path is malformed. type InvalidPathError struct { Path string DriverName string } func (err InvalidPathError) Error() string { return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path) } // InvalidOffsetError is returned when attempting to read or write from an // invalid offset. type InvalidOffsetError struct { Path string Offset int64 DriverName string } func (err InvalidOffsetError) Error() string { return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path) } // Error is a catch-all error type which captures an error string and // the driver type on which it occured. type Error struct { DriverName string Enclosed error } func (err Error) Error() string { return fmt.Sprintf("%s: %s", err.DriverName, err.Enclosed) } distribution-2.3.0/registry/storage/driver/swift/000077500000000000000000000000001265472114500222055ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/swift/swift.go000066400000000000000000000556411265472114500237030ustar00rootroot00000000000000// Package swift provides a storagedriver.StorageDriver implementation to // store blobs in Openstack Swift object storage. // // This package leverages the ncw/swift client library for interfacing with // Swift. // // It supports both TempAuth authentication and Keystone authentication // (up to version 3). // // As Swift has a limit on the size of a single uploaded object (by default // this is 5GB), the driver makes use of the Swift Large Object Support // (http://docs.openstack.org/developer/swift/overview_large_objects.html). // Only one container is used for both manifests and data objects. Manifests // are stored in the 'files' pseudo directory, data objects are stored under // 'segments'. 
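// Layout sketch (assuming the prefix parameter is "registry" for illustration): the
// manifest object for driver path "/docker/repo" is stored at
// "registry/files/docker/repo" (see swiftPath), while its data is uploaded as numbered
// segment objects under a randomized "registry/segments/..." prefix (see
// swiftSegmentPath) and tied together with an X-Object-Manifest header.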
package swift import ( "bytes" "crypto/md5" "crypto/rand" "crypto/sha1" "crypto/tls" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "net/url" "strconv" "strings" "time" "github.com/mitchellh/mapstructure" "github.com/ncw/swift" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/version" ) const driverName = "swift" // defaultChunkSize defines the default size of a segment const defaultChunkSize = 20 * 1024 * 1024 // minChunkSize defines the minimum size of a segment const minChunkSize = 1 << 20 // readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded var readAfterWriteTimeout = 15 * time.Second // readAfterWriteWait defines the time to sleep between two retries var readAfterWriteWait = 200 * time.Millisecond // Parameters A struct that encapsulates all of the driver parameters after all values have been set type Parameters struct { Username string Password string AuthURL string Tenant string TenantID string Domain string DomainID string TrustID string Region string Container string Prefix string InsecureSkipVerify bool ChunkSize int SecretKey string AccessKey string TempURLContainerKey bool TempURLMethods []string } // swiftInfo maps the JSON structure returned by Swift /info endpoint type swiftInfo struct { Swift struct { Version string `mapstructure:"version"` } Tempurl struct { Methods []string `mapstructure:"methods"` } } func init() { factory.Register(driverName, &swiftDriverFactory{}) } // swiftDriverFactory implements the factory.StorageDriverFactory interface type swiftDriverFactory struct{} func (factory *swiftDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) { return FromParameters(parameters) } type driver struct { Conn swift.Connection Container string Prefix string BulkDeleteSupport bool ChunkSize int SecretKey string AccessKey string TempURLContainerKey bool TempURLMethods []string } type baseEmbed struct { base.Base } // Driver is a storagedriver.StorageDriver implementation backed by Openstack Swift // Objects are stored at absolute keys in the provided container. 
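// A minimal construction sketch via FromParameters below (placeholder values; only the
// four required parameters are set, everything else keeps its default):
//
//	d, err := FromParameters(map[string]interface{}{
//		"username":  "demo",
//		"password":  "example-secret",
//		"authurl":   "https://auth.example.com/v2.0",
//		"container": "registry",
//	})
//	if err != nil {
//		// handle configuration or authentication error
//	}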
type Driver struct { baseEmbed } // FromParameters constructs a new Driver with a given parameters map // Required parameters: // - username // - password // - authurl // - container func FromParameters(parameters map[string]interface{}) (*Driver, error) { params := Parameters{ ChunkSize: defaultChunkSize, InsecureSkipVerify: false, } if err := mapstructure.Decode(parameters, ¶ms); err != nil { return nil, err } if params.Username == "" { return nil, fmt.Errorf("No username parameter provided") } if params.Password == "" { return nil, fmt.Errorf("No password parameter provided") } if params.AuthURL == "" { return nil, fmt.Errorf("No authurl parameter provided") } if params.Container == "" { return nil, fmt.Errorf("No container parameter provided") } if params.ChunkSize < minChunkSize { return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", params.ChunkSize, minChunkSize) } return New(params) } // New constructs a new Driver with the given Openstack Swift credentials and container name func New(params Parameters) (*Driver, error) { transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, MaxIdleConnsPerHost: 2048, TLSClientConfig: &tls.Config{InsecureSkipVerify: params.InsecureSkipVerify}, } ct := swift.Connection{ UserName: params.Username, ApiKey: params.Password, AuthUrl: params.AuthURL, Region: params.Region, UserAgent: "distribution/" + version.Version, Tenant: params.Tenant, TenantId: params.TenantID, Domain: params.Domain, DomainId: params.DomainID, TrustId: params.TrustID, Transport: transport, ConnectTimeout: 60 * time.Second, Timeout: 15 * 60 * time.Second, } err := ct.Authenticate() if err != nil { return nil, fmt.Errorf("Swift authentication failed: %s", err) } if _, _, err := ct.Container(params.Container); err == swift.ContainerNotFound { if err := ct.ContainerCreate(params.Container, nil); err != nil { return nil, fmt.Errorf("Failed to create container %s (%s)", params.Container, err) } } else if err != nil { return nil, fmt.Errorf("Failed to retrieve info about container %s (%s)", params.Container, err) } d := &driver{ Conn: ct, Container: params.Container, Prefix: params.Prefix, ChunkSize: params.ChunkSize, TempURLMethods: make([]string, 0), AccessKey: params.AccessKey, } info := swiftInfo{} if config, err := d.Conn.QueryInfo(); err == nil { _, d.BulkDeleteSupport = config["bulk_delete"] if err := mapstructure.Decode(config, &info); err == nil { d.TempURLContainerKey = info.Swift.Version >= "2.3.0" d.TempURLMethods = info.Tempurl.Methods } } else { d.TempURLContainerKey = params.TempURLContainerKey d.TempURLMethods = params.TempURLMethods } if len(d.TempURLMethods) > 0 { secretKey := params.SecretKey if secretKey == "" { secretKey, _ = generateSecret() } // Since Swift 2.2.2, we can now set secret keys on containers // in addition to the account secret keys. Use them in preference. 
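// If the container (or account) does not yet carry a temp URL key, or the stored key
// differs from the configured secretkey, the generated or configured key is written
// back via a metadata update below so that URLFor can sign temporary URLs.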
if d.TempURLContainerKey { _, containerHeaders, err := d.Conn.Container(d.Container) if err != nil { return nil, fmt.Errorf("Failed to fetch container info %s (%s)", d.Container, err) } d.SecretKey = containerHeaders["X-Container-Meta-Temp-Url-Key"] if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { m := swift.Metadata{} m["temp-url-key"] = secretKey if d.Conn.ContainerUpdate(d.Container, m.ContainerHeaders()); err == nil { d.SecretKey = secretKey } } } else { // Use the account secret key _, accountHeaders, err := d.Conn.Account() if err != nil { return nil, fmt.Errorf("Failed to fetch account info (%s)", err) } d.SecretKey = accountHeaders["X-Account-Meta-Temp-Url-Key"] if d.SecretKey == "" || (params.SecretKey != "" && d.SecretKey != params.SecretKey) { m := swift.Metadata{} m["temp-url-key"] = secretKey if err := d.Conn.AccountUpdate(m.AccountHeaders()); err == nil { d.SecretKey = secretKey } } } } return &Driver{ baseEmbed: baseEmbed{ Base: base.Base{ StorageDriver: d, }, }, }, nil } // Implement the storagedriver.StorageDriver interface func (d *driver) Name() string { return driverName } // GetContent retrieves the content stored at "path" as a []byte. func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { content, err := d.Conn.ObjectGetBytes(d.Container, d.swiftPath(path)) if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return content, nil } // PutContent stores the []byte content at a location designated by "path". func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { err := d.Conn.ObjectPutBytes(d.Container, d.swiftPath(path), contents, d.getContentType()) if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // given byte offset. func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { headers := make(swift.Headers) headers["Range"] = "bytes=" + strconv.FormatInt(offset, 10) + "-" file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } if swiftErr, ok := err.(*swift.Error); ok && swiftErr.StatusCode == http.StatusRequestedRangeNotSatisfiable { return ioutil.NopCloser(bytes.NewReader(nil)), nil } return file, err } // WriteStream stores the contents of the provided io.Reader at a // location designated by the given path. The driver will know it has // received the full contents when the reader returns io.EOF. The number // of successfully READ bytes will be returned, even if an error is // returned. May be used to resume writing a stream by providing a nonzero // offset. Offsets past the current size will write from the position // beyond the end of the file. 
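// In this Swift implementation the data is written as fixed-size segment objects
// (ChunkSize bytes each) under a dedicated segments prefix; an existing object at the
// path is first converted into (or reused as) segments, and a Dynamic Large Object
// manifest pointing at the segment prefix is created via createManifest when needed.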
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (int64, error) { var ( segments []swift.Object multi io.Reader paddingReader io.Reader currentLength int64 cursor int64 segmentPath string ) partNumber := 1 chunkSize := int64(d.ChunkSize) zeroBuf := make([]byte, d.ChunkSize) hash := md5.New() getSegment := func() string { return fmt.Sprintf("%s/%016d", segmentPath, partNumber) } max := func(a int64, b int64) int64 { if a > b { return a } return b } createManifest := true info, headers, err := d.Conn.Object(d.Container, d.swiftPath(path)) if err == nil { manifest, ok := headers["X-Object-Manifest"] if !ok { if segmentPath, err = d.swiftSegmentPath(path); err != nil { return 0, err } if err := d.Conn.ObjectMove(d.Container, d.swiftPath(path), d.Container, getSegment()); err != nil { return 0, err } segments = append(segments, info) } else { _, segmentPath = parseManifest(manifest) if segments, err = d.getAllSegments(segmentPath); err != nil { return 0, err } createManifest = false } currentLength = info.Bytes } else if err == swift.ObjectNotFound { if segmentPath, err = d.swiftSegmentPath(path); err != nil { return 0, err } } else { return 0, err } // First, we skip the existing segments that are not modified by this call for i := range segments { if offset < cursor+segments[i].Bytes { break } cursor += segments[i].Bytes hash.Write([]byte(segments[i].Hash)) partNumber++ } // We reached the end of the file but we haven't reached 'offset' yet // Therefore we add blocks of zeros if offset >= currentLength { for offset-currentLength >= chunkSize { // Insert a block a zero headers, err := d.Conn.ObjectPut(d.Container, getSegment(), bytes.NewReader(zeroBuf), false, "", d.getContentType(), nil) if err != nil { if err == swift.ObjectNotFound { return 0, storagedriver.PathNotFoundError{Path: getSegment()} } return 0, err } currentLength += chunkSize partNumber++ hash.Write([]byte(headers["Etag"])) } cursor = currentLength paddingReader = bytes.NewReader(zeroBuf) } else if offset-cursor > 0 { // Offset is inside the current segment : we need to read the // data from the beginning of the segment to offset file, _, err := d.Conn.ObjectOpen(d.Container, getSegment(), false, nil) if err != nil { if err == swift.ObjectNotFound { return 0, storagedriver.PathNotFoundError{Path: getSegment()} } return 0, err } defer file.Close() paddingReader = file } readers := []io.Reader{} if paddingReader != nil { readers = append(readers, io.LimitReader(paddingReader, offset-cursor)) } readers = append(readers, io.LimitReader(reader, chunkSize-(offset-cursor))) multi = io.MultiReader(readers...) 
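// At this point multi yields whatever must precede the caller's input inside the
// current segment (either zero padding or bytes re-read from the existing object up to
// offset), followed by at most chunkSize-(offset-cursor) bytes from reader;
// writeSegment below drains it one segment at a time.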
writeSegment := func(segment string) (finished bool, bytesRead int64, err error) { currentSegment, err := d.Conn.ObjectCreate(d.Container, segment, false, "", d.getContentType(), nil) if err != nil { if err == swift.ObjectNotFound { return false, bytesRead, storagedriver.PathNotFoundError{Path: segment} } return false, bytesRead, err } segmentHash := md5.New() writer := io.MultiWriter(currentSegment, segmentHash) n, err := io.Copy(writer, multi) if err != nil { return false, bytesRead, err } if n > 0 { defer func() { closeError := currentSegment.Close() if err != nil { err = closeError } hexHash := hex.EncodeToString(segmentHash.Sum(nil)) hash.Write([]byte(hexHash)) }() bytesRead += n - max(0, offset-cursor) } if n < chunkSize { // We wrote all the data if cursor+n < currentLength { // Copy the end of the chunk headers := make(swift.Headers) headers["Range"] = "bytes=" + strconv.FormatInt(cursor+n, 10) + "-" + strconv.FormatInt(cursor+chunkSize, 10) file, _, err := d.Conn.ObjectOpen(d.Container, d.swiftPath(path), false, headers) if err != nil { if err == swift.ObjectNotFound { return false, bytesRead, storagedriver.PathNotFoundError{Path: path} } return false, bytesRead, err } _, copyErr := io.Copy(writer, file) if err := file.Close(); err != nil { if err == swift.ObjectNotFound { return false, bytesRead, storagedriver.PathNotFoundError{Path: path} } return false, bytesRead, err } if copyErr != nil { return false, bytesRead, copyErr } } return true, bytesRead, nil } multi = io.LimitReader(reader, chunkSize) cursor += chunkSize partNumber++ return false, bytesRead, nil } finished := false read := int64(0) bytesRead := int64(0) for finished == false { finished, read, err = writeSegment(getSegment()) bytesRead += read if err != nil { return bytesRead, err } } for ; partNumber < len(segments); partNumber++ { hash.Write([]byte(segments[partNumber].Hash)) } if createManifest { if err := d.createManifest(path, d.Container+"/"+segmentPath); err != nil { return 0, err } } expectedHash := hex.EncodeToString(hash.Sum(nil)) waitingTime := readAfterWriteWait endTime := time.Now().Add(readAfterWriteTimeout) for { var infos swift.Object if infos, _, err = d.Conn.Object(d.Container, d.swiftPath(path)); err == nil { if strings.Trim(infos.Hash, "\"") == expectedHash { return bytesRead, nil } err = fmt.Errorf("Timeout expired while waiting for segments of %s to show up", path) } if time.Now().Add(waitingTime).After(endTime) { break } time.Sleep(waitingTime) waitingTime *= 2 } return bytesRead, err } // Stat retrieves the FileInfo for the given path, including the current size // in bytes and the creation time. 
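// A path may resolve either to a pseudo directory or to a manifest object; the listing
// below uses a '/' delimiter to detect directories, and for objects a second HEAD
// request is issued because some Swift versions report a size of 0 for manifest
// objects in listings.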
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { swiftPath := d.swiftPath(path) opts := &swift.ObjectsOpts{ Prefix: swiftPath, Delimiter: '/', } objects, err := d.Conn.ObjectsAll(d.Container, opts) if err != nil { if err == swift.ContainerNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return nil, err } fi := storagedriver.FileInfoFields{ Path: strings.TrimPrefix(strings.TrimSuffix(swiftPath, "/"), d.swiftPath("/")), } for _, obj := range objects { if obj.PseudoDirectory && obj.Name == swiftPath+"/" { fi.IsDir = true return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } else if obj.Name == swiftPath { // On Swift 1.12, the 'bytes' field is always 0 // so we need to do a second HEAD request info, _, err := d.Conn.Object(d.Container, swiftPath) if err != nil { if err == swift.ObjectNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return nil, err } fi.IsDir = false fi.Size = info.Bytes fi.ModTime = info.LastModified return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil } } return nil, storagedriver.PathNotFoundError{Path: path} } // List returns a list of the objects that are direct descendants of the given path. func (d *driver) List(ctx context.Context, path string) ([]string, error) { var files []string prefix := d.swiftPath(path) if prefix != "" { prefix += "/" } opts := &swift.ObjectsOpts{ Prefix: prefix, Delimiter: '/', } objects, err := d.Conn.ObjectsAll(d.Container, opts) for _, obj := range objects { files = append(files, strings.TrimPrefix(strings.TrimSuffix(obj.Name, "/"), d.swiftPath("/"))) } if err == swift.ContainerNotFound || (len(objects) == 0 && path != "/") { return files, storagedriver.PathNotFoundError{Path: path} } return files, err } // Move moves an object stored at sourcePath to destPath, removing the original // object. func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { _, headers, err := d.Conn.Object(d.Container, d.swiftPath(sourcePath)) if err == nil { if manifest, ok := headers["X-Object-Manifest"]; ok { if err = d.createManifest(destPath, manifest); err != nil { return err } err = d.Conn.ObjectDelete(d.Container, d.swiftPath(sourcePath)) } else { err = d.Conn.ObjectMove(d.Container, d.swiftPath(sourcePath), d.Container, d.swiftPath(destPath)) } } if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: sourcePath} } return err } // Delete recursively deletes all objects stored at "path" and its subpaths. func (d *driver) Delete(ctx context.Context, path string) error { opts := swift.ObjectsOpts{ Prefix: d.swiftPath(path) + "/", } objects, err := d.Conn.ObjectsAll(d.Container, &opts) if err != nil { if err == swift.ContainerNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } for _, obj := range objects { if obj.PseudoDirectory { continue } if _, headers, err := d.Conn.Object(d.Container, obj.Name); err == nil { manifest, ok := headers["X-Object-Manifest"] if ok { _, prefix := parseManifest(manifest) segments, err := d.getAllSegments(prefix) if err != nil { return err } objects = append(objects, segments...) 
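// The segments referenced by this manifest were appended to the deletion list above so
// that the underlying data objects are removed along with the manifest itself.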
} } else { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: obj.Name} } return err } } if d.BulkDeleteSupport && len(objects) > 0 { filenames := make([]string, len(objects)) for i, obj := range objects { filenames[i] = obj.Name } _, err = d.Conn.BulkDelete(d.Container, filenames) // Don't fail on ObjectNotFound because eventual consistency // makes this situation normal. if err != nil && err != swift.Forbidden && err != swift.ObjectNotFound { if err == swift.ContainerNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } } else { for _, obj := range objects { if err := d.Conn.ObjectDelete(d.Container, obj.Name); err != nil { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: obj.Name} } return err } } } _, _, err = d.Conn.Object(d.Container, d.swiftPath(path)) if err == nil { if err := d.Conn.ObjectDelete(d.Container, d.swiftPath(path)); err != nil { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } } else if err == swift.ObjectNotFound { if len(objects) == 0 { return storagedriver.PathNotFoundError{Path: path} } } else { return err } return nil } // URLFor returns a URL which may be used to retrieve the content stored at the given path. func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { if d.SecretKey == "" { return "", storagedriver.ErrUnsupportedMethod{} } methodString := "GET" method, ok := options["method"] if ok { if methodString, ok = method.(string); !ok { return "", storagedriver.ErrUnsupportedMethod{} } } if methodString == "HEAD" { // A "HEAD" request on a temporary URL is allowed if the // signature was generated with "GET", "POST" or "PUT" methodString = "GET" } supported := false for _, method := range d.TempURLMethods { if method == methodString { supported = true break } } if !supported { return "", storagedriver.ErrUnsupportedMethod{} } expiresTime := time.Now().Add(20 * time.Minute) expires, ok := options["expiry"] if ok { et, ok := expires.(time.Time) if ok { expiresTime = et } } tempURL := d.Conn.ObjectTempUrl(d.Container, d.swiftPath(path), d.SecretKey, methodString, expiresTime) if d.AccessKey != "" { // On HP Cloud, the signature must be in the form of tenant_id:access_key:signature url, _ := url.Parse(tempURL) query := url.Query() query.Set("temp_url_sig", fmt.Sprintf("%s:%s:%s", d.Conn.TenantId, d.AccessKey, query.Get("temp_url_sig"))) url.RawQuery = query.Encode() tempURL = url.String() } return tempURL, nil } func (d *driver) swiftPath(path string) string { return strings.TrimLeft(strings.TrimRight(d.Prefix+"/files"+path, "/"), "/") } func (d *driver) swiftSegmentPath(path string) (string, error) { checksum := sha1.New() random := make([]byte, 32) if _, err := rand.Read(random); err != nil { return "", err } path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) return strings.TrimLeft(strings.TrimRight(d.Prefix+"/segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil } func (d *driver) getContentType() string { return "application/octet-stream" } func (d *driver) getAllSegments(path string) ([]swift.Object, error) { segments, err := d.Conn.ObjectsAll(d.Container, &swift.ObjectsOpts{Prefix: path}) if err == swift.ContainerNotFound { return nil, storagedriver.PathNotFoundError{Path: path} } return segments, err } func (d *driver) createManifest(path string, segments string) error { headers := make(swift.Headers) headers["X-Object-Manifest"] = segments manifest, err := 
d.Conn.ObjectCreate(d.Container, d.swiftPath(path), false, "", d.getContentType(), headers) if err != nil { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } if err := manifest.Close(); err != nil { if err == swift.ObjectNotFound { return storagedriver.PathNotFoundError{Path: path} } return err } return nil } func parseManifest(manifest string) (container string, prefix string) { components := strings.SplitN(manifest, "/", 2) container = components[0] if len(components) > 1 { prefix = components[1] } return container, prefix } func generateSecret() (string, error) { var secretBytes [32]byte if _, err := rand.Read(secretBytes[:]); err != nil { return "", fmt.Errorf("could not generate random bytes for Swift secret key: %v", err) } return hex.EncodeToString(secretBytes[:]), nil } distribution-2.3.0/registry/storage/driver/swift/swift_test.go000066400000000000000000000105051265472114500247300ustar00rootroot00000000000000package swift import ( "io/ioutil" "os" "strconv" "strings" "testing" "github.com/ncw/swift/swifttest" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/testsuites" "gopkg.in/check.v1" ) // Hook up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } var swiftDriverConstructor func(prefix string) (*Driver, error) func init() { var ( username string password string authURL string tenant string tenantID string domain string domainID string trustID string container string region string insecureSkipVerify bool secretKey string accessKey string containerKey bool tempURLMethods []string swiftServer *swifttest.SwiftServer err error ) username = os.Getenv("SWIFT_USERNAME") password = os.Getenv("SWIFT_PASSWORD") authURL = os.Getenv("SWIFT_AUTH_URL") tenant = os.Getenv("SWIFT_TENANT_NAME") tenantID = os.Getenv("SWIFT_TENANT_ID") domain = os.Getenv("SWIFT_DOMAIN_NAME") domainID = os.Getenv("SWIFT_DOMAIN_ID") trustID = os.Getenv("SWIFT_TRUST_ID") container = os.Getenv("SWIFT_CONTAINER_NAME") region = os.Getenv("SWIFT_REGION_NAME") insecureSkipVerify, _ = strconv.ParseBool(os.Getenv("SWIFT_INSECURESKIPVERIFY")) secretKey = os.Getenv("SWIFT_SECRET_KEY") accessKey = os.Getenv("SWIFT_ACCESS_KEY") containerKey, _ = strconv.ParseBool(os.Getenv("SWIFT_TEMPURL_CONTAINERKEY")) tempURLMethods = strings.Split(os.Getenv("SWIFT_TEMPURL_METHODS"), ",") if username == "" || password == "" || authURL == "" || container == "" { if swiftServer, err = swifttest.NewSwiftServer("localhost"); err != nil { panic(err) } username = "swifttest" password = "swifttest" authURL = swiftServer.AuthURL container = "test" } prefix, err := ioutil.TempDir("", "driver-") if err != nil { panic(err) } defer os.Remove(prefix) swiftDriverConstructor = func(root string) (*Driver, error) { parameters := Parameters{ username, password, authURL, tenant, tenantID, domain, domainID, trustID, region, container, root, insecureSkipVerify, defaultChunkSize, secretKey, accessKey, containerKey, tempURLMethods, } return New(parameters) } driverConstructor := func() (storagedriver.StorageDriver, error) { return swiftDriverConstructor(prefix) } testsuites.RegisterSuite(driverConstructor, testsuites.NeverSkip) } func TestEmptyRootList(t *testing.T) { validRoot, err := ioutil.TempDir("", "driver-") if err != nil { t.Fatalf("unexpected error creating temporary directory: %v", err) } defer os.Remove(validRoot) rootedDriver, err := 
swiftDriverConstructor(validRoot) if err != nil { t.Fatalf("unexpected error creating rooted driver: %v", err) } emptyRootDriver, err := swiftDriverConstructor("") if err != nil { t.Fatalf("unexpected error creating empty root driver: %v", err) } slashRootDriver, err := swiftDriverConstructor("/") if err != nil { t.Fatalf("unexpected error creating slash root driver: %v", err) } filename := "/test" contents := []byte("contents") ctx := context.Background() err = rootedDriver.PutContent(ctx, filename, contents) if err != nil { t.Fatalf("unexpected error creating content: %v", err) } keys, err := emptyRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } keys, err = slashRootDriver.List(ctx, "/") for _, path := range keys { if !storagedriver.PathRegexp.MatchString(path) { t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp) } } // Create an object with a path nested under the existing object err = rootedDriver.PutContent(ctx, filename+"/file1", contents) if err != nil { t.Fatalf("unexpected error creating content: %v", err) } err = rootedDriver.Delete(ctx, filename) if err != nil { t.Fatalf("failed to delete: %v", err) } keys, err = rootedDriver.List(ctx, "/") if err != nil { t.Fatalf("failed to list objects after deletion: %v", err) } if len(keys) != 0 { t.Fatal("delete did not remove nested objects") } } distribution-2.3.0/registry/storage/driver/testsuites/000077500000000000000000000000001265472114500232655ustar00rootroot00000000000000distribution-2.3.0/registry/storage/driver/testsuites/testsuites.go000066400000000000000000001164071265472114500260410ustar00rootroot00000000000000package testsuites import ( "bytes" "crypto/sha1" "io" "io/ioutil" "math/rand" "net/http" "os" "path" "sort" "strings" "sync" "testing" "time" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" "gopkg.in/check.v1" ) // Test hooks up gocheck into the "go test" runner. func Test(t *testing.T) { check.TestingT(t) } // RegisterSuite registers an in-process storage driver test suite with // the go test runner. func RegisterSuite(driverConstructor DriverConstructor, skipCheck SkipCheck) { check.Suite(&DriverSuite{ Constructor: driverConstructor, SkipCheck: skipCheck, ctx: context.Background(), }) } // SkipCheck is a function used to determine if a test suite should be skipped. // If a SkipCheck returns a non-empty skip reason, the suite is skipped with // the given reason. type SkipCheck func() (reason string) // NeverSkip is a default SkipCheck which never skips the suite. var NeverSkip SkipCheck = func() string { return "" } // DriverConstructor is a function which returns a new // storagedriver.StorageDriver. type DriverConstructor func() (storagedriver.StorageDriver, error) // DriverTeardown is a function which cleans up a suite's // storagedriver.StorageDriver. type DriverTeardown func() error // DriverSuite is a gocheck test suite designed to test a // storagedriver.StorageDriver. The intended way to create a DriverSuite is // with RegisterSuite. type DriverSuite struct { Constructor DriverConstructor Teardown DriverTeardown SkipCheck storagedriver.StorageDriver ctx context.Context } // SetUpSuite sets up the gocheck test suite. 
func (suite *DriverSuite) SetUpSuite(c *check.C) { if reason := suite.SkipCheck(); reason != "" { c.Skip(reason) } d, err := suite.Constructor() c.Assert(err, check.IsNil) suite.StorageDriver = d } // TearDownSuite tears down the gocheck test suite. func (suite *DriverSuite) TearDownSuite(c *check.C) { if suite.Teardown != nil { err := suite.Teardown() c.Assert(err, check.IsNil) } } // TearDownTest tears down the gocheck test. // This causes the suite to abort if any files are left around in the storage // driver. func (suite *DriverSuite) TearDownTest(c *check.C) { files, _ := suite.StorageDriver.List(suite.ctx, "/") if len(files) > 0 { c.Fatalf("Storage driver did not clean up properly. Offending files: %#v", files) } } // TestRootExists ensures that all storage drivers have a root path by default. func (suite *DriverSuite) TestRootExists(c *check.C) { _, err := suite.StorageDriver.List(suite.ctx, "/") if err != nil { c.Fatalf(`the root path "/" should always exist: %v`, err) } } // TestValidPaths checks that various valid file paths are accepted by the // storage driver. func (suite *DriverSuite) TestValidPaths(c *check.C) { contents := randomContents(64) validFiles := []string{ "/a", "/2", "/aa", "/a.a", "/0-9/abcdefg", "/abcdefg/z.75", "/abc/1.2.3.4.5-6_zyx/123.z/4", "/docker/docker-registry", "/123.abc", "/abc./abc", "/.abc", "/a--b", "/a-.b", "/_.abc", "/Docker/docker-registry", "/Abc/Cba"} for _, filename := range validFiles { err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) defer suite.deletePath(c, firstPart(filename)) c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, contents) } } func (suite *DriverSuite) deletePath(c *check.C, path string) { for tries := 2; tries > 0; tries-- { err := suite.StorageDriver.Delete(suite.ctx, path) if _, ok := err.(storagedriver.PathNotFoundError); ok { err = nil } c.Assert(err, check.IsNil) paths, err := suite.StorageDriver.List(suite.ctx, path) if len(paths) == 0 { break } time.Sleep(time.Second * 2) } } // TestInvalidPaths checks that various invalid file paths are rejected by the // storage driver. func (suite *DriverSuite) TestInvalidPaths(c *check.C) { contents := randomContents(64) invalidFiles := []string{ "", "/", "abc", "123.abc", "//bcd", "/abc_123/"} for _, filename := range invalidFiles { err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) // only delete if file was succesfully written if err == nil { defer suite.deletePath(c, firstPart(filename)) } c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidPathError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } } // TestWriteRead1 tests a simple write-read workflow. func (suite *DriverSuite) TestWriteRead1(c *check.C) { filename := randomPath(32) contents := []byte("a") suite.writeReadCompare(c, filename, contents) } // TestWriteRead2 tests a simple write-read workflow with unicode data. func (suite *DriverSuite) TestWriteRead2(c *check.C) { filename := randomPath(32) contents := []byte("\xc3\x9f") suite.writeReadCompare(c, filename, contents) } // TestWriteRead3 tests a simple write-read workflow with a small string. 
func (suite *DriverSuite) TestWriteRead3(c *check.C) { filename := randomPath(32) contents := randomContents(32) suite.writeReadCompare(c, filename, contents) } // TestWriteRead4 tests a simple write-read workflow with 1MB of data. func (suite *DriverSuite) TestWriteRead4(c *check.C) { filename := randomPath(32) contents := randomContents(1024 * 1024) suite.writeReadCompare(c, filename, contents) } // TestWriteReadNonUTF8 tests that non-utf8 data may be written to the storage // driver safely. func (suite *DriverSuite) TestWriteReadNonUTF8(c *check.C) { filename := randomPath(32) contents := []byte{0x80, 0x80, 0x80, 0x80} suite.writeReadCompare(c, filename, contents) } // TestTruncate tests that putting smaller contents than an original file does // remove the excess contents. func (suite *DriverSuite) TestTruncate(c *check.C) { filename := randomPath(32) contents := randomContents(1024 * 1024) suite.writeReadCompare(c, filename, contents) contents = randomContents(1024) suite.writeReadCompare(c, filename, contents) } // TestReadNonexistent tests reading content from an empty path. func (suite *DriverSuite) TestReadNonexistent(c *check.C) { filename := randomPath(32) _, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestWriteReadStreams1 tests a simple write-read streaming workflow. func (suite *DriverSuite) TestWriteReadStreams1(c *check.C) { filename := randomPath(32) contents := []byte("a") suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams2 tests a simple write-read streaming workflow with // unicode data. func (suite *DriverSuite) TestWriteReadStreams2(c *check.C) { filename := randomPath(32) contents := []byte("\xc3\x9f") suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams3 tests a simple write-read streaming workflow with a // small amount of data. func (suite *DriverSuite) TestWriteReadStreams3(c *check.C) { filename := randomPath(32) contents := randomContents(32) suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreams4 tests a simple write-read streaming workflow with 1MB // of data. func (suite *DriverSuite) TestWriteReadStreams4(c *check.C) { filename := randomPath(32) contents := randomContents(1024 * 1024) suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadStreamsNonUTF8 tests that non-utf8 data may be written to the // storage driver safely. func (suite *DriverSuite) TestWriteReadStreamsNonUTF8(c *check.C) { filename := randomPath(32) contents := []byte{0x80, 0x80, 0x80, 0x80} suite.writeReadCompareStreams(c, filename, contents) } // TestWriteReadLargeStreams tests that a 5GB file may be written to the storage // driver safely. 
func (suite *DriverSuite) TestWriteReadLargeStreams(c *check.C) { if testing.Short() { c.Skip("Skipping test in short mode") } filename := randomPath(32) defer suite.deletePath(c, firstPart(filename)) checksum := sha1.New() var fileSize int64 = 5 * 1024 * 1024 * 1024 contents := newRandReader(fileSize) written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, io.TeeReader(contents, checksum)) c.Assert(err, check.IsNil) c.Assert(written, check.Equals, fileSize) reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() writtenChecksum := sha1.New() io.Copy(writtenChecksum, reader) c.Assert(writtenChecksum.Sum(nil), check.DeepEquals, checksum.Sum(nil)) } // TestReadStreamWithOffset tests that the appropriate data is streamed when // reading with a given offset. func (suite *DriverSuite) TestReadStreamWithOffset(c *check.C) { filename := randomPath(32) defer suite.deletePath(c, firstPart(filename)) chunkSize := int64(32) contentsChunk1 := randomContents(chunkSize) contentsChunk2 := randomContents(chunkSize) contentsChunk3 := randomContents(chunkSize) err := suite.StorageDriver.PutContent(suite.ctx, filename, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) c.Assert(err, check.IsNil) reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() readContents, err := ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, append(append(contentsChunk1, contentsChunk2...), contentsChunk3...)) reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize) c.Assert(err, check.IsNil) defer reader.Close() readContents, err = ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, append(contentsChunk2, contentsChunk3...)) reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*2) c.Assert(err, check.IsNil) defer reader.Close() readContents, err = ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contentsChunk3) // Ensure we get invalid offest for negative offsets. reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, -1) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) c.Assert(reader, check.IsNil) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) // Read past the end of the content and make sure we get a reader that // returns 0 bytes and io.EOF reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3) c.Assert(err, check.IsNil) defer reader.Close() buf := make([]byte, chunkSize) n, err := reader.Read(buf) c.Assert(err, check.Equals, io.EOF) c.Assert(n, check.Equals, 0) // Check the N-1 boundary condition, ensuring we get 1 byte then io.EOF. reader, err = suite.StorageDriver.ReadStream(suite.ctx, filename, chunkSize*3-1) c.Assert(err, check.IsNil) defer reader.Close() n, err = reader.Read(buf) c.Assert(n, check.Equals, 1) // We don't care whether the io.EOF comes on the this read or the first // zero read, but the only error acceptable here is io.EOF. 
if err != nil { c.Assert(err, check.Equals, io.EOF) } // Any more reads should result in zero bytes and io.EOF n, err = reader.Read(buf) c.Assert(n, check.Equals, 0) c.Assert(err, check.Equals, io.EOF) } // TestContinueStreamAppendLarge tests that a stream write can be appended to without // corrupting the data with a large chunk size. func (suite *DriverSuite) TestContinueStreamAppendLarge(c *check.C) { suite.testContinueStreamAppend(c, int64(10*1024*1024)) } // TestContinueStreamAppendSmall is the same as TestContinueStreamAppendLarge, but only // with a tiny chunk size in order to test corner cases for some cloud storage drivers. func (suite *DriverSuite) TestContinueStreamAppendSmall(c *check.C) { suite.testContinueStreamAppend(c, int64(32)) } func (suite *DriverSuite) testContinueStreamAppend(c *check.C, chunkSize int64) { filename := randomPath(32) defer suite.deletePath(c, firstPart(filename)) contentsChunk1 := randomContents(chunkSize) contentsChunk2 := randomContents(chunkSize) contentsChunk3 := randomContents(chunkSize) contentsChunk4 := randomContents(chunkSize) zeroChunk := make([]byte, int64(chunkSize)) fullContents := append(append(contentsChunk1, contentsChunk2...), contentsChunk3...) nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contentsChunk1)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk1))) fi, err := suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, int64(len(contentsChunk1))) nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(contentsChunk2)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk2))) fi, err = suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, 2*chunkSize) // Test re-writing the last chunk nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size()-chunkSize, bytes.NewReader(contentsChunk2)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contentsChunk2))) fi, err = suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, 2*chunkSize) nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, fi.Size(), bytes.NewReader(fullContents[fi.Size():])) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(fullContents[fi.Size():]))) received, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, fullContents) // Writing past size of file extends file (no offset error). We would like // to write chunk 4 one chunk length past chunk 3. It should be successful // and the resulting file will be 5 chunks long, with a chunk of all // zeros. fullContents = append(fullContents, zeroChunk...) fullContents = append(fullContents, contentsChunk4...) 
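// The write offset below (len(fullContents) - chunkSize) is one full chunk past the previous end of the file, so the driver is expected to zero-fill the gap between chunk 3 and chunk 4.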
nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, int64(len(fullContents))-chunkSize, bytes.NewReader(contentsChunk4)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, chunkSize) fi, err = suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Size(), check.Equals, int64(len(fullContents))) received, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(len(received), check.Equals, len(fullContents)) c.Assert(received[chunkSize*3:chunkSize*4], check.DeepEquals, zeroChunk) c.Assert(received[chunkSize*4:chunkSize*5], check.DeepEquals, contentsChunk4) c.Assert(received, check.DeepEquals, fullContents) // Ensure that negative offsets return correct error. nn, err = suite.StorageDriver.WriteStream(suite.ctx, filename, -1, bytes.NewReader(zeroChunk)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.InvalidOffsetError{}) c.Assert(err.(storagedriver.InvalidOffsetError).Path, check.Equals, filename) c.Assert(err.(storagedriver.InvalidOffsetError).Offset, check.Equals, int64(-1)) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestReadNonexistentStream tests that reading a stream for a nonexistent path // fails. func (suite *DriverSuite) TestReadNonexistentStream(c *check.C) { filename := randomPath(32) _, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.ReadStream(suite.ctx, filename, 64) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestList checks the returned list of keys after populating a directory tree. func (suite *DriverSuite) TestList(c *check.C) { rootDirectory := "/" + randomFilename(int64(8+rand.Intn(8))) defer suite.deletePath(c, rootDirectory) doesnotexist := path.Join(rootDirectory, "nonexistent") _, err := suite.StorageDriver.List(suite.ctx, doesnotexist) c.Assert(err, check.Equals, storagedriver.PathNotFoundError{ Path: doesnotexist, DriverName: suite.StorageDriver.Name(), }) parentDirectory := rootDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles := make([]string, 50) for i := 0; i < len(childFiles); i++ { childFile := parentDirectory + "/" + randomFilename(int64(8+rand.Intn(8))) childFiles[i] = childFile err := suite.StorageDriver.PutContent(suite.ctx, childFile, randomContents(32)) c.Assert(err, check.IsNil) } sort.Strings(childFiles) keys, err := suite.StorageDriver.List(suite.ctx, "/") c.Assert(err, check.IsNil) c.Assert(keys, check.DeepEquals, []string{rootDirectory}) keys, err = suite.StorageDriver.List(suite.ctx, rootDirectory) c.Assert(err, check.IsNil) c.Assert(keys, check.DeepEquals, []string{parentDirectory}) keys, err = suite.StorageDriver.List(suite.ctx, parentDirectory) c.Assert(err, check.IsNil) sort.Strings(keys) c.Assert(keys, check.DeepEquals, childFiles) // A few checks to add here (check out #819 for more discussion on this): // 1. Ensure that all paths are absolute. // 2. Ensure that listings only include direct children. // 3. Ensure that we only respond to directory listings that end with a slash (maybe?). } // TestMove checks that a moved object no longer exists at the source path and // does exist at the destination. 
func (suite *DriverSuite) TestMove(c *check.C) { contents := randomContents(32) sourcePath := randomPath(32) destPath := randomPath(32) defer suite.deletePath(c, firstPart(sourcePath)) defer suite.deletePath(c, firstPart(destPath)) err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, contents) c.Assert(err, check.IsNil) err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, contents) _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestMoveOverwrite checks that a moved object no longer exists at the source // path and overwrites the contents at the destination. func (suite *DriverSuite) TestMoveOverwrite(c *check.C) { sourcePath := randomPath(32) destPath := randomPath(32) sourceContents := randomContents(32) destContents := randomContents(64) defer suite.deletePath(c, firstPart(sourcePath)) defer suite.deletePath(c, firstPart(destPath)) err := suite.StorageDriver.PutContent(suite.ctx, sourcePath, sourceContents) c.Assert(err, check.IsNil) err = suite.StorageDriver.PutContent(suite.ctx, destPath, destContents) c.Assert(err, check.IsNil) err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.IsNil) received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, sourceContents) _, err = suite.StorageDriver.GetContent(suite.ctx, sourcePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestMoveNonexistent checks that moving a nonexistent key fails and does not // delete the data at the destination path. func (suite *DriverSuite) TestMoveNonexistent(c *check.C) { contents := randomContents(32) sourcePath := randomPath(32) destPath := randomPath(32) defer suite.deletePath(c, firstPart(destPath)) err := suite.StorageDriver.PutContent(suite.ctx, destPath, contents) c.Assert(err, check.IsNil) err = suite.StorageDriver.Move(suite.ctx, sourcePath, destPath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) received, err := suite.StorageDriver.GetContent(suite.ctx, destPath) c.Assert(err, check.IsNil) c.Assert(received, check.DeepEquals, contents) } // TestMoveInvalid provides various checks for invalid moves. func (suite *DriverSuite) TestMoveInvalid(c *check.C) { contents := randomContents(32) // Create a regular file. err := suite.StorageDriver.PutContent(suite.ctx, "/notadir", contents) c.Assert(err, check.IsNil) defer suite.deletePath(c, "/notadir") // Now try to move a non-existent file under it. 
err = suite.StorageDriver.Move(suite.ctx, "/notadir/foo", "/notadir/bar") c.Assert(err, check.NotNil) // non-nil error } // TestDelete checks that the delete operation removes data from the storage // driver func (suite *DriverSuite) TestDelete(c *check.C) { filename := randomPath(32) contents := randomContents(32) defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) err = suite.StorageDriver.Delete(suite.ctx, filename) c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestURLFor checks that the URLFor method functions properly, but only if it // is implemented func (suite *DriverSuite) TestURLFor(c *check.C) { filename := randomPath(32) contents := randomContents(32) defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) url, err := suite.StorageDriver.URLFor(suite.ctx, filename, nil) if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { return } c.Assert(err, check.IsNil) response, err := http.Get(url) c.Assert(err, check.IsNil) defer response.Body.Close() read, err := ioutil.ReadAll(response.Body) c.Assert(err, check.IsNil) c.Assert(read, check.DeepEquals, contents) url, err = suite.StorageDriver.URLFor(suite.ctx, filename, map[string]interface{}{"method": "HEAD"}) if _, ok := err.(storagedriver.ErrUnsupportedMethod); ok { return } c.Assert(err, check.IsNil) response, err = http.Head(url) c.Assert(response.StatusCode, check.Equals, 200) c.Assert(response.ContentLength, check.Equals, int64(32)) } // TestDeleteNonexistent checks that removing a nonexistent key fails. func (suite *DriverSuite) TestDeleteNonexistent(c *check.C) { filename := randomPath(32) err := suite.StorageDriver.Delete(suite.ctx, filename) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestDeleteFolder checks that deleting a folder removes all child elements. 
func (suite *DriverSuite) TestDeleteFolder(c *check.C) { dirname := randomPath(32) filename1 := randomPath(32) filename2 := randomPath(32) filename3 := randomPath(32) contents := randomContents(32) defer suite.deletePath(c, firstPart(dirname)) err := suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename1), contents) c.Assert(err, check.IsNil) err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename2), contents) c.Assert(err, check.IsNil) err = suite.StorageDriver.PutContent(suite.ctx, path.Join(dirname, filename3), contents) c.Assert(err, check.IsNil) err = suite.StorageDriver.Delete(suite.ctx, path.Join(dirname, filename1)) c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) c.Assert(err, check.IsNil) err = suite.StorageDriver.Delete(suite.ctx, dirname) c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename1)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename2)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) _, err = suite.StorageDriver.GetContent(suite.ctx, path.Join(dirname, filename3)) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) } // TestStatCall runs verifies the implementation of the storagedriver's Stat call. func (suite *DriverSuite) TestStatCall(c *check.C) { content := randomContents(4096) dirPath := randomPath(32) fileName := randomFilename(32) filePath := path.Join(dirPath, fileName) defer suite.deletePath(c, firstPart(dirPath)) // Call on non-existent file/dir, check error. 
fi, err := suite.StorageDriver.Stat(suite.ctx, dirPath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) c.Assert(fi, check.IsNil) fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) c.Assert(err, check.NotNil) c.Assert(err, check.FitsTypeOf, storagedriver.PathNotFoundError{}) c.Assert(strings.Contains(err.Error(), suite.Name()), check.Equals, true) c.Assert(fi, check.IsNil) err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) c.Assert(err, check.IsNil) // Call on regular file, check results fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Path(), check.Equals, filePath) c.Assert(fi.Size(), check.Equals, int64(len(content))) c.Assert(fi.IsDir(), check.Equals, false) createdTime := fi.ModTime() // Sleep and modify the file time.Sleep(time.Second * 10) content = randomContents(4096) err = suite.StorageDriver.PutContent(suite.ctx, filePath, content) c.Assert(err, check.IsNil) fi, err = suite.StorageDriver.Stat(suite.ctx, filePath) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) time.Sleep(time.Second * 5) // allow changes to propagate (eventual consistency) // Check if the modification time is after the creation time. // In case of cloud storage services, storage frontend nodes might have // time drift between them, however that should be solved with sleeping // before update. modTime := fi.ModTime() if !modTime.After(createdTime) { c.Errorf("modtime (%s) is before the creation time (%s)", modTime, createdTime) } // Call on directory (do not check ModTime as dirs don't need to support it) fi, err = suite.StorageDriver.Stat(suite.ctx, dirPath) c.Assert(err, check.IsNil) c.Assert(fi, check.NotNil) c.Assert(fi.Path(), check.Equals, dirPath) c.Assert(fi.Size(), check.Equals, int64(0)) c.Assert(fi.IsDir(), check.Equals, true) } // TestPutContentMultipleTimes checks that if storage driver can overwrite the content // in the subsequent puts. Validates that PutContent does not have to work // with an offset like WriteStream does and overwrites the file entirely // rather than writing the data to the [0,len(data)) of the file. func (suite *DriverSuite) TestPutContentMultipleTimes(c *check.C) { filename := randomPath(32) contents := randomContents(4096) defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) contents = randomContents(2048) // upload a different, smaller file err = suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) } // TestConcurrentStreamReads checks that multiple clients can safely read from // the same file simultaneously with various offsets. 
func (suite *DriverSuite) TestConcurrentStreamReads(c *check.C) { var filesize int64 = 128 * 1024 * 1024 if testing.Short() { filesize = 10 * 1024 * 1024 c.Log("Reducing file size to 10MB for short mode") } filename := randomPath(32) contents := randomContents(filesize) defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) var wg sync.WaitGroup readContents := func() { defer wg.Done() offset := rand.Int63n(int64(len(contents))) reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) c.Assert(err, check.IsNil) readContents, err := ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents[offset:]) } wg.Add(10) for i := 0; i < 10; i++ { go readContents() } wg.Wait() } // TestConcurrentFileStreams checks that multiple *os.File objects can be passed // in to WriteStream concurrently without hanging. func (suite *DriverSuite) TestConcurrentFileStreams(c *check.C) { numStreams := 32 if testing.Short() { numStreams = 8 c.Log("Reducing number of streams to 8 for short mode") } var wg sync.WaitGroup testStream := func(size int64) { defer wg.Done() suite.testFileStreams(c, size) } wg.Add(numStreams) for i := numStreams; i > 0; i-- { go testStream(int64(numStreams) * 1024 * 1024) } wg.Wait() } // TestEventualConsistency checks that if stat says that a file is a certain size, then // you can freely read from the file (this is the only guarantee that the driver needs to provide) func (suite *DriverSuite) TestEventualConsistency(c *check.C) { if testing.Short() { c.Skip("Skipping test in short mode") } filename := randomPath(32) defer suite.deletePath(c, firstPart(filename)) var offset int64 var misswrites int var chunkSize int64 = 32 for i := 0; i < 1024; i++ { contents := randomContents(chunkSize) read, err := suite.StorageDriver.WriteStream(suite.ctx, filename, offset, bytes.NewReader(contents)) c.Assert(err, check.IsNil) fi, err := suite.StorageDriver.Stat(suite.ctx, filename) c.Assert(err, check.IsNil) // We are most concerned with being able to read data as soon as Stat declares // it is uploaded. This is the strongest guarantee that some drivers (that guarantee // at best eventual consistency) absolutely need to provide. 
if fi.Size() == offset+chunkSize { reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, offset) c.Assert(err, check.IsNil) readContents, err := ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) reader.Close() offset += read } else { misswrites++ } } if misswrites > 0 { c.Log("There were " + string(misswrites) + " occurences of a write not being instantly available.") } c.Assert(misswrites, check.Not(check.Equals), 1024) } // BenchmarkPutGetEmptyFiles benchmarks PutContent/GetContent for 0B files func (suite *DriverSuite) BenchmarkPutGetEmptyFiles(c *check.C) { suite.benchmarkPutGetFiles(c, 0) } // BenchmarkPutGet1KBFiles benchmarks PutContent/GetContent for 1KB files func (suite *DriverSuite) BenchmarkPutGet1KBFiles(c *check.C) { suite.benchmarkPutGetFiles(c, 1024) } // BenchmarkPutGet1MBFiles benchmarks PutContent/GetContent for 1MB files func (suite *DriverSuite) BenchmarkPutGet1MBFiles(c *check.C) { suite.benchmarkPutGetFiles(c, 1024*1024) } // BenchmarkPutGet1GBFiles benchmarks PutContent/GetContent for 1GB files func (suite *DriverSuite) BenchmarkPutGet1GBFiles(c *check.C) { suite.benchmarkPutGetFiles(c, 1024*1024*1024) } func (suite *DriverSuite) benchmarkPutGetFiles(c *check.C, size int64) { c.SetBytes(size) parentDir := randomPath(8) defer func() { c.StopTimer() suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) }() for i := 0; i < c.N; i++ { filename := path.Join(parentDir, randomPath(32)) err := suite.StorageDriver.PutContent(suite.ctx, filename, randomContents(size)) c.Assert(err, check.IsNil) _, err = suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) } } // BenchmarkStreamEmptyFiles benchmarks WriteStream/ReadStream for 0B files func (suite *DriverSuite) BenchmarkStreamEmptyFiles(c *check.C) { suite.benchmarkStreamFiles(c, 0) } // BenchmarkStream1KBFiles benchmarks WriteStream/ReadStream for 1KB files func (suite *DriverSuite) BenchmarkStream1KBFiles(c *check.C) { suite.benchmarkStreamFiles(c, 1024) } // BenchmarkStream1MBFiles benchmarks WriteStream/ReadStream for 1MB files func (suite *DriverSuite) BenchmarkStream1MBFiles(c *check.C) { suite.benchmarkStreamFiles(c, 1024*1024) } // BenchmarkStream1GBFiles benchmarks WriteStream/ReadStream for 1GB files func (suite *DriverSuite) BenchmarkStream1GBFiles(c *check.C) { suite.benchmarkStreamFiles(c, 1024*1024*1024) } func (suite *DriverSuite) benchmarkStreamFiles(c *check.C, size int64) { c.SetBytes(size) parentDir := randomPath(8) defer func() { c.StopTimer() suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) }() for i := 0; i < c.N; i++ { filename := path.Join(parentDir, randomPath(32)) written, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(randomContents(size))) c.Assert(err, check.IsNil) c.Assert(written, check.Equals, size) rc, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) rc.Close() } } // BenchmarkList5Files benchmarks List for 5 small files func (suite *DriverSuite) BenchmarkList5Files(c *check.C) { suite.benchmarkListFiles(c, 5) } // BenchmarkList50Files benchmarks List for 50 small files func (suite *DriverSuite) BenchmarkList50Files(c *check.C) { suite.benchmarkListFiles(c, 50) } func (suite *DriverSuite) benchmarkListFiles(c *check.C, numFiles int64) { parentDir := randomPath(8) defer func() { c.StopTimer() suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) }() for i := int64(0); i < numFiles; i++ { err := 
suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) c.Assert(err, check.IsNil) } c.ResetTimer() for i := 0; i < c.N; i++ { files, err := suite.StorageDriver.List(suite.ctx, parentDir) c.Assert(err, check.IsNil) c.Assert(int64(len(files)), check.Equals, numFiles) } } // BenchmarkDelete5Files benchmarks Delete for 5 small files func (suite *DriverSuite) BenchmarkDelete5Files(c *check.C) { suite.benchmarkDeleteFiles(c, 5) } // BenchmarkDelete50Files benchmarks Delete for 50 small files func (suite *DriverSuite) BenchmarkDelete50Files(c *check.C) { suite.benchmarkDeleteFiles(c, 50) } func (suite *DriverSuite) benchmarkDeleteFiles(c *check.C, numFiles int64) { for i := 0; i < c.N; i++ { parentDir := randomPath(8) defer suite.deletePath(c, firstPart(parentDir)) c.StopTimer() for j := int64(0); j < numFiles; j++ { err := suite.StorageDriver.PutContent(suite.ctx, path.Join(parentDir, randomPath(32)), nil) c.Assert(err, check.IsNil) } c.StartTimer() // This is the operation we're benchmarking err := suite.StorageDriver.Delete(suite.ctx, firstPart(parentDir)) c.Assert(err, check.IsNil) } } func (suite *DriverSuite) testFileStreams(c *check.C, size int64) { tf, err := ioutil.TempFile("", "tf") c.Assert(err, check.IsNil) defer os.Remove(tf.Name()) defer tf.Close() filename := randomPath(32) defer suite.deletePath(c, firstPart(filename)) contents := randomContents(size) _, err = tf.Write(contents) c.Assert(err, check.IsNil) tf.Sync() tf.Seek(0, os.SEEK_SET) nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, tf) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, size) reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() readContents, err := ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) } func (suite *DriverSuite) writeReadCompare(c *check.C, filename string, contents []byte) { defer suite.deletePath(c, firstPart(filename)) err := suite.StorageDriver.PutContent(suite.ctx, filename, contents) c.Assert(err, check.IsNil) readContents, err := suite.StorageDriver.GetContent(suite.ctx, filename) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) } func (suite *DriverSuite) writeReadCompareStreams(c *check.C, filename string, contents []byte) { defer suite.deletePath(c, firstPart(filename)) nn, err := suite.StorageDriver.WriteStream(suite.ctx, filename, 0, bytes.NewReader(contents)) c.Assert(err, check.IsNil) c.Assert(nn, check.Equals, int64(len(contents))) reader, err := suite.StorageDriver.ReadStream(suite.ctx, filename, 0) c.Assert(err, check.IsNil) defer reader.Close() readContents, err := ioutil.ReadAll(reader) c.Assert(err, check.IsNil) c.Assert(readContents, check.DeepEquals, contents) } var filenameChars = []byte("abcdefghijklmnopqrstuvwxyz0123456789") var separatorChars = []byte("._-") func randomPath(length int64) string { path := "/" for int64(len(path)) < length { chunkLength := rand.Int63n(length-int64(len(path))) + 1 chunk := randomFilename(chunkLength) path += chunk remaining := length - int64(len(path)) if remaining == 1 { path += randomFilename(1) } else if remaining > 1 { path += "/" } } return path } func randomFilename(length int64) string { b := make([]byte, length) wasSeparator := true for i := range b { if !wasSeparator && i < len(b)-1 && rand.Intn(4) == 0 { b[i] = separatorChars[rand.Intn(len(separatorChars))] wasSeparator = true } else { b[i] = filenameChars[rand.Intn(len(filenameChars))] 
wasSeparator = false } } return string(b) } // randomBytes pre-allocates all of the memory sizes needed for the test. If // anything panics while accessing randomBytes, just make this number bigger. var randomBytes = make([]byte, 128<<20) func init() { // increase the random bytes to the required maximum for i := range randomBytes { randomBytes[i] = byte(rand.Intn(2 << 8)) } } func randomContents(length int64) []byte { return randomBytes[:length] } type randReader struct { r int64 m sync.Mutex } func (rr *randReader) Read(p []byte) (n int, err error) { rr.m.Lock() defer rr.m.Unlock() n = copy(p, randomContents(int64(len(p)))) rr.r -= int64(n) if rr.r <= 0 { err = io.EOF } return } func newRandReader(n int64) *randReader { return &randReader{r: n} } func firstPart(filePath string) string { if filePath == "" { return "/" } for { if filePath[len(filePath)-1] == '/' { filePath = filePath[:len(filePath)-1] } dir, file := path.Split(filePath) if dir == "" && file == "" { return "/" } if dir == "/" || dir == "" { return "/" + file } if file == "" { return dir } filePath = dir } } distribution-2.3.0/registry/storage/filereader.go000066400000000000000000000077561265472114500222260ustar00rootroot00000000000000package storage import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "os" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) // TODO(stevvooe): Set an optimal buffer size here. We'll have to // understand the latency characteristics of the underlying network to // set this correctly, so we may want to leave it to the driver. For // out of process drivers, we'll have to optimize this buffer size for // local communication. const fileReaderBufferSize = 4 << 20 // remoteFileReader provides a read seeker interface to files stored in // storagedriver. Used to implement part of layer interface and will be used // to implement read side of LayerUpload. type fileReader struct { driver storagedriver.StorageDriver ctx context.Context // identifying fields path string size int64 // size is the total size, must be set. // mutable fields rc io.ReadCloser // remote read closer brd *bufio.Reader // internal buffered io offset int64 // offset is the current read offset err error // terminal error, if set, reader is closed } // newFileReader initializes a file reader for the remote file. The reader // takes on the size and path that must be determined externally with a stat // call. The reader operates optimistically, assuming that the file is already // there. func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) { return &fileReader{ ctx: ctx, driver: driver, path: path, size: size, }, nil } func (fr *fileReader) Read(p []byte) (n int, err error) { if fr.err != nil { return 0, fr.err } rd, err := fr.reader() if err != nil { return 0, err } n, err = rd.Read(p) fr.offset += int64(n) // Simulate io.EOR error if we reach filesize. if err == nil && fr.offset >= fr.size { err = io.EOF } return n, err } func (fr *fileReader) Seek(offset int64, whence int) (int64, error) { if fr.err != nil { return 0, fr.err } var err error newOffset := fr.offset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: newOffset = fr.size + int64(offset) case os.SEEK_SET: newOffset = int64(offset) } if newOffset < 0 { err = fmt.Errorf("cannot seek to negative position") } else { if fr.offset != newOffset { fr.reset() } // No problems, set the offset. 
fr.offset = newOffset } return fr.offset, err } func (fr *fileReader) Close() error { return fr.closeWithErr(fmt.Errorf("fileReader: closed")) } // reader prepares the current reader at the lrs offset, ensuring its buffered // and ready to go. func (fr *fileReader) reader() (io.Reader, error) { if fr.err != nil { return nil, fr.err } if fr.rc != nil { return fr.brd, nil } // If we don't have a reader, open one up. rc, err := fr.driver.ReadStream(fr.ctx, fr.path, fr.offset) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // NOTE(stevvooe): If the path is not found, we simply return a // reader that returns io.EOF. However, we do not set fr.rc, // allowing future attempts at getting a reader to possibly // succeed if the file turns up later. return ioutil.NopCloser(bytes.NewReader([]byte{})), nil default: return nil, err } } fr.rc = rc if fr.brd == nil { fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize) } else { fr.brd.Reset(fr.rc) } return fr.brd, nil } // resetReader resets the reader, forcing the read method to open up a new // connection and rebuild the buffered reader. This should be called when the // offset and the reader will become out of sync, such as during a seek // operation. func (fr *fileReader) reset() { if fr.err != nil { return } if fr.rc != nil { fr.rc.Close() fr.rc = nil } } func (fr *fileReader) closeWithErr(err error) error { if fr.err != nil { return fr.err } fr.err = err // close and release reader chain if fr.rc != nil { fr.rc.Close() } fr.rc = nil fr.brd = nil return fr.err } distribution-2.3.0/registry/storage/filereader_test.go000066400000000000000000000123551265472114500232540ustar00rootroot00000000000000package storage import ( "bytes" "crypto/rand" "io" mrand "math/rand" "os" "testing" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver/inmemory" ) func TestSimpleRead(t *testing.T) { ctx := context.Background() content := make([]byte, 1<<20) n, err := rand.Read(content) if err != nil { t.Fatalf("unexpected error building random data: %v", err) } if n != len(content) { t.Fatalf("random read didn't fill buffer") } dgst, err := digest.FromReader(bytes.NewReader(content)) if err != nil { t.Fatalf("unexpected error digesting random content: %v", err) } driver := inmemory.New() path := "/random" if err := driver.PutContent(ctx, path, content); err != nil { t.Fatalf("error putting patterned content: %v", err) } fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("error allocating file reader: %v", err) } verifier, err := digest.NewDigestVerifier(dgst) if err != nil { t.Fatalf("error getting digest verifier: %s", err) } io.Copy(verifier, fr) if !verifier.Verified() { t.Fatalf("unable to verify read data") } } func TestFileReaderSeek(t *testing.T) { driver := inmemory.New() pattern := "01234567890ab" // prime length block repititions := 1024 path := "/patterned" content := bytes.Repeat([]byte(pattern), repititions) ctx := context.Background() if err := driver.PutContent(ctx, path, content); err != nil { t.Fatalf("error putting patterned content: %v", err) } fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("unexpected error creating file reader: %v", err) } // Seek all over the place, in blocks of pattern size and make sure we get // the right data. 
for _, repitition := range mrand.Perm(repititions - 1) { targetOffset := int64(len(pattern) * repitition) // Seek to a multiple of pattern size and read pattern size bytes offset, err := fr.Seek(targetOffset, os.SEEK_SET) if err != nil { t.Fatalf("unexpected error seeking: %v", err) } if offset != targetOffset { t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset) } p := make([]byte, len(pattern)) n, err := fr.Read(p) if err != nil { t.Fatalf("error reading pattern: %v", err) } if n != len(pattern) { t.Fatalf("incorrect read length: %d != %d", n, len(pattern)) } if string(p) != pattern { t.Fatalf("incorrect read content: %q != %q", p, pattern) } // Check offset current, err := fr.Seek(0, os.SEEK_CUR) if err != nil { t.Fatalf("error checking current offset: %v", err) } if current != targetOffset+int64(len(pattern)) { t.Fatalf("unexpected offset after read: %v", err) } } start, err := fr.Seek(0, os.SEEK_SET) if err != nil { t.Fatalf("error seeking to start: %v", err) } if start != 0 { t.Fatalf("expected to seek to start: %v != 0", start) } end, err := fr.Seek(0, os.SEEK_END) if err != nil { t.Fatalf("error checking current offset: %v", err) } if end != int64(len(content)) { t.Fatalf("expected to seek to end: %v != %v", end, len(content)) } // 4. Seek before start, ensure error. // seek before start before, err := fr.Seek(-1, os.SEEK_SET) if err == nil { t.Fatalf("error expected, returned offset=%v", before) } // 5. Seek after end, after, err := fr.Seek(1, os.SEEK_END) if err != nil { t.Fatalf("unexpected error expected, returned offset=%v", after) } p := make([]byte, 16) n, err := fr.Read(p) if n != 0 { t.Fatalf("bytes reads %d != %d", n, 0) } if err != io.EOF { t.Fatalf("expected io.EOF, got %v", err) } } // TestFileReaderNonExistentFile ensures the reader behaves as expected with a // missing or zero-length remote file. While the file may not exist, the // reader should not error out on creation and should return 0-bytes from the // read method, with an io.EOF error. func TestFileReaderNonExistentFile(t *testing.T) { driver := inmemory.New() fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10) if err != nil { t.Fatalf("unexpected error initializing reader: %v", err) } var buf [1024]byte n, err := fr.Read(buf[:]) if n != 0 { t.Fatalf("non-zero byte read reported: %d != 0", n) } if err != io.EOF { t.Fatalf("read on missing file should return io.EOF, got %v", err) } } // TestLayerReadErrors covers the various error return type for different // conditions that can arise when reading a layer. func TestFileReaderErrors(t *testing.T) { // TODO(stevvooe): We need to cover error return types, driven by the // errors returned via the HTTP API. For now, here is a incomplete list: // // 1. Layer Not Found: returned when layer is not found or access is // denied. // 2. Layer Unavailable: returned when link references are unresolved, // but layer is known to the registry. // 3. Layer Invalid: This may more split into more errors, but should be // returned when name or tarsum does not reference a valid error. We // may also need something to communication layer verification errors // for the inline tarsum check. // 4. Timeout: timeouts to backend. Need to better understand these // failure cases and how the storage driver propagates these errors // up the stack. 
} distribution-2.3.0/registry/storage/filewriter.go000066400000000000000000000104421265472114500222620ustar00rootroot00000000000000package storage import ( "bufio" "bytes" "fmt" "io" "os" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" ) const ( fileWriterBufferSize = 5 << 20 ) // fileWriter implements a remote file writer backed by a storage driver. type fileWriter struct { driver storagedriver.StorageDriver ctx context.Context // identifying fields path string // mutable fields size int64 // size of the file, aka the current end offset int64 // offset is the current write offset err error // terminal error, if set, reader is closed } type bufferedFileWriter struct { fileWriter bw *bufio.Writer } // fileWriterInterface makes the desired io compliant interface that the // filewriter should implement. type fileWriterInterface interface { io.WriteSeeker io.ReaderFrom io.Closer } var _ fileWriterInterface = &fileWriter{} // newFileWriter returns a prepared fileWriter for the driver and path. This // could be considered similar to an "open" call on a regular filesystem. func newFileWriter(ctx context.Context, driver storagedriver.StorageDriver, path string) (*bufferedFileWriter, error) { fw := fileWriter{ driver: driver, path: path, ctx: ctx, } if fi, err := driver.Stat(ctx, path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // ignore, offset is zero default: return nil, err } } else { if fi.IsDir() { return nil, fmt.Errorf("cannot write to a directory") } fw.size = fi.Size() } buffered := bufferedFileWriter{ fileWriter: fw, } buffered.bw = bufio.NewWriterSize(&buffered.fileWriter, fileWriterBufferSize) return &buffered, nil } // wraps the fileWriter.Write method to buffer small writes func (bfw *bufferedFileWriter) Write(p []byte) (int, error) { return bfw.bw.Write(p) } // wraps fileWriter.Close to ensure the buffer is flushed // before we close the writer. func (bfw *bufferedFileWriter) Close() (err error) { if err = bfw.Flush(); err != nil { return err } err = bfw.fileWriter.Close() return err } // wraps fileWriter.Seek to ensure offset is handled // correctly in respect to pending data in the buffer func (bfw *bufferedFileWriter) Seek(offset int64, whence int) (int64, error) { if err := bfw.Flush(); err != nil { return 0, err } return bfw.fileWriter.Seek(offset, whence) } // wraps bufio.Writer.Flush to allow intermediate flushes // of the bufferedFileWriter func (bfw *bufferedFileWriter) Flush() error { return bfw.bw.Flush() } // Write writes the buffer p at the current write offset. func (fw *fileWriter) Write(p []byte) (n int, err error) { nn, err := fw.ReadFrom(bytes.NewReader(p)) return int(nn), err } // ReadFrom reads reader r until io.EOF writing the contents at the current // offset. func (fw *fileWriter) ReadFrom(r io.Reader) (n int64, err error) { if fw.err != nil { return 0, fw.err } nn, err := fw.driver.WriteStream(fw.ctx, fw.path, fw.offset, r) // We should forward the offset, whether or not there was an error. // Basically, we keep the filewriter in sync with the reader's head. If an // error is encountered, the whole thing should be retried but we proceed // from an expected offset, even if the data didn't make it to the // backend. fw.offset += nn if fw.offset > fw.size { fw.size = fw.offset } return nn, err } // Seek moves the write position do the requested offest based on the whence // argument, which can be os.SEEK_CUR, os.SEEK_END, or os.SEEK_SET. 
func (fw *fileWriter) Seek(offset int64, whence int) (int64, error) { if fw.err != nil { return 0, fw.err } var err error newOffset := fw.offset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: newOffset = fw.size + int64(offset) case os.SEEK_SET: newOffset = int64(offset) } if newOffset < 0 { err = fmt.Errorf("cannot seek to negative position") } else { // No problems, set the offset. fw.offset = newOffset } return fw.offset, err } // Close closes the fileWriter for writing. // Calling it once is valid and correct and it will // return a nil error. Calling it subsequent times will // detect that fw.err has been set and will return the error. func (fw *fileWriter) Close() error { if fw.err != nil { return fw.err } fw.err = fmt.Errorf("filewriter@%v: closed", fw.path) return nil } distribution-2.3.0/registry/storage/filewriter_test.go000066400000000000000000000145711265472114500233300ustar00rootroot00000000000000package storage import ( "bytes" "crypto/rand" "io" "os" "testing" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) // TestSimpleWrite takes the fileWriter through common write operations // ensuring data integrity. func TestSimpleWrite(t *testing.T) { content := make([]byte, 1<<20) n, err := rand.Read(content) if err != nil { t.Fatalf("unexpected error building random data: %v", err) } if n != len(content) { t.Fatalf("random read did't fill buffer") } dgst, err := digest.FromReader(bytes.NewReader(content)) if err != nil { t.Fatalf("unexpected error digesting random content: %v", err) } driver := inmemory.New() path := "/random" ctx := context.Background() fw, err := newFileWriter(ctx, driver, path) if err != nil { t.Fatalf("unexpected error creating fileWriter: %v", err) } defer fw.Close() n, err = fw.Write(content) if err != nil { t.Fatalf("unexpected error writing content: %v", err) } fw.Flush() if n != len(content) { t.Fatalf("unexpected write length: %d != %d", n, len(content)) } fr, err := newFileReader(ctx, driver, path, int64(len(content))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } defer fr.Close() verifier, err := digest.NewDigestVerifier(dgst) if err != nil { t.Fatalf("unexpected error getting digest verifier: %s", err) } io.Copy(verifier, fr) if !verifier.Verified() { t.Fatalf("unable to verify write data") } // Check the seek position is equal to the content length end, err := fw.Seek(0, os.SEEK_END) if err != nil { t.Fatalf("unexpected error seeking: %v", err) } if end != int64(len(content)) { t.Fatalf("write did not advance offset: %d != %d", end, len(content)) } // Double the content doubled := append(content, content...) 
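// Compute the digest of the doubled content so the ReadFrom call below can be verified end to end: after appending the same content again, the file should hash to doubledgst.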
doubledgst, err := digest.FromReader(bytes.NewReader(doubled)) if err != nil { t.Fatalf("unexpected error digesting doubled content: %v", err) } nn, err := fw.ReadFrom(bytes.NewReader(content)) if err != nil { t.Fatalf("unexpected error doubling content: %v", err) } if nn != int64(len(content)) { t.Fatalf("writeat was short: %d != %d", n, len(content)) } fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } defer fr.Close() verifier, err = digest.NewDigestVerifier(doubledgst) if err != nil { t.Fatalf("unexpected error getting digest verifier: %s", err) } io.Copy(verifier, fr) if !verifier.Verified() { t.Fatalf("unable to verify write data") } // Check that Write updated the offset. end, err = fw.Seek(0, os.SEEK_END) if err != nil { t.Fatalf("unexpected error seeking: %v", err) } if end != int64(len(doubled)) { t.Fatalf("write did not advance offset: %d != %d", end, len(doubled)) } // Now, we copy from one path to another, running the data through the // fileReader to fileWriter, rather than the driver.Move command to ensure // everything is working correctly. fr, err = newFileReader(ctx, driver, path, int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } defer fr.Close() fw, err = newFileWriter(ctx, driver, "/copied") if err != nil { t.Fatalf("unexpected error creating fileWriter: %v", err) } defer fw.Close() nn, err = io.Copy(fw, fr) if err != nil { t.Fatalf("unexpected error copying data: %v", err) } if nn != int64(len(doubled)) { t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled)) } fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled))) if err != nil { t.Fatalf("unexpected error creating fileReader: %v", err) } defer fr.Close() verifier, err = digest.NewDigestVerifier(doubledgst) if err != nil { t.Fatalf("unexpected error getting digest verifier: %s", err) } io.Copy(verifier, fr) if !verifier.Verified() { t.Fatalf("unable to verify write data") } } func TestBufferedFileWriter(t *testing.T) { ctx := context.Background() writer, err := newFileWriter(ctx, inmemory.New(), "/random") if err != nil { t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) } // write one byte and ensure the offset hasn't been incremented. 
// offset will only get incremented when the buffer gets flushed short := []byte{byte(1)} writer.Write(short) if writer.offset > 0 { t.Fatalf("WriteStream called prematurely") } // write enough data to cause the buffer to flush and confirm // the offset has been incremented long := make([]byte, fileWriterBufferSize) _, err = rand.Read(long) if err != nil { t.Fatalf("unexpected error building random data: %v", err) } for i := range long { long[i] = byte(i) } writer.Write(long) writer.Close() if writer.offset != (fileWriterBufferSize + 1) { t.Fatalf("WriteStream not called when buffer capacity reached") } } func BenchmarkFileWriter(b *testing.B) { b.StopTimer() // not sure how long setup above will take for i := 0; i < b.N; i++ { // Start basic fileWriter initialization fw := fileWriter{ driver: inmemory.New(), path: "/random", } ctx := context.Background() if fi, err := fw.driver.Stat(ctx, fw.path); err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: // ignore, offset is zero default: b.Fatalf("Failed to initialize fileWriter: %v", err.Error()) } } else { if fi.IsDir() { b.Fatalf("Cannot write to a directory") } fw.size = fi.Size() } randomBytes := make([]byte, 1<<20) _, err := rand.Read(randomBytes) if err != nil { b.Fatalf("unexpected error building random data: %v", err) } // End basic file writer initialization b.StartTimer() for j := 0; j < 100; j++ { fw.Write(randomBytes) } b.StopTimer() } } func BenchmarkBufferedFileWriter(b *testing.B) { b.StopTimer() // not sure how long setup above will take ctx := context.Background() for i := 0; i < b.N; i++ { bfw, err := newFileWriter(ctx, inmemory.New(), "/random") if err != nil { b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error()) } randomBytes := make([]byte, 1<<20) _, err = rand.Read(randomBytes) if err != nil { b.Fatalf("unexpected error building random data: %v", err) } b.StartTimer() for j := 0; j < 100; j++ { bfw.Write(randomBytes) } b.StopTimer() } } distribution-2.3.0/registry/storage/linkedblobstore.go000066400000000000000000000300551265472114500232720ustar00rootroot00000000000000package storage import ( "fmt" "net/http" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" ) // linkPathFunc describes a function that can resolve a link based on the // repository name and digest. type linkPathFunc func(name string, dgst digest.Digest) (string, error) // linkedBlobStore provides a full BlobService that namespaces the blobs to a // given repository. Effectively, it manages the links in a given repository // that grant access to the global blob store. type linkedBlobStore struct { *blobStore registry *registry blobServer distribution.BlobServer blobAccessController distribution.BlobDescriptorService repository distribution.Repository ctx context.Context // only to be used where context can't come through method args deleteEnabled bool resumableDigestEnabled bool // linkPathFns specifies one or more path functions allowing one to // control the repository blob link set to which the blob store // dispatches. This is required because manifest and layer blobs have not // yet been fully merged. At some point, this functionality should be // removed an the blob links folder should be merged. The first entry is // treated as the "canonical" link location and will be used for writes. 
linkPathFns []linkPathFunc } var _ distribution.BlobStore = &linkedBlobStore{} func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { return lbs.blobAccessController.Stat(ctx, dgst) } func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { canonical, err := lbs.Stat(ctx, dgst) // access check if err != nil { return nil, err } return lbs.blobStore.Get(ctx, canonical.Digest) } func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { canonical, err := lbs.Stat(ctx, dgst) // access check if err != nil { return nil, err } return lbs.blobStore.Open(ctx, canonical.Digest) } func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { canonical, err := lbs.Stat(ctx, dgst) // access check if err != nil { return err } if canonical.MediaType != "" { // Set the repository local content type. w.Header().Set("Content-Type", canonical.MediaType) } return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest) } func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { dgst := digest.FromBytes(p) // Place the data in the blob store first. desc, err := lbs.blobStore.Put(ctx, mediaType, p) if err != nil { context.GetLogger(ctx).Errorf("error putting into main store: %v", err) return distribution.Descriptor{}, err } if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil { return distribution.Descriptor{}, err } // TODO(stevvooe): Write out mediatype if incoming differs from what is // returned by Put above. Note that we should allow updates for a given // repository. return desc, lbs.linkBlob(ctx, desc) } // createOptions is a collection of blob creation modifiers relevant to general // blob storage intended to be configured by the BlobCreateOption.Apply method. type createOptions struct { Mount struct { ShouldMount bool From reference.Canonical } } type optionFunc func(interface{}) error func (f optionFunc) Apply(v interface{}) error { return f(v) } // WithMountFrom returns a BlobCreateOption which designates that the blob should be // mounted from the given canonical reference. func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { return optionFunc(func(v interface{}) error { opts, ok := v.(*createOptions) if !ok { return fmt.Errorf("unexpected options type: %T", v) } opts.Mount.ShouldMount = true opts.Mount.From = ref return nil }) } // Writer begins a blob write session, returning a handle. 
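// A minimal caller-side sketch (an assumption for illustration, not part of
// the original source; repo, ctx and canonicalRef are placeholders) showing
// how WithMountFrom is used to attempt a cross-repository mount before
// falling back to a normal upload session:
//
//	w, err := repo.Blobs(ctx).Create(ctx, WithMountFrom(canonicalRef))
//	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
//		// The blob was linked in from the source repository; ebm.Descriptor
//		// describes it and no upload session was started.
//		_ = ebm
//	} else if err == nil {
//		// No mount happened: w is a regular BlobWriter, proceed with
//		// Write and Commit as usual.
//		defer w.Close()
//	}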
func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") var opts createOptions for _, option := range options { err := option.Apply(&opts) if err != nil { return nil, err } } if opts.Mount.ShouldMount { desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest()) if err == nil { // Mount successful, no need to initiate an upload session return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} } } uuid := uuid.Generate().String() startedAt := time.Now().UTC() path, err := pathFor(uploadDataPathSpec{ name: lbs.repository.Name().Name(), id: uuid, }) if err != nil { return nil, err } startedAtPath, err := pathFor(uploadStartedAtPathSpec{ name: lbs.repository.Name().Name(), id: uuid, }) if err != nil { return nil, err } // Write a startedat file for this upload if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { return nil, err } return lbs.newBlobUpload(ctx, uuid, path, startedAt) } func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") startedAtPath, err := pathFor(uploadStartedAtPathSpec{ name: lbs.repository.Name().Name(), id: id, }) if err != nil { return nil, err } startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath) if err != nil { switch err := err.(type) { case driver.PathNotFoundError: return nil, distribution.ErrBlobUploadUnknown default: return nil, err } } startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) if err != nil { return nil, err } path, err := pathFor(uploadDataPathSpec{ name: lbs.repository.Name().Name(), id: id, }) if err != nil { return nil, err } return lbs.newBlobUpload(ctx, id, path, startedAt) } func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error { if !lbs.deleteEnabled { return distribution.ErrUnsupported } // Ensure the blob is available for deletion _, err := lbs.blobAccessController.Stat(ctx, dgst) if err != nil { return err } err = lbs.blobAccessController.Clear(ctx, dgst) if err != nil { return err } return nil } func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { repo, err := lbs.registry.Repository(ctx, sourceRepo) if err != nil { return distribution.Descriptor{}, err } stat, err := repo.Blobs(ctx).Stat(ctx, dgst) if err != nil { return distribution.Descriptor{}, err } desc := distribution.Descriptor{ Size: stat.Size, // NOTE(stevvooe): The central blob store firewalls media types from // other users. The caller should look this up and override the value // for the specific repository. MediaType: "application/octet-stream", Digest: dgst, } return desc, lbs.linkBlob(ctx, desc) } // newBlobUpload allocates a new upload controller with the given state. func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { fw, err := newFileWriter(ctx, lbs.driver, path) if err != nil { return nil, err } bw := &blobWriter{ blobStore: lbs, id: uuid, startedAt: startedAt, digester: digest.Canonical.New(), bufferedFileWriter: *fw, resumableDigestEnabled: lbs.resumableDigestEnabled, } return bw, nil } // linkBlob links a valid, written blob into the registry under the named // repository for the upload controller. 
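// Worked example (illustrative only, not from the original source): when the
// canonical link function is blobLinkPath and the repository is "foo/bar",
// the link written by linkBlob below lands at a path of the form
//
//	/docker/registry/v2/repositories/foo/bar/_layers/sha256/<hex>/link
//
// and the file contents are just the digest string ("sha256:<hex>"), which
// readlink later resolves back into the global blob store.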
func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error { dgsts := append([]digest.Digest{canonical.Digest}, aliases...) // TODO(stevvooe): Need to write out mediatype for only canonical hash // since we don't care about the aliases. They are generally unused except // for tarsum but those versions don't care about mediatype. // Don't make duplicate links. seenDigests := make(map[digest.Digest]struct{}, len(dgsts)) // only use the first link linkPathFn := lbs.linkPathFns[0] for _, dgst := range dgsts { if _, seen := seenDigests[dgst]; seen { continue } seenDigests[dgst] = struct{}{} blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return err } if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil { return err } } return nil } type linkedBlobStatter struct { *blobStore repository distribution.Repository // linkPathFns specifies one or more path functions allowing one to // control the repository blob link set to which the blob store // dispatches. This is required because manifest and layer blobs have not // yet been fully merged. At some point, this functionality should be // removed an the blob links folder should be merged. The first entry is // treated as the "canonical" link location and will be used for writes. linkPathFns []linkPathFunc } var _ distribution.BlobDescriptorService = &linkedBlobStatter{} func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { var ( resolveErr error target digest.Digest ) // try the many link path functions until we get success or an error that // is not PathNotFoundError. for _, linkPathFn := range lbs.linkPathFns { var err error target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn) if err == nil { break // success! } switch err := err.(type) { case driver.PathNotFoundError: resolveErr = distribution.ErrBlobUnknown // move to the next linkPathFn, saving the error default: return distribution.Descriptor{}, err } } if resolveErr != nil { return distribution.Descriptor{}, resolveErr } if target != dgst { // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) } // TODO(stevvooe): Look up repository local mediatype and replace that on // the returned descriptor. return lbs.blobStore.statter.Stat(ctx, target) } func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { // clear any possible existence of a link described in linkPathFns for _, linkPathFn := range lbs.linkPathFns { blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return err } err = lbs.blobStore.driver.Delete(ctx, blobLinkPath) if err != nil { switch err := err.(type) { case driver.PathNotFoundError: continue // just ignore this error and continue default: return err } } } return nil } // resolveTargetWithFunc allows us to read a link to a resource with different // linkPathFuncs to let us try a few different paths before returning not // found. 
func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return "", err } return lbs.blobStore.readlink(ctx, blobLinkPath) } func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { // The canonical descriptor for a blob is set at the commit phase of upload return nil } // blobLinkPath provides the path to the blob link, also known as layers. func blobLinkPath(name string, dgst digest.Digest) (string, error) { return pathFor(layerLinkPathSpec{name: name, digest: dgst}) } // manifestRevisionLinkPath provides the path to the manifest revision link. func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) { return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst}) } distribution-2.3.0/registry/storage/manifestlisthandler.go000066400000000000000000000055021265472114500241470ustar00rootroot00000000000000package storage import ( "fmt" "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/manifestlist" ) // manifestListHandler is a ManifestHandler that covers schema2 manifest lists. type manifestListHandler struct { repository *repository blobStore *linkedBlobStore ctx context.Context } var _ ManifestHandler = &manifestListHandler{} func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") var m manifestlist.DeserializedManifestList if err := json.Unmarshal(content, &m); err != nil { return nil, err } return &m, nil } func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") m, ok := manifestList.(*manifestlist.DeserializedManifestList) if !ok { return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) } if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { return "", err } mt, payload, err := m.Payload() if err != nil { return "", err } revision, err := ms.blobStore.Put(ctx, mt, payload) if err != nil { context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } // Link the revision into the repository. if err := ms.blobStore.linkBlob(ctx, revision); err != nil { return "", err } return revision.Digest, nil } // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. As a policy, the registry only tries to // store valid content, leaving trust policies of that content up to // consumers. func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification if !skipDependencyVerification { // This manifest service is different from the blob service // returned by Blob. It uses a linked blob store to ensure that // only manifests are accessible. 
manifestService, err := ms.repository.Manifests(ctx) if err != nil { return err } for _, manifestDescriptor := range mnfst.References() { exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) if err != nil && err != distribution.ErrBlobUnknown { errs = append(errs, err) } if err != nil || !exists { // On error here, we always append unknown blob errors. errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) } } } if len(errs) != 0 { return errs } return nil } distribution-2.3.0/registry/storage/manifeststore.go000066400000000000000000000103741265472114500227750ustar00rootroot00000000000000package storage import ( "fmt" "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/manifest/schema2" ) // A ManifestHandler gets and puts manifests of a particular type. type ManifestHandler interface { // Unmarshal unmarshals the manifest from a byte slice. Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) // Put creates or updates the given manifest returning the manifest digest. Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) } // SkipLayerVerification allows a manifest to be Put before its // layers are on the filesystem func SkipLayerVerification() distribution.ManifestServiceOption { return skipLayerOption{} } type skipLayerOption struct{} func (o skipLayerOption) Apply(m distribution.ManifestService) error { if ms, ok := m.(*manifestStore); ok { ms.skipDependencyVerification = true return nil } return fmt.Errorf("skip layer verification only valid for manifestStore") } type manifestStore struct { repository *repository blobStore *linkedBlobStore ctx context.Context skipDependencyVerification bool schema1Handler ManifestHandler schema2Handler ManifestHandler manifestListHandler ManifestHandler } var _ distribution.ManifestService = &manifestStore{} func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") _, err := ms.blobStore.Stat(ms.ctx, dgst) if err != nil { if err == distribution.ErrBlobUnknown { return false, nil } return false, err } return true, nil } func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") // TODO(stevvooe): Need to check descriptor from above to ensure that the // mediatype is as we expect for the manifest store. 
content, err := ms.blobStore.Get(ctx, dgst) if err != nil { if err == distribution.ErrBlobUnknown { return nil, distribution.ErrManifestUnknownRevision{ Name: ms.repository.Name().Name(), Revision: dgst, } } return nil, err } var versioned manifest.Versioned if err = json.Unmarshal(content, &versioned); err != nil { return nil, err } switch versioned.SchemaVersion { case 1: return ms.schema1Handler.Unmarshal(ctx, dgst, content) case 2: // This can be an image manifest or a manifest list switch versioned.MediaType { case schema2.MediaTypeManifest: return ms.schema2Handler.Unmarshal(ctx, dgst, content) case manifestlist.MediaTypeManifestList: return ms.manifestListHandler.Unmarshal(ctx, dgst, content) default: return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} } } return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) } func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") switch manifest.(type) { case *schema1.SignedManifest: return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) case *schema2.DeserializedManifest: return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) case *manifestlist.DeserializedManifestList: return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) } return "", fmt.Errorf("unrecognized manifest type %T", manifest) } // Delete removes the revision of the specified manfiest. func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") return ms.blobStore.Delete(ctx, dgst) } func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { return 0, distribution.ErrUnsupported } distribution-2.3.0/registry/storage/manifeststore_test.go000066400000000000000000000247741265472114500240450ustar00rootroot00000000000000package storage import ( "bytes" "io" "reflect" "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" "github.com/docker/libtrust" ) type manifestStoreTestEnv struct { ctx context.Context driver driver.StorageDriver registry distribution.Namespace repository distribution.Repository name reference.Named tag string } func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider( memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repo, err := registry.Repository(ctx, name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } return &manifestStoreTestEnv{ ctx: ctx, driver: driver, registry: registry, repository: repo, name: name, tag: tag, } } func TestManifestStorage(t *testing.T) { repoName, _ := 
reference.ParseNamed("foo/bar") env := newManifestStoreTestEnv(t, repoName, "thetag") ctx := context.Background() ms, err := env.repository.Manifests(ctx) if err != nil { t.Fatal(err) } m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: env.name.Name(), Tag: env.tag, } // Build up some test layers and add them to the manifest, saving the // readseekers for upload later. testLayers := map[digest.Digest]io.ReadSeeker{} for i := 0; i < 2; i++ { rs, ds, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("unexpected error generating test layer file") } dgst := digest.Digest(ds) testLayers[digest.Digest(dgst)] = rs m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) m.History = append(m.History, schema1.History{ V1Compatibility: "", }) } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating private key: %v", err) } sm, merr := schema1.Sign(&m, pk) if merr != nil { t.Fatalf("error signing manifest: %v", err) } _, err = ms.Put(ctx, sm) if err == nil { t.Fatalf("expected errors putting manifest with full verification") } switch err := err.(type) { case distribution.ErrManifestVerification: if len(err) != 2 { t.Fatalf("expected 2 verification errors: %#v", err) } for _, err := range err { if _, ok := err.(distribution.ErrManifestBlobUnknown); !ok { t.Fatalf("unexpected error type: %v", err) } } default: t.Fatalf("unexpected error verifying manifest: %v", err) } // Now, upload the layers that were missing! for dgst, rs := range testLayers { wr, err := env.repository.Blobs(env.ctx).Create(env.ctx) if err != nil { t.Fatalf("unexpected error creating test upload: %v", err) } if _, err := io.Copy(wr, rs); err != nil { t.Fatalf("unexpected error copying to upload: %v", err) } if _, err := wr.Commit(env.ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } } var manifestDigest digest.Digest if manifestDigest, err = ms.Put(ctx, sm); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } exists, err := ms.Exists(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error checking manifest existence: %#v", err) } if !exists { t.Fatalf("manifest should exist") } fromStore, err := ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } fetchedManifest, ok := fromStore.(*schema1.SignedManifest) if !ok { t.Fatalf("unexpected manifest type from signedstore") } if !reflect.DeepEqual(fetchedManifest, sm) { t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) } _, pl, err := fetchedManifest.Payload() if err != nil { t.Fatalf("error getting payload %#v", err) } fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } payload, err := fetchedJWS.Payload() if err != nil { t.Fatalf("unexpected error extracting payload: %v", err) } // Now that we have a payload, take a moment to check that the manifest is // return by the payload digest. 
dgst := digest.FromBytes(payload) exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("error checking manifest existence by digest: %v", err) } if !exists { t.Fatalf("manifest %s should exist", dgst) } fetchedByDigest, err := ms.Get(ctx, dgst) if err != nil { t.Fatalf("unexpected error fetching manifest by digest: %v", err) } if !reflect.DeepEqual(fetchedByDigest, fetchedManifest) { t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedByDigest, fetchedManifest) } sigs, err := fetchedJWS.Signatures() if err != nil { t.Fatalf("unable to extract signatures: %v", err) } if len(sigs) != 1 { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) } // Now, push the same manifest with a different key pk2, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating private key: %v", err) } sm2, err := schema1.Sign(&m, pk2) if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } _, pl, err = sm2.Payload() if err != nil { t.Fatalf("error getting payload %#v", err) } jws2, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("error parsing signature: %v", err) } sigs2, err := jws2.Signatures() if err != nil { t.Fatalf("unable to extract signatures: %v", err) } if len(sigs2) != 1 { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) } if manifestDigest, err = ms.Put(ctx, sm2); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } fromStore, err = ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } fetched, ok := fromStore.(*schema1.SignedManifest) if !ok { t.Fatalf("unexpected type from signed manifeststore : %T", fetched) } if _, err := schema1.Verify(fetched); err != nil { t.Fatalf("unexpected error verifying manifest: %v", err) } // Assemble our payload and two signatures to get what we expect! 
expectedJWS, err := libtrust.NewJSONSignature(payload, sigs[0], sigs2[0]) if err != nil { t.Fatalf("unexpected error merging jws: %v", err) } expectedSigs, err := expectedJWS.Signatures() if err != nil { t.Fatalf("unexpected error getting expected signatures: %v", err) } _, pl, err = fetched.Payload() if err != nil { t.Fatalf("error getting payload %#v", err) } receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } receivedPayload, err := receivedJWS.Payload() if err != nil { t.Fatalf("unexpected error extracting received payload: %v", err) } if !bytes.Equal(receivedPayload, payload) { t.Fatalf("payloads are not equal") } receivedSigs, err := receivedJWS.Signatures() if err != nil { t.Fatalf("error getting signatures: %v", err) } for i, sig := range receivedSigs { if !bytes.Equal(sig, expectedSigs[i]) { t.Fatalf("mismatched signatures from remote: %v != %v", string(sig), string(expectedSigs[i])) } } // Test deleting manifests err = ms.Delete(ctx, dgst) if err != nil { t.Fatalf("unexpected an error deleting manifest by digest: %v", err) } exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } if exists { t.Errorf("Deleted manifest should not exist") } deletedManifest, err := ms.Get(ctx, dgst) if err == nil { t.Errorf("Unexpected success getting deleted manifest") } switch err.(type) { case distribution.ErrManifestUnknownRevision: break default: t.Errorf("Unexpected error getting deleted manifest: %s", reflect.ValueOf(err).Type()) } if deletedManifest != nil { t.Errorf("Deleted manifest get returned non-nil") } // Re-upload should restore manifest to a good state _, err = ms.Put(ctx, sm) if err != nil { t.Errorf("Error re-uploading deleted manifest") } exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } if !exists { t.Errorf("Restored manifest should exist") } deletedManifest, err = ms.Get(ctx, dgst) if err != nil { t.Errorf("Unexpected error getting manifest") } if deletedManifest == nil { t.Errorf("Deleted manifest get returned non-nil") } r, err := NewRegistry(ctx, env.driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } repo, err := r.Repository(ctx, env.name) if err != nil { t.Fatalf("unexpected error getting repo: %v", err) } ms, err = repo.Manifests(ctx) if err != nil { t.Fatal(err) } err = ms.Delete(ctx, dgst) if err == nil { t.Errorf("Unexpected success deleting while disabled") } } // TestLinkPathFuncs ensures that the link path functions behavior are locked // down and implemented as expected. 
func TestLinkPathFuncs(t *testing.T) { for _, testcase := range []struct { repo string digest digest.Digest linkPathFn linkPathFunc expected string }{ { repo: "foo/bar", digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", linkPathFn: blobLinkPath, expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", }, { repo: "foo/bar", digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", linkPathFn: manifestRevisionLinkPath, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", }, } { p, err := testcase.linkPathFn(testcase.repo, testcase.digest) if err != nil { t.Fatalf("unexpected error calling linkPathFn(pm, %q, %q): %v", testcase.repo, testcase.digest, err) } if p != testcase.expected { t.Fatalf("incorrect path returned: %q != %q", p, testcase.expected) } } } distribution-2.3.0/registry/storage/paths.go000066400000000000000000000374171265472114500212400ustar00rootroot00000000000000package storage import ( "fmt" "path" "strings" "github.com/docker/distribution/digest" ) const ( storagePathVersion = "v2" // fixed storage layout version storagePathRoot = "/docker/registry/" // all driver paths have a prefix // TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we though // storage path root would configurable for all drivers through this // package. In reality, we've found it simpler to do this on a per driver // basis. ) // pathFor maps paths based on "object names" and their ids. The "object // names" mapped by are internal to the storage system. // // The path layout in the storage backend is roughly as follows: // // /v2 // -> repositories/ // ->/ // -> _manifests/ // revisions // -> // -> link // -> signatures // //link // tags/ // -> current/link // -> index // -> //link // -> _layers/ // // -> _uploads/ // data // startedat // hashstates// // -> blob/ // // // The storage backend layout is broken up into a content-addressable blob // store and repositories. The content-addressable blob store holds most data // throughout the backend, keyed by algorithm and digests of the underlying // content. Access to the blob store is controled through links from the // repository to blobstore. // // A repository is made up of layers, manifests and tags. The layers component // is just a directory of layers which are "linked" into a repository. A layer // can only be accessed through a qualified repository name if it is linked in // the repository. Uploads of layers are managed in the uploads directory, // which is key by upload id. When all data for an upload is received, the // data is moved into the blob store and the upload directory is deleted. // Abandoned uploads can be garbage collected by reading the startedat file // and removing uploads that have been active for longer than a certain time. // // The third component of the repository directory is the manifests store, // which is made up of a revision store and tag store. Manifests are stored in // the blob store and linked into the revision store. Signatures are separated // from the manifest payload data and linked into the blob store, as well. // While the registry can save all revisions of a manifest, no relationship is // implied as to the ordering of changes to a manifest. The tag store provides // support for name, tag lookups of manifests, using "current/link" under a // named tag directory. 
An index is maintained to support deletions of all // revisions of a given manifest tag. // // We cover the path formats implemented by this path mapper below. // // Manifests: // // manifestRevisionPathSpec: /v2/repositories//_manifests/revisions/// // manifestRevisionLinkPathSpec: /v2/repositories//_manifests/revisions///link // manifestSignaturesPathSpec: /v2/repositories//_manifests/revisions///signatures/ // manifestSignatureLinkPathSpec: /v2/repositories//_manifests/revisions///signatures///link // // Tags: // // manifestTagsPathSpec: /v2/repositories//_manifests/tags/ // manifestTagPathSpec: /v2/repositories//_manifests/tags// // manifestTagCurrentPathSpec: /v2/repositories//_manifests/tags//current/link // manifestTagIndexPathSpec: /v2/repositories//_manifests/tags//index/ // manifestTagIndexEntryPathSpec: /v2/repositories//_manifests/tags//index/// // manifestTagIndexEntryLinkPathSpec: /v2/repositories//_manifests/tags//index///link // // Blobs: // // layerLinkPathSpec: /v2/repositories//_layers///link // // Uploads: // // uploadDataPathSpec: /v2/repositories//_uploads//data // uploadStartedAtPathSpec: /v2/repositories//_uploads//startedat // uploadHashStatePathSpec: /v2/repositories//_uploads//hashstates// // // Blob Store: // // blobPathSpec: /v2/blobs/// // blobDataPathSpec: /v2/blobs////data // blobMediaTypePathSpec: /v2/blobs////data // // For more information on the semantic meaning of each path and their // contents, please see the path spec documentation. func pathFor(spec pathSpec) (string, error) { // Switch on the path object type and return the appropriate path. At // first glance, one may wonder why we don't use an interface to // accomplish this. By keep the formatting separate from the pathSpec, we // keep separate the path generation componentized. These specs could be // passed to a completely different mapper implementation and generate a // different set of paths. // // For example, imagine migrating from one backend to the other: one could // build a filesystem walker that converts a string path in one version, // to an intermediate path object, than can be consumed and mapped by the // other version. 
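	// A couple of concrete expansions, shown as an illustrative comment only
	// and mirroring the expectations in paths_test.go (the id and tag values
	// are placeholders):
	//
	//	pathFor(uploadDataPathSpec{name: "foo/bar", id: "1234-5678"})
	//	  -> /docker/registry/v2/repositories/foo/bar/_uploads/1234-5678/data
	//
	//	pathFor(manifestTagCurrentPathSpec{name: "foo/bar", tag: "latest"})
	//	  -> /docker/registry/v2/repositories/foo/bar/_manifests/tags/latest/current/link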
rootPrefix := []string{storagePathRoot, storagePathVersion} repoPrefix := append(rootPrefix, "repositories") switch v := spec.(type) { case manifestRevisionPathSpec: components, err := digestPathComponents(v.revision, false) if err != nil { return "", err } return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil case manifestRevisionLinkPathSpec: root, err := pathFor(manifestRevisionPathSpec{ name: v.name, revision: v.revision, }) if err != nil { return "", err } return path.Join(root, "link"), nil case manifestSignaturesPathSpec: root, err := pathFor(manifestRevisionPathSpec{ name: v.name, revision: v.revision, }) if err != nil { return "", err } return path.Join(root, "signatures"), nil case manifestSignatureLinkPathSpec: root, err := pathFor(manifestSignaturesPathSpec{ name: v.name, revision: v.revision, }) if err != nil { return "", err } signatureComponents, err := digestPathComponents(v.signature, false) if err != nil { return "", err } return path.Join(root, path.Join(append(signatureComponents, "link")...)), nil case manifestTagsPathSpec: return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil case manifestTagPathSpec: root, err := pathFor(manifestTagsPathSpec{ name: v.name, }) if err != nil { return "", err } return path.Join(root, v.tag), nil case manifestTagCurrentPathSpec: root, err := pathFor(manifestTagPathSpec{ name: v.name, tag: v.tag, }) if err != nil { return "", err } return path.Join(root, "current", "link"), nil case manifestTagIndexPathSpec: root, err := pathFor(manifestTagPathSpec{ name: v.name, tag: v.tag, }) if err != nil { return "", err } return path.Join(root, "index"), nil case manifestTagIndexEntryLinkPathSpec: root, err := pathFor(manifestTagIndexEntryPathSpec{ name: v.name, tag: v.tag, revision: v.revision, }) if err != nil { return "", err } return path.Join(root, "link"), nil case manifestTagIndexEntryPathSpec: root, err := pathFor(manifestTagIndexPathSpec{ name: v.name, tag: v.tag, }) if err != nil { return "", err } components, err := digestPathComponents(v.revision, false) if err != nil { return "", err } return path.Join(root, path.Join(components...)), nil case layerLinkPathSpec: components, err := digestPathComponents(v.digest, false) if err != nil { return "", err } // TODO(stevvooe): Right now, all blobs are linked under "_layers". If // we have future migrations, we may want to rename this to "_blobs". // A migration strategy would simply leave existing items in place and // write the new paths, commit a file then delete the old files. blobLinkPathComponents := append(repoPrefix, v.name, "_layers") return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil case blobDataPathSpec: components, err := digestPathComponents(v.digest, true) if err != nil { return "", err } components = append(components, "data") blobPathPrefix := append(rootPrefix, "blobs") return path.Join(append(blobPathPrefix, components...)...), nil case uploadDataPathSpec: return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil case uploadStartedAtPathSpec: return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil case uploadHashStatePathSpec: offset := fmt.Sprintf("%d", v.offset) if v.list { offset = "" // Limit to the prefix for listing offsets. 
} return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil case repositoriesRootPathSpec: return path.Join(repoPrefix...), nil default: // TODO(sday): This is an internal error. Ensure it doesn't escape (panic?). return "", fmt.Errorf("unknown path spec: %#v", v) } } // pathSpec is a type to mark structs as path specs. There is no // implementation because we'd like to keep the specs and the mappers // decoupled. type pathSpec interface { pathSpec() } // manifestRevisionPathSpec describes the components of the directory path for // a manifest revision. type manifestRevisionPathSpec struct { name string revision digest.Digest } func (manifestRevisionPathSpec) pathSpec() {} // manifestRevisionLinkPathSpec describes the path components required to look // up the data link for a revision of a manifest. If this file is not present, // the manifest blob is not available in the given repo. The contents of this // file should just be the digest. type manifestRevisionLinkPathSpec struct { name string revision digest.Digest } func (manifestRevisionLinkPathSpec) pathSpec() {} // manifestSignaturesPathSpec decribes the path components for the directory // containing all the signatures for the target blob. Entries are named with // the underlying key id. type manifestSignaturesPathSpec struct { name string revision digest.Digest } func (manifestSignaturesPathSpec) pathSpec() {} // manifestSignatureLinkPathSpec decribes the path components used to look up // a signature file by the hash of its blob. type manifestSignatureLinkPathSpec struct { name string revision digest.Digest signature digest.Digest } func (manifestSignatureLinkPathSpec) pathSpec() {} // manifestTagsPathSpec describes the path elements required to point to the // manifest tags directory. type manifestTagsPathSpec struct { name string } func (manifestTagsPathSpec) pathSpec() {} // manifestTagPathSpec describes the path elements required to point to the // manifest tag links files under a repository. These contain a blob id that // can be used to look up the data and signatures. type manifestTagPathSpec struct { name string tag string } func (manifestTagPathSpec) pathSpec() {} // manifestTagCurrentPathSpec describes the link to the current revision for a // given tag. type manifestTagCurrentPathSpec struct { name string tag string } func (manifestTagCurrentPathSpec) pathSpec() {} // manifestTagCurrentPathSpec describes the link to the index of revisions // with the given tag. type manifestTagIndexPathSpec struct { name string tag string } func (manifestTagIndexPathSpec) pathSpec() {} // manifestTagIndexEntryPathSpec contains the entries of the index by revision. type manifestTagIndexEntryPathSpec struct { name string tag string revision digest.Digest } func (manifestTagIndexEntryPathSpec) pathSpec() {} // manifestTagIndexEntryLinkPathSpec describes the link to a revisions of a // manifest with given tag within the index. type manifestTagIndexEntryLinkPathSpec struct { name string tag string revision digest.Digest } func (manifestTagIndexEntryLinkPathSpec) pathSpec() {} // blobLinkPathSpec specifies a path for a blob link, which is a file with a // blob id. The blob link will contain a content addressable blob id reference // into the blob store. 
The format of the contents is as follows: // // : // // The following example of the file contents is more illustrative: // // sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36 // // This indicates that there is a blob with the id/digest, calculated via // sha256 that can be fetched from the blob store. type layerLinkPathSpec struct { name string digest digest.Digest } func (layerLinkPathSpec) pathSpec() {} // blobAlgorithmReplacer does some very simple path sanitization for user // input. Paths should be "safe" before getting this far due to strict digest // requirements but we can add further path conversion here, if needed. var blobAlgorithmReplacer = strings.NewReplacer( "+", "/", ".", "/", ";", "/", ) // // blobPathSpec contains the path for the registry global blob store. // type blobPathSpec struct { // digest digest.Digest // } // func (blobPathSpec) pathSpec() {} // blobDataPathSpec contains the path for the registry global blob store. For // now, this contains layer data, exclusively. type blobDataPathSpec struct { digest digest.Digest } func (blobDataPathSpec) pathSpec() {} // uploadDataPathSpec defines the path parameters of the data file for // uploads. type uploadDataPathSpec struct { name string id string } func (uploadDataPathSpec) pathSpec() {} // uploadDataPathSpec defines the path parameters for the file that stores the // start time of an uploads. If it is missing, the upload is considered // unknown. Admittedly, the presence of this file is an ugly hack to make sure // we have a way to cleanup old or stalled uploads that doesn't rely on driver // FileInfo behavior. If we come up with a more clever way to do this, we // should remove this file immediately and rely on the startetAt field from // the client to enforce time out policies. type uploadStartedAtPathSpec struct { name string id string } func (uploadStartedAtPathSpec) pathSpec() {} // uploadHashStatePathSpec defines the path parameters for the file that stores // the hash function state of an upload at a specific byte offset. If `list` is // set, then the path mapper will generate a list prefix for all hash state // offsets for the upload identified by the name, id, and alg. type uploadHashStatePathSpec struct { name string id string alg digest.Algorithm offset int64 list bool } func (uploadHashStatePathSpec) pathSpec() {} // repositoriesRootPathSpec returns the root of repositories type repositoriesRootPathSpec struct { } func (repositoriesRootPathSpec) pathSpec() {} // digestPathComponents provides a consistent path breakdown for a given // digest. For a generic digest, it will be as follows: // // / // // If multilevel is true, the first two bytes of the digest will separate // groups of digest folder. 
It will be as follows: // // // // func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) { if err := dgst.Validate(); err != nil { return nil, err } algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm())) hex := dgst.Hex() prefix := []string{algorithm} var suffix []string if multilevel { suffix = append(suffix, hex[:2]) } suffix = append(suffix, hex) return append(prefix, suffix...), nil } distribution-2.3.0/registry/storage/paths_test.go000066400000000000000000000076171265472114500222760ustar00rootroot00000000000000package storage import ( "testing" ) func TestPathMapper(t *testing.T) { for _, testcase := range []struct { spec pathSpec expected string err error }{ { spec: manifestRevisionPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, { spec: manifestRevisionLinkPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: manifestSignatureLinkPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: manifestSignaturesPathSpec{ name: "foo/bar", revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures", }, { spec: manifestTagsPathSpec{ name: "foo/bar", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags", }, { spec: manifestTagPathSpec{ name: "foo/bar", tag: "thetag", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag", }, { spec: manifestTagCurrentPathSpec{ name: "foo/bar", tag: "thetag", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/current/link", }, { spec: manifestTagIndexPathSpec{ name: "foo/bar", tag: "thetag", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index", }, { spec: manifestTagIndexEntryPathSpec{ name: "foo/bar", tag: "thetag", revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, { spec: manifestTagIndexEntryLinkPathSpec{ name: "foo/bar", tag: "thetag", revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: uploadDataPathSpec{ name: "foo/bar", id: "asdf-asdf-asdf-adsf", }, expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/data", }, { spec: uploadStartedAtPathSpec{ name: "foo/bar", id: "asdf-asdf-asdf-adsf", }, 
expected: "/docker/registry/v2/repositories/foo/bar/_uploads/asdf-asdf-asdf-adsf/startedat", }, } { p, err := pathFor(testcase.spec) if err != nil { t.Fatalf("unexpected generating path (%T): %v", testcase.spec, err) } if p != testcase.expected { t.Fatalf("unexpected path generated (%T): %q != %q", testcase.spec, p, testcase.expected) } } // Add a few test cases to ensure we cover some errors // Specify a path that requires a revision and get a digest validation error. badpath, err := pathFor(manifestSignaturesPathSpec{ name: "foo/bar", }) if err == nil { t.Fatalf("expected an error when mapping an invalid revision: %s", badpath) } } distribution-2.3.0/registry/storage/purgeuploads.go000066400000000000000000000077201265472114500226250ustar00rootroot00000000000000package storage import ( "path" "strings" "time" log "github.com/Sirupsen/logrus" "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" ) // uploadData stored the location of temporary files created during a layer upload // along with the date the upload was started type uploadData struct { containingDir string startedAt time.Time } func newUploadData() uploadData { return uploadData{ containingDir: "", // default to far in future to protect against missing startedat startedAt: time.Now().Add(time.Duration(10000 * time.Hour)), } } // PurgeUploads deletes files from the upload directory // created before olderThan. The list of files deleted and errors // encountered are returned func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) { log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete) uploadData, errors := getOutstandingUploads(ctx, driver) var deleted []string for _, uploadData := range uploadData { if uploadData.startedAt.Before(olderThan) { var err error log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.", uploadData.containingDir, uploadData.startedAt, olderThan) if actuallyDelete { err = driver.Delete(ctx, uploadData.containingDir) } if err == nil { deleted = append(deleted, uploadData.containingDir) } else { errors = append(errors, err) } } } log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors)) return deleted, errors } // getOutstandingUploads walks the upload directory, collecting files // which could be eligible for deletion. The only reliable way to // classify the age of a file is with the date stored in the startedAt // file, so gather files by UUID with a date from startedAt. 
func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) { var errors []error uploads := make(map[string]uploadData, 0) inUploadDir := false root, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return uploads, append(errors, err) } err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file[0] == '_' { // Reserved directory inUploadDir = (file == "_uploads") if fileInfo.IsDir() && !inUploadDir { return ErrSkipDir } } uuid, isContainingDir := uUIDFromPath(filePath) if uuid == "" { // Cannot reliably delete return nil } ud, ok := uploads[uuid] if !ok { ud = newUploadData() } if isContainingDir { ud.containingDir = filePath } if file == "startedat" { if t, err := readStartedAtFile(driver, filePath); err == nil { ud.startedAt = t } else { errors = pushError(errors, filePath, err) } } uploads[uuid] = ud return nil }) if err != nil { errors = pushError(errors, root, err) } return uploads, errors } // uUIDFromPath extracts the upload UUID from a given path // If the UUID is the last path component, this is the containing // directory for all upload files func uUIDFromPath(path string) (string, bool) { components := strings.Split(path, "/") for i := len(components) - 1; i >= 0; i-- { if u, err := uuid.Parse(components[i]); err == nil { return u.String(), i == len(components)-1 } } return "", false } // readStartedAtFile reads the date from an upload's startedAtFile func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) { // todo:(richardscothern) - pass in a context startedAtBytes, err := driver.GetContent(context.Background(), path) if err != nil { return time.Now(), err } startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes)) if err != nil { return time.Now(), err } return startedAt, nil } distribution-2.3.0/registry/storage/purgeuploads_test.go000066400000000000000000000113061265472114500236570ustar00rootroot00000000000000package storage import ( "path" "strings" "testing" "time" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/uuid" ) func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) { d := inmemory.New() ctx := context.Background() for i := 0; i < numUploads; i++ { addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt) } return d, ctx } func addUploads(ctx context.Context, t *testing.T, d driver.StorageDriver, uploadID, repo string, startedAt time.Time) { dataPath, err := pathFor(uploadDataPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } if err := d.PutContent(ctx, dataPath, []byte("")); err != nil { t.Fatalf("Unable to write data file") } startedAtPath, err := pathFor(uploadStartedAtPathSpec{name: repo, id: uploadID}) if err != nil { t.Fatalf("Unable to resolve path") } if err := d.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil { t.Fatalf("Unable to write startedAt file") } } func TestPurgeGather(t *testing.T) { uploadCount := 5 fs, ctx := testUploadFS(t, uploadCount, "test-repo", time.Now()) uploadData, errs := getOutstandingUploads(ctx, fs) if len(errs) != 0 { t.Errorf("Unexpected errors: %q", errs) } if len(uploadData) != uploadCount { t.Errorf("Unexpected upload file count: %d != %d",
uploadCount, len(uploadData)) } } func TestPurgeNone(t *testing.T) { fs, ctx := testUploadFS(t, 10, "test-repo", time.Now()) oneHourAgo := time.Now().Add(-1 * time.Hour) deleted, errs := PurgeUploads(ctx, fs, oneHourAgo, true) if len(errs) != 0 { t.Error("Unexpected errors", errs) } if len(deleted) != 0 { t.Errorf("Unexpectedly deleted files for time: %s", oneHourAgo) } } func TestPurgeAll(t *testing.T) { uploadCount := 10 oneHourAgo := time.Now().Add(-1 * time.Hour) fs, ctx := testUploadFS(t, uploadCount, "test-repo", oneHourAgo) // Ensure > 1 repos are purged addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo2", oneHourAgo) uploadCount++ deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors:", errs) } fileCount := uploadCount if len(deleted) != fileCount { t.Errorf("Unexpectedly deleted file count %d != %d", len(deleted), fileCount) } } func TestPurgeSome(t *testing.T) { oldUploadCount := 5 oneHourAgo := time.Now().Add(-1 * time.Hour) fs, ctx := testUploadFS(t, oldUploadCount, "library/test-repo", oneHourAgo) newUploadCount := 4 for i := 0; i < newUploadCount; i++ { addUploads(ctx, t, fs, uuid.Generate().String(), "test-repo", time.Now().Add(1*time.Hour)) } deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors:", errs) } if len(deleted) != oldUploadCount { t.Errorf("Unexpectedly deleted file count %d != %d", len(deleted), oldUploadCount) } } func TestPurgeOnlyUploads(t *testing.T) { oldUploadCount := 5 oneHourAgo := time.Now().Add(-1 * time.Hour) fs, ctx := testUploadFS(t, oldUploadCount, "test-repo", oneHourAgo) // Create a directory tree outside _uploads and ensure // these files aren't deleted. dataPath, err := pathFor(uploadDataPathSpec{name: "test-repo", id: uuid.Generate().String()}) if err != nil { t.Fatalf(err.Error()) } nonUploadPath := strings.Replace(dataPath, "_upload", "_important", -1) if strings.Index(nonUploadPath, "_upload") != -1 { t.Fatalf("Non-upload path not created correctly") } nonUploadFile := path.Join(nonUploadPath, "file") if err = fs.PutContent(ctx, nonUploadFile, []byte("")); err != nil { t.Fatalf("Unable to write data file") } deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) != 0 { t.Error("Unexpected errors", errs) } for _, file := range deleted { if strings.Index(file, "_upload") == -1 { t.Errorf("Non-upload file deleted") } } } func TestPurgeMissingStartedAt(t *testing.T) { oneHourAgo := time.Now().Add(-1 * time.Hour) fs, ctx := testUploadFS(t, 1, "test-repo", oneHourAgo) err := Walk(ctx, fs, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() _, file := path.Split(filePath) if file == "startedat" { if err := fs.Delete(ctx, filePath); err != nil { t.Fatalf("Unable to delete startedat file: %s", filePath) } } return nil }) if err != nil { t.Fatalf("Unexpected error during Walk: %s ", err.Error()) } deleted, errs := PurgeUploads(ctx, fs, time.Now(), true) if len(errs) > 0 { t.Errorf("Unexpected errors") } if len(deleted) > 0 { t.Errorf("Files unexpectedly deleted: %s", deleted) } } distribution-2.3.0/registry/storage/registry.go000066400000000000000000000165211265472114500217620ustar00rootroot00000000000000package storage import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache" storagedriver "github.com/docker/distribution/registry/storage/driver" ) // registry is the top-level 
implementation of Registry for use in the storage // package. All instances should descend from this object. type registry struct { blobStore *blobStore blobServer *blobServer statter *blobStatter // global statter service. blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider deleteEnabled bool resumableDigestEnabled bool } // RegistryOption is the type used for functional options for NewRegistry. type RegistryOption func(*registry) error // EnableRedirect is a functional option for NewRegistry. It causes the backend // blob server to attempt using (StorageDriver).URLFor to serve all blobs. func EnableRedirect(registry *registry) error { registry.blobServer.redirect = true return nil } // EnableDelete is a functional option for NewRegistry. It enables deletion on // the registry. func EnableDelete(registry *registry) error { registry.deleteEnabled = true return nil } // DisableDigestResumption is a functional option for NewRegistry. It should be // used if the registry is acting as a caching proxy. func DisableDigestResumption(registry *registry) error { registry.resumableDigestEnabled = false return nil } // BlobDescriptorCacheProvider returns a functional option for // NewRegistry. It creates a cached blob statter for use by the // registry. func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption { // TODO(aaronl): The duplication of statter across several objects is // ugly, and prevents us from using interface types in the registry // struct. Ideally, blobStore and blobServer should be lazily // initialized, and use the current value of // blobDescriptorCacheProvider. return func(registry *registry) error { if blobDescriptorCacheProvider != nil { statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter) registry.blobStore.statter = statter registry.blobServer.statter = statter registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider } return nil } } // NewRegistry creates a new registry instance from the provided driver. The // resulting registry may be shared by multiple goroutines but is cheap to // allocate. If the Redirect option is specified, the backend blob server will // attempt to use (StorageDriver).URLFor to serve all blobs. func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) { // create global statter statter := &blobStatter{ driver: driver, } bs := &blobStore{ driver: driver, statter: statter, } registry := ®istry{ blobStore: bs, blobServer: &blobServer{ driver: driver, statter: statter, pathFn: bs.path, }, statter: statter, resumableDigestEnabled: true, } for _, option := range options { if err := option(registry); err != nil { return nil, err } } return registry, nil } // Scope returns the namespace scope for a registry. The registry // will only serve repositories contained within this scope. func (reg *registry) Scope() distribution.Scope { return distribution.GlobalScope } // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. 
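// A short usage sketch (an assumption for illustration, not part of the
// original source; the repository name and driver are placeholders):
//
//	reg, err := NewRegistry(ctx, inmemory.New(), EnableDelete, EnableRedirect)
//	if err != nil {
//		// handle error
//	}
//	named, _ := reference.ParseNamed("library/ubuntu")
//	repo, err := reg.Repository(ctx, named)
//	if err != nil {
//		// handle error
//	}
//	manifests, err := repo.Manifests(ctx)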
func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { var descriptorCache distribution.BlobDescriptorService if reg.blobDescriptorCacheProvider != nil { var err error descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) if err != nil { return nil, err } } return &repository{ ctx: ctx, registry: reg, name: canonicalName, descriptorCache: descriptorCache, }, nil } // repository provides name-scoped access to various services. type repository struct { *registry ctx context.Context name reference.Named descriptorCache distribution.BlobDescriptorService } // Name returns the name of the repository. func (repo *repository) Name() reference.Named { return repo.name } func (repo *repository) Tags(ctx context.Context) distribution.TagService { tags := &tagStore{ repository: repo, blobStore: repo.registry.blobStore, } return tags } // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { manifestLinkPathFns := []linkPathFunc{ // NOTE(stevvooe): Need to search through multiple locations since // 2.1.0 unintentionally linked into _layers. manifestRevisionLinkPath, blobLinkPath, } blobStore := &linkedBlobStore{ ctx: ctx, blobStore: repo.blobStore, repository: repo, deleteEnabled: repo.registry.deleteEnabled, blobAccessController: &linkedBlobStatter{ blobStore: repo.blobStore, repository: repo, linkPathFns: manifestLinkPathFns, }, // TODO(stevvooe): linkPath limits this blob store to only // manifests. This instance cannot be used for blob checks. linkPathFns: manifestLinkPathFns, } ms := &manifestStore{ ctx: ctx, repository: repo, blobStore: blobStore, schema1Handler: &signedManifestHandler{ ctx: ctx, repository: repo, blobStore: blobStore, signatures: &signatureStore{ ctx: ctx, repository: repo, blobStore: repo.blobStore, }, }, schema2Handler: &schema2ManifestHandler{ ctx: ctx, repository: repo, blobStore: blobStore, }, manifestListHandler: &manifestListHandler{ ctx: ctx, repository: repo, blobStore: blobStore, }, } // Apply options for _, option := range options { err := option.Apply(ms) if err != nil { return nil, err } } return ms, nil } // Blobs returns an instance of the BlobStore. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { var statter distribution.BlobDescriptorService = &linkedBlobStatter{ blobStore: repo.blobStore, repository: repo, linkPathFns: []linkPathFunc{blobLinkPath}, } if repo.descriptorCache != nil { statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter) } return &linkedBlobStore{ registry: repo.registry, blobStore: repo.blobStore, blobServer: repo.blobServer, blobAccessController: statter, repository: repo, ctx: ctx, // TODO(stevvooe): linkPath limits this blob store to only layers. // This instance cannot be used for manifest checks. 
linkPathFns: []linkPathFunc{blobLinkPath}, deleteEnabled: repo.registry.deleteEnabled, resumableDigestEnabled: repo.resumableDigestEnabled, } } distribution-2.3.0/registry/storage/schema2manifesthandler.go000066400000000000000000000055121265472114500245170ustar00rootroot00000000000000package storage import ( "fmt" "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema2" ) //schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. type schema2ManifestHandler struct { repository *repository blobStore *linkedBlobStore ctx context.Context } var _ ManifestHandler = &schema2ManifestHandler{} func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") var m schema2.DeserializedManifest if err := json.Unmarshal(content, &m); err != nil { return nil, err } return &m, nil } func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") m, ok := manifest.(*schema2.DeserializedManifest) if !ok { return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) } if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { return "", err } mt, payload, err := m.Payload() if err != nil { return "", err } revision, err := ms.blobStore.Put(ctx, mt, payload) if err != nil { context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } // Link the revision into the repository. if err := ms.blobStore.linkBlob(ctx, revision); err != nil { return "", err } return revision.Digest, nil } // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. As a policy, the registry only tries to store // valid content, leaving trust policies of that content up to consumers. func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification if !skipDependencyVerification { target := mnfst.Target() _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) if err != nil { if err != distribution.ErrBlobUnknown { errs = append(errs, err) } // On error here, we always append unknown blob errors. errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) } for _, fsLayer := range mnfst.References() { _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) if err != nil { if err != distribution.ErrBlobUnknown { errs = append(errs, err) } // On error here, we always append unknown blob errors. 
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) } } } if len(errs) != 0 { return errs } return nil } distribution-2.3.0/registry/storage/signaturestore.go000066400000000000000000000060301265472114500231620ustar00rootroot00000000000000package storage import ( "path" "sync" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) type signatureStore struct { repository *repository blobStore *blobStore ctx context.Context } func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { signaturesPath, err := pathFor(manifestSignaturesPathSpec{ name: s.repository.Name().Name(), revision: dgst, }) if err != nil { return nil, err } // Need to append signature digest algorithm to path to get all items. // Perhaps, this should be in the pathMapper but it feels awkward. This // can be eliminated by implementing listAll on drivers. signaturesPath = path.Join(signaturesPath, "sha256") signaturePaths, err := s.blobStore.driver.List(s.ctx, signaturesPath) if err != nil { return nil, err } var wg sync.WaitGroup type result struct { index int signature []byte err error } ch := make(chan result) bs := s.linkedBlobStore(s.ctx, dgst) for i, sigPath := range signaturePaths { sigdgst, err := digest.ParseDigest("sha256:" + path.Base(sigPath)) if err != nil { context.GetLogger(s.ctx).Errorf("could not get digest from path: %q, skipping", sigPath) continue } wg.Add(1) go func(idx int, sigdgst digest.Digest) { defer wg.Done() context.GetLogger(s.ctx). Debugf("fetching signature %q", sigdgst) r := result{index: idx} if p, err := bs.Get(s.ctx, sigdgst); err != nil { context.GetLogger(s.ctx). Errorf("error fetching signature %q: %v", sigdgst, err) r.err = err } else { r.signature = p } ch <- r }(i, sigdgst) } done := make(chan struct{}) go func() { wg.Wait() close(done) }() // aggregate the results signatures := make([][]byte, len(signaturePaths)) loop: for { select { case result := <-ch: signatures[result.index] = result.signature if result.err != nil && err == nil { // only set the first one. err = result.err } case <-done: break loop } } return signatures, err } func (s *signatureStore) Put(dgst digest.Digest, signatures ...[]byte) error { bs := s.linkedBlobStore(s.ctx, dgst) for _, signature := range signatures { if _, err := bs.Put(s.ctx, "application/json", signature); err != nil { return err } } return nil } // linkedBlobStore returns the namedBlobStore of the signatures for the // manifest with the given digest. Effectively, each signature link path // layout is a unique linked blob store.
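//
// The store built below resolves links through manifestSignatureLinkPathSpec,
// so Get and Put on it operate on the per-signature link files of a single
// manifest revision rather than on global blob paths.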
func (s *signatureStore) linkedBlobStore(ctx context.Context, revision digest.Digest) *linkedBlobStore { linkpath := func(name string, dgst digest.Digest) (string, error) { return pathFor(manifestSignatureLinkPathSpec{ name: name, revision: revision, signature: dgst, }) } return &linkedBlobStore{ ctx: ctx, repository: s.repository, blobStore: s.blobStore, blobAccessController: &linkedBlobStatter{ blobStore: s.blobStore, repository: s.repository, linkPathFns: []linkPathFunc{linkpath}, }, linkPathFns: []linkPathFunc{linkpath}, } } distribution-2.3.0/registry/storage/signedmanifesthandler.go000066400000000000000000000105151265472114500244450ustar00rootroot00000000000000package storage import ( "encoding/json" "fmt" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/libtrust" ) // signedManifestHandler is a ManifestHandler that covers schema1 manifests. It // can unmarshal and put schema1 manifests that have been signed by libtrust. type signedManifestHandler struct { repository *repository blobStore *linkedBlobStore ctx context.Context signatures *signatureStore } var _ ManifestHandler = &signedManifestHandler{} func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") // Fetch the signatures for the manifest signatures, err := ms.signatures.Get(dgst) if err != nil { return nil, err } jsig, err := libtrust.NewJSONSignature(content, signatures...) if err != nil { return nil, err } // Extract the pretty JWS raw, err := jsig.PrettySignature("signatures") if err != nil { return nil, err } var sm schema1.SignedManifest if err := json.Unmarshal(raw, &sm); err != nil { return nil, err } return &sm, nil } func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") sm, ok := manifest.(*schema1.SignedManifest) if !ok { return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) } if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { return "", err } mt := schema1.MediaTypeManifest payload := sm.Canonical revision, err := ms.blobStore.Put(ctx, mt, payload) if err != nil { context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) return "", err } // Link the revision into the repository. if err := ms.blobStore.linkBlob(ctx, revision); err != nil { return "", err } // Grab each json signature and store them. signatures, err := sm.Signatures() if err != nil { return "", err } if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { return "", err } return revision.Digest, nil } // verifyManifest ensures that the manifest content is valid from the // perspective of the registry. It ensures that the signature is valid for the // enclosed payload. As a policy, the registry only tries to store valid // content, leaving trust policies of that content up to consumers. 
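//
// Concretely, the checks below cover the manifest name length and format,
// matching History/FSLayers cardinality, the libtrust signature via
// schema1.Verify, and, unless skipDependencyVerification is set, a Stat of
// every referenced blob.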
func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { var errs distribution.ErrManifestVerification if len(mnfst.Name) > reference.NameTotalLengthMax { errs = append(errs, distribution.ErrManifestNameInvalid{ Name: mnfst.Name, Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), }) } if !reference.NameRegexp.MatchString(mnfst.Name) { errs = append(errs, distribution.ErrManifestNameInvalid{ Name: mnfst.Name, Reason: fmt.Errorf("invalid manifest name format"), }) } if len(mnfst.History) != len(mnfst.FSLayers) { errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", len(mnfst.History), len(mnfst.FSLayers))) } if _, err := schema1.Verify(&mnfst); err != nil { switch err { case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: errs = append(errs, distribution.ErrManifestUnverified{}) default: if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust errs = append(errs, distribution.ErrManifestUnverified{}) } else { errs = append(errs, err) } } } if !skipDependencyVerification { for _, fsLayer := range mnfst.References() { _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) if err != nil { if err != distribution.ErrBlobUnknown { errs = append(errs, err) } // On error here, we always append unknown blob errors. errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) } } } if len(errs) != 0 { return errs } return nil } distribution-2.3.0/registry/storage/tagstore.go000066400000000000000000000115561265472114500217450ustar00rootroot00000000000000package storage import ( "path" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" storagedriver "github.com/docker/distribution/registry/storage/driver" ) var _ distribution.TagService = &tagStore{} // tagStore provides methods to manage manifest tags in a backend storage driver. // This implementation uses the same on-disk layout as the (now deleted) tag // store. This provides backward compatibility with current registry deployments // which only makes use of the Digest field of the returned distribution.Descriptor // but does not enable full roundtripping of Descriptor objects type tagStore struct { repository *repository blobStore *blobStore } // All returns all tags func (ts *tagStore) All(ctx context.Context) ([]string, error) { var tags []string pathSpec, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name().Name(), }) if err != nil { return tags, err } entries, err := ts.blobStore.driver.List(ctx, pathSpec) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name().Name()} default: return tags, err } } for _, entry := range entries { _, filename := path.Split(entry) tags = append(tags, filename) } return tags, nil } // exists returns true if the specified manifest tag exists in the repository. 
func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { tagPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name().Name(), tag: tag, }) if err != nil { return false, err } exists, err := exists(ctx, ts.blobStore.driver, tagPath) if err != nil { return false, err } return exists, nil } // Tag tags the digest with the given tag, updating the store to point at // the current tag. The digest must point to a manifest. func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { currentPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name().Name(), tag: tag, }) if err != nil { return err } lbs := ts.linkedBlobStore(ctx, tag) // Link into the index if err := lbs.linkBlob(ctx, desc); err != nil { return err } // Overwrite the current link return ts.blobStore.link(ctx, currentPath, desc.Digest) } // Get resolves the current revision for name and tag. func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { currentPath, err := pathFor(manifestTagCurrentPathSpec{ name: ts.repository.Name().Name(), tag: tag, }) if err != nil { return distribution.Descriptor{}, err } revision, err := ts.blobStore.readlink(ctx, currentPath) if err != nil { switch err.(type) { case storagedriver.PathNotFoundError: return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} } return distribution.Descriptor{}, err } return distribution.Descriptor{Digest: revision}, nil } // Untag removes the tag association func (ts *tagStore) Untag(ctx context.Context, tag string) error { tagPath, err := pathFor(manifestTagPathSpec{ name: ts.repository.Name().Name(), tag: tag, }) switch err.(type) { case storagedriver.PathNotFoundError: return distribution.ErrTagUnknown{Tag: tag} case nil: break default: return err } return ts.blobStore.driver.Delete(ctx, tagPath) } // linkedBlobStore returns the linkedBlobStore for the named tag, allowing one // to index manifest blobs by tag name. While the tag store doesn't map // precisely to the linked blob store, using this ensures the links are // managed via the same code path. func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore { return &linkedBlobStore{ blobStore: ts.blobStore, repository: ts.repository, ctx: ctx, linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) { return pathFor(manifestTagIndexEntryLinkPathSpec{ name: name, tag: tag, revision: dgst, }) }}, } } // Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by // digest, tag entries which point to it need to be recovered to avoid dangling tags.
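//
// A hypothetical sketch of the intended use while deleting a manifest by
// digest (names are illustrative only):
//
//	tags, err := ts.Lookup(ctx, desc)
//	if err != nil {
//		return err
//	}
//	for _, tag := range tags {
//		// untag or re-point each entry that still references desc.Digest
//	}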
func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { allTags, err := ts.All(ctx) switch err.(type) { case distribution.ErrRepositoryUnknown: // This tag store has been initialized but not yet populated break case nil: break default: return nil, err } var tags []string for _, tag := range allTags { tagLinkPathSpec := manifestTagCurrentPathSpec{ name: ts.repository.Name().Name(), tag: tag, } tagLinkPath, err := pathFor(tagLinkPathSpec) if err != nil { return nil, err } tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) if err != nil { return nil, err } if tagDigest == desc.Digest { tags = append(tags, tag) } } return tags, nil } distribution-2.3.0/registry/storage/tagstore_test.go000066400000000000000000000100761265472114500230000ustar00rootroot00000000000000package storage import ( "testing" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver/inmemory" ) type tagsTestEnv struct { ts distribution.TagService ctx context.Context } func testTagStore(t *testing.T) *tagsTestEnv { ctx := context.Background() d := inmemory.New() reg, err := NewRegistry(ctx, d) if err != nil { t.Fatal(err) } repoRef, _ := reference.ParseNamed("a/b") repo, err := reg.Repository(ctx, repoRef) if err != nil { t.Fatal(err) } return &tagsTestEnv{ ctx: ctx, ts: repo.Tags(ctx), } } func TestTagStoreTag(t *testing.T) { env := testTagStore(t) tags := env.ts ctx := env.ctx d := distribution.Descriptor{} err := tags.Tag(ctx, "latest", d) if err == nil { t.Error("expected error putting malformed descriptor") } d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" err = tags.Tag(ctx, "latest", d) if err != nil { t.Error(err) } d1, err := tags.Get(ctx, "latest") if err != nil { t.Error(err) } if d1.Digest != d.Digest { t.Error("put and get digest differ") } // Overwrite existing d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" err = tags.Tag(ctx, "latest", d) if err != nil { t.Error(err) } d1, err = tags.Get(ctx, "latest") if err != nil { t.Error(err) } if d1.Digest != d.Digest { t.Error("put and get digest differ") } } func TestTagStoreUnTag(t *testing.T) { env := testTagStore(t) tags := env.ts ctx := env.ctx desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} err := tags.Untag(ctx, "latest") if err == nil { t.Errorf("Expected error untagging non-existent tag") } err = tags.Tag(ctx, "latest", desc) if err != nil { t.Error(err) } err = tags.Untag(ctx, "latest") if err != nil { t.Error(err) } _, err = tags.Get(ctx, "latest") if err == nil { t.Error("Expected error getting untagged tag") } } func TestTagStoreAll(t *testing.T) { env := testTagStore(t) tagStore := env.ts ctx := env.ctx alpha := "abcdefghijklmnopqrstuvwxyz" for i := 0; i < len(alpha); i++ { tag := alpha[i] desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} err := tagStore.Tag(ctx, string(tag), desc) if err != nil { t.Error(err) } } all, err := tagStore.All(ctx) if err != nil { t.Error(err) } if len(all) != len(alpha) { t.Errorf("Unexpected count returned from enumerate") } for i, c := range all { if c != string(alpha[i]) { t.Errorf("unexpected tag in enumerate %s", c) } } removed := "a" err = tagStore.Untag(ctx, removed) if err != nil { t.Error(err) } all, err = tagStore.All(ctx) if err != nil { t.Error(err) } for _, tag :=
range all { if tag == removed { t.Errorf("unexpected tag in enumerate %s", removed) } } } func TestTagLookup(t *testing.T) { env := testTagStore(t) tagStore := env.ts ctx := env.ctx descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} tags, err := tagStore.Lookup(ctx, descA) if err != nil { t.Fatal(err) } if len(tags) != 0 { t.Fatalf("Lookup returned > 0 tags from empty store") } err = tagStore.Tag(ctx, "a", descA) if err != nil { t.Fatal(err) } err = tagStore.Tag(ctx, "b", descA) if err != nil { t.Fatal(err) } err = tagStore.Tag(ctx, "0", desc0) if err != nil { t.Fatal(err) } err = tagStore.Tag(ctx, "1", desc0) if err != nil { t.Fatal(err) } tags, err = tagStore.Lookup(ctx, descA) if err != nil { t.Fatal(err) } if len(tags) != 2 { t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) } tags, err = tagStore.Lookup(ctx, desc0) if err != nil { t.Fatal(err) } if len(tags) != 2 { t.Errorf("Lookup of desc0 returned %d tags, expected 2", len(tags)) } } distribution-2.3.0/registry/storage/util.go000066400000000000000000000007711265472114500210670ustar00rootroot00000000000000package storage import ( "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" ) // exists provides a utility method to test whether or not a path exists in // the given driver. func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) { if _, err := drv.Stat(ctx, path); err != nil { switch err := err.(type) { case driver.PathNotFoundError: return false, nil default: return false, err } } return true, nil } distribution-2.3.0/registry/storage/vacuum.go000066400000000000000000000027421265472114500214120ustar00rootroot00000000000000package storage import ( "path" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" "github.com/docker/distribution/registry/storage/driver" ) // vacuum contains functions for cleaning up repositories and blobs. // These functions will only reliably work on strongly consistent // storage systems.
// https://en.wikipedia.org/wiki/Consistency_model // NewVacuum creates a new Vacuum func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum { return Vacuum{ ctx: ctx, driver: driver, } } // Vacuum removes content from the filesystem type Vacuum struct { driver driver.StorageDriver ctx context.Context } // RemoveBlob removes a blob from the filesystem func (v Vacuum) RemoveBlob(dgst string) error { d, err := digest.ParseDigest(dgst) if err != nil { return err } blobPath, err := pathFor(blobDataPathSpec{digest: d}) if err != nil { return err } context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath) err = v.driver.Delete(v.ctx, blobPath) if err != nil { return err } return nil } // RemoveRepository removes a repository directory from the // filesystem func (v Vacuum) RemoveRepository(repoName string) error { rootForRepository, err := pathFor(repositoriesRootPathSpec{}) if err != nil { return err } repoDir := path.Join(rootForRepository, repoName) context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir) err = v.driver.Delete(v.ctx, repoDir) if err != nil { return err } return nil } distribution-2.3.0/registry/storage/walk.go000066400000000000000000000033771265472114500210550ustar00rootroot00000000000000package storage import ( "errors" "fmt" "sort" "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" ) // ErrSkipDir is used as a return value from a WalkFn to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. var ErrSkipDir = errors.New("skip this directory") // WalkFn is called once per file by Walk // If the returned error is ErrSkipDir and fileInfo refers // to a directory, the directory will not be entered and Walk // will continue the traversal. Otherwise Walk will return the error. type WalkFn func(fileInfo storageDriver.FileInfo) error // Walk traverses a filesystem defined within driver, starting // from the given path, calling f on each file func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error { children, err := driver.List(ctx, from) if err != nil { return err } sort.Stable(sort.StringSlice(children)) for _, child := range children { // TODO(stevvooe): Calling driver.Stat for every entry is quite // expensive when running against backends with a slow Stat // implementation, such as s3. This is very likely a serious // performance bottleneck.
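// Stat each child so the callback receives a full FileInfo. If f returns
// ErrSkipDir for a directory, the recursive call below is skipped; any
// other error from f aborts the walk immediately.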
fileInfo, err := driver.Stat(ctx, child) if err != nil { return err } err = f(fileInfo) skipDir := (err == ErrSkipDir) if err != nil && !skipDir { return err } if fileInfo.IsDir() && !skipDir { if err := Walk(ctx, driver, child, f); err != nil { return err } } } return nil } // pushError formats an error type given a path and an error // and pushes it to a slice of errors func pushError(errors []error, path string, err error) []error { return append(errors, fmt.Errorf("%s: %s", path, err)) } distribution-2.3.0/registry/storage/walk_test.go000066400000000000000000000065171265472114500221130ustar00rootroot00000000000000package storage import ( "fmt" "sort" "testing" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" ) func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { d := inmemory.New() ctx := context.Background() expected := map[string]string{ "/a": "dir", "/a/b": "dir", "/a/b/c": "dir", "/a/b/c/d": "file", "/a/b/c/e": "file", "/a/b/f": "dir", "/a/b/f/g": "file", "/a/b/f/h": "file", "/a/b/f/i": "file", "/z": "dir", "/z/y": "file", } for p, typ := range expected { if typ != "file" { continue } if err := d.PutContent(ctx, p, []byte(p)); err != nil { t.Fatalf("unable to put content into fixture: %v", err) } } return d, expected, ctx } func TestWalkErrors(t *testing.T) { d, expected, ctx := testFS(t) fileCount := len(expected) err := Walk(ctx, d, "", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { t.Error("Expected invalid root err") } errEarlyExpected := fmt.Errorf("Early termination") err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { // error on the 2nd file if fileInfo.Path() == "/a/b" { return errEarlyExpected } delete(expected, fileInfo.Path()) return nil }) if len(expected) != fileCount-1 { t.Error("Walk failed to terminate with error") } if err != errEarlyExpected { if err == nil { t.Fatalf("expected an error due to early termination") } else { t.Error(err.Error()) } } err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { return nil }) if err == nil { t.Errorf("Expected missing file err") } } func TestWalk(t *testing.T) { d, expected, ctx := testFS(t) var traversed []string err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() filetype, ok := expected[filePath] if !ok { t.Fatalf("Unexpected file in walk: %q", filePath) } if fileInfo.IsDir() { if filetype != "dir" { t.Errorf("Unexpected file type: %q", filePath) } } else { if filetype != "file" { t.Errorf("Unexpected file type: %q", filePath) } // each file has its own path as the contents. If the length // doesn't match the path length, fail. 
if fileInfo.Size() != int64(len(fileInfo.Path())) { t.Fatalf("unexpected size for %q: %v != %v", fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path())) } } delete(expected, filePath) traversed = append(traversed, filePath) return nil }) if len(expected) > 0 { t.Errorf("Missed files in walk: %q", expected) } if !sort.StringsAreSorted(traversed) { t.Errorf("result should be sorted: %v", traversed) } if err != nil { t.Fatalf(err.Error()) } } func TestWalkSkipDir(t *testing.T) { d, expected, ctx := testFS(t) err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() if filePath == "/a/b" { // skip processing /a/b/c and /a/b/c/d return ErrSkipDir } delete(expected, filePath) return nil }) if err != nil { t.Fatalf(err.Error()) } if _, ok := expected["/a/b/c"]; !ok { t.Errorf("/a/b/c not skipped") } if _, ok := expected["/a/b/c/d"]; !ok { t.Errorf("/a/b/c/d not skipped") } if _, ok := expected["/a/b/c/e"]; !ok { t.Errorf("/a/b/c/e not skipped") } } distribution-2.3.0/tags.go000066400000000000000000000017361265472114500155360ustar00rootroot00000000000000package distribution import ( "github.com/docker/distribution/context" ) // TagService provides access to information about tagged objects. type TagService interface { // Get retrieves the descriptor identified by the tag. Some // implementations may differentiate between "trusted" tags and // "untrusted" tags. If a tag is "untrusted", the mapping will be returned // as an ErrTagUntrusted error, with the target descriptor. Get(ctx context.Context, tag string) (Descriptor, error) // Tag associates the tag with the provided descriptor, updating the // current association, if needed. Tag(ctx context.Context, tag string, desc Descriptor) error // Untag removes the given tag association Untag(ctx context.Context, tag string) error // All returns the set of tags managed by this tag service All(ctx context.Context) ([]string, error) // Lookup returns the set of tags referencing the given digest. Lookup(ctx context.Context, digest Descriptor) ([]string, error) } distribution-2.3.0/testutil/000077500000000000000000000000001265472114500161175ustar00rootroot00000000000000distribution-2.3.0/testutil/handler.go000066400000000000000000000071331265472114500200670ustar00rootroot00000000000000package testutil import ( "bytes" "fmt" "io" "io/ioutil" "net/http" "net/url" "sort" "strings" ) // RequestResponseMap is an ordered mapping from Requests to Responses type RequestResponseMap []RequestResponseMapping // RequestResponseMapping defines a Response to be sent in response to a given // Request type RequestResponseMapping struct { Request Request Response Response } // Request is a simplified http.Request object type Request struct { // Method is the http method of the request, for example GET Method string // Route is the http route of this request Route string // QueryParams are the query parameters of this request QueryParams map[string][]string // Body is the byte contents of the http request Body []byte // Headers are the header for this request Headers http.Header } func (r Request) String() string { queryString := "" if len(r.QueryParams) > 0 { keys := make([]string, 0, len(r.QueryParams)) queryParts := make([]string, 0, len(r.QueryParams)) for k := range r.QueryParams { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { for _, val := range r.QueryParams[k] { queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val))) } } queryString = "?" 
+ strings.Join(queryParts, "&") } var headers []string if len(r.Headers) > 0 { var headerKeys []string for k := range r.Headers { headerKeys = append(headerKeys, k) } sort.Strings(headerKeys) for _, k := range headerKeys { for _, val := range r.Headers[k] { headers = append(headers, fmt.Sprintf("%s:%s", k, val)) } } } return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body) } // Response is a simplified http.Response object type Response struct { // Statuscode is the http status code of the Response StatusCode int // Headers are the http headers of this Response Headers http.Header // Body is the response body Body []byte } // testHandler is an http.Handler with a defined mapping from Request to an // ordered list of Response objects type testHandler struct { responseMap map[string][]Response } // NewHandler returns a new test handler that responds to defined requests // with specified responses // Each time a Request is received, the next Response is returned in the // mapping, until no Responses are defined, at which point a 404 is sent back func NewHandler(requestResponseMap RequestResponseMap) http.Handler { responseMap := make(map[string][]Response) for _, mapping := range requestResponseMap { responses, ok := responseMap[mapping.Request.String()] if ok { responseMap[mapping.Request.String()] = append(responses, mapping.Response) } else { responseMap[mapping.Request.String()] = []Response{mapping.Response} } } return &testHandler{responseMap: responseMap} } func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() requestBody, _ := ioutil.ReadAll(r.Body) request := Request{ Method: r.Method, Route: r.URL.Path, QueryParams: r.URL.Query(), Body: requestBody, Headers: make(map[string][]string), } // Add headers of interest here for k, v := range r.Header { if k == "If-None-Match" { request.Headers[k] = v } } responses, ok := app.responseMap[request.String()] if !ok || len(responses) == 0 { http.NotFound(w, r) return } response := responses[0] app.responseMap[request.String()] = responses[1:] responseHeader := w.Header() for k, v := range response.Headers { responseHeader[k] = v } w.WriteHeader(response.StatusCode) io.Copy(w, bytes.NewReader(response.Body)) } distribution-2.3.0/testutil/tarfile.go000066400000000000000000000033151265472114500200760ustar00rootroot00000000000000package testutil import ( "archive/tar" "bytes" "crypto/rand" "fmt" "io" mrand "math/rand" "time" "github.com/docker/distribution/digest" ) // CreateRandomTarFile creates a random tarfile, returning it as an // io.ReadSeeker along with its digest. An error is returned if there is a // problem generating valid content. func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) { nFiles := mrand.Intn(10) + 10 target := &bytes.Buffer{} wr := tar.NewWriter(target) // Perturb this on each iteration of the loop below. header := &tar.Header{ Mode: 0644, ModTime: time.Now(), Typeflag: tar.TypeReg, Uname: "randocalrissian", Gname: "cloudcity", AccessTime: time.Now(), ChangeTime: time.Now(), } for fileNumber := 0; fileNumber < nFiles; fileNumber++ { fileSize := mrand.Int63n(1<<20) + 1<<20 header.Name = fmt.Sprint(fileNumber) header.Size = fileSize if err := wr.WriteHeader(header); err != nil { return nil, "", err } randomData := make([]byte, fileSize) // Fill up the buffer with some random data. 
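// A short read or an error from crypto/rand would silently truncate the
// generated layer content, so both cases are treated as fatal below.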
n, err := rand.Read(randomData) if n != len(randomData) { return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData)) } if err != nil { return nil, "", err } nn, err := io.Copy(wr, bytes.NewReader(randomData)) if nn != fileSize { return nil, "", fmt.Errorf("short copy writing random file to tar") } if err != nil { return nil, "", err } if err := wr.Flush(); err != nil { return nil, "", err } } if err := wr.Close(); err != nil { return nil, "", err } dgst = digest.FromBytes(target.Bytes()) return bytes.NewReader(target.Bytes()), dgst, nil } distribution-2.3.0/uuid/000077500000000000000000000000001265472114500152105ustar00rootroot00000000000000distribution-2.3.0/uuid/uuid.go000066400000000000000000000060331265472114500165070ustar00rootroot00000000000000// Package uuid provides simple UUID generation. Only version 4 style UUIDs // can be generated. // // Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. package uuid import ( "crypto/rand" "fmt" "io" "os" "syscall" "time" ) const ( // Bits is the number of bits in a UUID Bits = 128 // Size is the number of bytes in a UUID Size = Bits / 8 format = "%08x-%04x-%04x-%04x-%012x" ) var ( // ErrUUIDInvalid indicates a parsed string is not a valid uuid. ErrUUIDInvalid = fmt.Errorf("invalid uuid") // Loggerf can be used to override the default logging destination. Such // log messages in this library should be logged at warning or higher. Loggerf = func(format string, args ...interface{}) {} ) // UUID represents a UUID value. UUIDs can be compared and set to other values // and accessed by byte. type UUID [Size]byte // Generate creates a new, version 4 uuid. func Generate() (u UUID) { const ( // ensures we backoff for less than 450ms total. Use the following to // select new value, in units of 10ms: // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 maxretries = 9 backoff = time.Millisecond * 10 ) var ( totalBackoff time.Duration count int retries int ) for { // This should never block but the read may fail. Because of this, // we just try to read the random number generator until we get // something. This is a very rare condition but may happen. b := time.Duration(retries) * backoff time.Sleep(b) totalBackoff += b n, err := io.ReadFull(rand.Reader, u[count:]) if err != nil { if retryOnError(err) && retries < maxretries { count += n retries++ Loggerf("error generating version 4 uuid, retrying: %v", err) continue } // Any other errors represent a system problem. What did someone // do to /dev/urandom? panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) } break } u[6] = (u[6] & 0x0f) | 0x40 // set version byte u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} return u } // Parse attempts to extract a uuid from the string or returns an error. func Parse(s string) (u UUID, err error) { if len(s) != 36 { return UUID{}, ErrUUIDInvalid } // create stack addresses for each section of the uuid. p := make([][]byte, 5) if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { return u, err } copy(u[0:4], p[0]) copy(u[4:6], p[1]) copy(u[6:8], p[2]) copy(u[8:10], p[3]) copy(u[10:16], p[4]) return } func (u UUID) String() string { return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) } // retryOnError tries to detect whether or not retrying would be fruitful. 
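// It unwraps *os.PathError values and treats syscall.EPERM as retryable,
// matching the entropy pool exhaustion case noted below; any other error is
// considered fatal.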
func retryOnError(err error) bool { switch err := err.(type) { case *os.PathError: return retryOnError(err.Err) // unpack the target error case syscall.Errno: if err == syscall.EPERM { // EPERM represents an entropy pool exhaustion, a condition under // which we backoff and retry. return true } } return false } distribution-2.3.0/uuid/uuid_test.go000066400000000000000000000020671265472114500175510ustar00rootroot00000000000000package uuid import ( "testing" ) const iterations = 1000 func TestUUID4Generation(t *testing.T) { for i := 0; i < iterations; i++ { u := Generate() if u[6]&0xf0 != 0x40 { t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) } if u[8]&0xc0 != 0x80 { t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) } } } func TestParseAndEquality(t *testing.T) { for i := 0; i < iterations; i++ { u := Generate() parsed, err := Parse(u.String()) if err != nil { t.Fatalf("error parsing uuid %v: %v", u, err) } if parsed != u { t.Fatalf("parsing round trip failed: %v != %v", parsed, u) } } for _, c := range []string{ "bad", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space "20cc7775-2671-43c7-8742-51d1cfa23258 ", // trailing space "00000000-0000-0000-0000-x00000000000", // out of range character } { if _, err := Parse(c); err == nil { t.Fatalf("parsing %q should have failed", c) } } } distribution-2.3.0/version/000077500000000000000000000000001265472114500157275ustar00rootroot00000000000000distribution-2.3.0/version/print.go000066400000000000000000000011101265472114500174030ustar00rootroot00000000000000package version import ( "fmt" "io" "os" ) // FprintVersion outputs the version string to the writer, in the following // format, followed by a newline: // // // // For example, a binary "registry" built from github.com/docker/distribution // with version "v2.0" would print the following: // // registry github.com/docker/distribution v2.0 // func FprintVersion(w io.Writer) { fmt.Fprintln(w, os.Args[0], Package, Version) } // PrintVersion outputs the version information, from Fprint, to stdout. func PrintVersion() { FprintVersion(os.Stdout) } distribution-2.3.0/version/version.go000066400000000000000000000007351265472114500177500ustar00rootroot00000000000000package version // Package is the overall, canonical project import path under which the // package was built. var Package = "github.com/docker/distribution" // Version indicates which version of the binary is running. This is set to // the latest release tag by hand, always suffixed by "+unknown". During // build, it will be replaced by the actual version. The value here will be // used if the registry is run after a go get based install. var Version = "v2.3.0+unknown" distribution-2.3.0/version/version.sh000077500000000000000000000014571265472114500177620ustar00rootroot00000000000000#!/bin/sh # This bash script outputs the current, desired content of version.go, using # git describe. For best effect, pipe this to the target file. Generally, this # only needs to updated for releases. The actual value of will be replaced # during build time if the makefile is used. set -e cat <