notary-0.1/000077500000000000000000000000001262207326400126725ustar00rootroot00000000000000notary-0.1/.gitignore000066400000000000000000000002411262207326400146570ustar00rootroot00000000000000/cmd/notary-server/notary-server /cmd/notary-server/local.config.json /cmd/notary-signer/local.config.json cover bin cross .cover *.swp .idea *.iml coverage.out notary-0.1/CONTRIBUTING.md000066400000000000000000000077301262207326400151320ustar00rootroot00000000000000# Contributing to notary ## Before reporting an issue... ### If your problem is with... - automated builds - your account on the [Docker Hub](https://hub.docker.com/) - any other [Docker Hub](https://hub.docker.com/) issue Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) ### If you... - need help setting up notary - can't figure out something - are not sure what's going on or what your problem is Then please do not open an issue here yet - you should first try one of the following support forums: - irc: #docker-trust on freenode - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/trust ## Reporting an issue properly By following these simple rules you will get better and faster feedback on your issue. - search the bugtracker for an already reported issue ### If you found an issue that describes your problem: - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - please refrain from adding "same thing here" or "+1" comments - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - comment if you have some new, technical and relevant information to add to the case ### If you have not found an existing issue that describes your problem: 1. 
create a new issue, with a succinct title that describes your issue: - bad title: "It doesn't work with my docker" - good title: "Publish fail: 400 error with E_INVALID_DIGEST" 2. copy the output of: - `docker version` - `docker info` - `docker exec registry -version` 3. copy the command line you used to run `notary` or launch `notaryserver` 4. if relevant, copy your `notaryserver` logs that show the error ## Contributing a patch for a known bug, or a small correction You should follow the basic GitHub workflow: 1. fork 2. commit a change 3. make sure the tests pass 4. PR Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - sign your commits using `-s`: `git commit -s -m "My commit"` Some simple rules to ensure quick merge: - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - if you need to amend your PR following comments, please squash instead of adding more commits ## Contributing new features You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work Then you should submit your implementation, clearly linking to the issue (and possible proposal). Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. 
It's mandatory to: - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - address maintainers' comments and modify your submission accordingly - write tests for any new code Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. notary-0.1/CONTRIBUTORS000066400000000000000000000003631262207326400145540ustar00rootroot00000000000000David Williamson (github: davidwilliamson) Aaron Lehmann (github: aaronlehmann) Lewis Marshall (github: lmars) Jonathan Rudenberg (github: titanous) notary-0.1/Dockerfile000066400000000000000000000006511262207326400146660ustar00rootroot00000000000000FROM golang:1.5.1 RUN apt-get update && apt-get install -y \ libltdl-dev \ libsqlite3-dev \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* RUN go get golang.org/x/tools/cmd/vet \ && go get golang.org/x/tools/cmd/cover \ && go get github.com/tools/godep COPY . /go/src/github.com/docker/notary ENV GOPATH /go/src/github.com/docker/notary/Godeps/_workspace:$GOPATH WORKDIR /go/src/github.com/docker/notary notary-0.1/Dockerfile.server000066400000000000000000000011601262207326400161670ustar00rootroot00000000000000FROM golang:1.5.1 RUN apt-get update && apt-get install -y \ libltdl-dev \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* EXPOSE 4443 ENV NOTARYPKG github.com/docker/notary ENV GOPATH /go/src/${NOTARYPKG}/Godeps/_workspace:$GOPATH COPY . 
/go/src/github.com/docker/notary WORKDIR /go/src/${NOTARYPKG} RUN go install \ -tags pkcs11 \ -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \ ${NOTARYPKG}/cmd/notary-server ENTRYPOINT [ "notary-server" ] CMD [ "-config", "cmd/notary-server/config.json" ] notary-0.1/Dockerfile.signer000066400000000000000000000023771262207326400161630ustar00rootroot00000000000000FROM dockersecurity/golang-softhsm2 MAINTAINER Diogo Monica "diogo@docker.com" # CHANGE-ME: Default values for SoftHSM2 PIN and SOPIN, used to initialize the first token ENV NOTARY_SIGNER_PIN="1234" ENV SOPIN="1234" ENV LIBDIR="/usr/local/lib/softhsm/" ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1" ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword" # Install openSC and dependencies RUN apt-get update && apt-get install -y \ libltdl-dev \ libpcsclite-dev \ opensc \ usbutils \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Initialize the SoftHSM2 token on slod 0, using PIN and SOPIN varaibles RUN softhsm2-util --init-token --slot 0 --label "test_token" --pin $NOTARY_SIGNER_PIN --so-pin $SOPIN ENV NOTARYPKG github.com/docker/notary ENV GOPATH /go/src/${NOTARYPKG}/Godeps/_workspace:$GOPATH EXPOSE 4443 # Copy the local repo to the expected go path COPY . /go/src/github.com/docker/notary WORKDIR /go/src/${NOTARYPKG} # Install notary-signer RUN go install \ -tags pkcs11 \ -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \ ${NOTARYPKG}/cmd/notary-signer ENTRYPOINT [ "notary-signer" ] CMD [ "-config=cmd/notary-signer/config.json" ] notary-0.1/Godeps/000077500000000000000000000000001262207326400141135ustar00rootroot00000000000000notary-0.1/Godeps/Godeps.json000066400000000000000000000204421262207326400162310ustar00rootroot00000000000000{ "ImportPath": "github.com/docker/notary", "GoVersion": "go1.5.1", "Packages": [ "./..." 
], "Deps": [ { "ImportPath": "github.com/BurntSushi/toml", "Rev": "bd2bdf7f18f849530ef7a1c29a4290217cab32a1" }, { "ImportPath": "github.com/Sirupsen/logrus", "Comment": "v0.7.3", "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, { "ImportPath": "github.com/agl/ed25519", "Rev": "d2b94fd789ea21d12fac1a4443dd3a3f79cda72c" }, { "ImportPath": "github.com/beorn7/perks/quantile", "Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d" }, { "ImportPath": "github.com/bugsnag/bugsnag-go", "Comment": "v1.0.4-2-g13fd6b8", "Rev": "13fd6b8acda029830ef9904df6b63be0a83369d0" }, { "ImportPath": "github.com/bugsnag/osext", "Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702" }, { "ImportPath": "github.com/bugsnag/panicwrap", "Rev": "e5f9854865b9778a45169fc249e99e338d4d6f27" }, { "ImportPath": "github.com/cpuguy83/go-md2man/md2man", "Comment": "v1.0.4", "Rev": "71acacd42f85e5e82f70a55327789582a5200a90" }, { "ImportPath": "github.com/docker/distribution/context", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/digest", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/health", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/registry/api/errcode", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/registry/api/v2", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/registry/auth", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/registry/client/auth", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": 
"github.com/docker/distribution/registry/client/transport", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/distribution/uuid", "Comment": "v2.0.0-353-gfed58bd", "Rev": "fed58bd2d3c096055c0e69c2fb86c9a4965d1b8b" }, { "ImportPath": "github.com/docker/docker/pkg/tarsum", "Comment": "v1.7.1", "Rev": "786b29d4db80a6175e72b47a794ee044918ba734" }, { "ImportPath": "github.com/docker/docker/pkg/term", "Comment": "v1.7.1", "Rev": "786b29d4db80a6175e72b47a794ee044918ba734" }, { "ImportPath": "github.com/docker/libtrust", "Rev": "fa567046d9b14f6aa788882a950d69651d230b21" }, { "ImportPath": "github.com/dvsekhvalnov/jose2go", "Rev": "5307afb3bb6169b0f68cdf519a5964c843344441" }, { "ImportPath": "github.com/go-sql-driver/mysql", "Comment": "v1.2-97-g0cc29e9", "Rev": "0cc29e9fe8e25c2c58cf47bcab566e029bbaa88b" }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "3d2510a4dd961caffa2ae781669c628d82db700a" }, { "ImportPath": "github.com/google/gofuzz", "Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5" }, { "ImportPath": "github.com/gorilla/context", "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" }, { "ImportPath": "github.com/gorilla/mux", "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" }, { "ImportPath": "github.com/inconshreveable/mousetrap", "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" }, { "ImportPath": "github.com/jfrazelle/go/canonical/json", "Comment": "v1.5.1-1-1-gbaf439e", "Rev": "baf439e6c161bd2106346fc8022b74ac2444e311" }, { "ImportPath": "github.com/jinzhu/gorm", "Rev": "82d726bbfd8cefbe2dcdc7f7f0484551c0d40433" }, { "ImportPath": "github.com/kr/pretty", "Comment": "go.weekly.2011-12-22-18-gbc9499c", "Rev": "bc9499caa0f45ee5edb2f0209fbd61fbf3d9018f" }, { "ImportPath": "github.com/kr/text", "Rev": "6807e777504f54ad073ecef66747de158294b639" }, { "ImportPath": "github.com/lib/pq/hstore", "Comment": "go1.0-cutoff-58-g0dad96c", "Rev": "0dad96c0b94f8dee039aa40467f767467392a0af" }, { 
"ImportPath": "github.com/magiconair/properties", "Comment": "v1.5.3", "Rev": "624009598839a9432bd97bb75552389422357723" }, { "ImportPath": "github.com/mattn/go-sqlite3", "Comment": "v1.0.0", "Rev": "b4142c444a8941d0d92b0b7103a24df9cd815e42" }, { "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "Rev": "d0c3fe89de86839aecf2e0579c40ba3bb336a453" }, { "ImportPath": "github.com/miekg/pkcs11", "Rev": "88c9f842544e629ec046105d7fb50d5daafae737" }, { "ImportPath": "github.com/mitchellh/go-homedir", "Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4" }, { "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "2caf8efc93669b6c43e0441cdc6aed17546c96f3" }, { "ImportPath": "github.com/olekukonko/tablewriter", "Rev": "a5eefc286b03d5560735698ef36c83728a6ae560" }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", "Comment": "0.7.0-53-g449ccef", "Rev": "449ccefff16c8e2b7229f6be1921ba22f62461fe" }, { "ImportPath": "github.com/prometheus/client_model/go", "Comment": "model-0.0.2-12-gfa8ad6f", "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" }, { "ImportPath": "github.com/prometheus/common/expfmt", "Rev": "4fdc91a58c9d3696b982e8a680f4997403132d44" }, { "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", "Rev": "4fdc91a58c9d3696b982e8a680f4997403132d44" }, { "ImportPath": "github.com/prometheus/common/model", "Rev": "4fdc91a58c9d3696b982e8a680f4997403132d44" }, { "ImportPath": "github.com/prometheus/procfs", "Rev": "b1afdc266f54247f5dc725544f5d351a8661f502" }, { "ImportPath": "github.com/russross/blackfriday", "Comment": "v1.3", "Rev": "8cec3a854e68dba10faabbe31c089abf4a3e57a6" }, { "ImportPath": "github.com/shurcooL/sanitized_anchor_name", "Rev": "244f5ac324cb97e1987ef901a0081a77bfd8e845" }, { "ImportPath": "github.com/spf13/cast", "Rev": "4d07383ffe94b5e5a6fa3af9211374a4507a0184" }, { "ImportPath": "github.com/spf13/cobra", "Rev": "2e6a42892123dda608922f8af8ce85c3bff19575" }, { "ImportPath": 
"github.com/spf13/jwalterweatherman", "Rev": "3d60171a64319ef63c78bd45bd60e6eab1e75f8b" }, { "ImportPath": "github.com/spf13/pflag", "Rev": "08b1a584251b5b62f458943640fc8ebd4d50aaa5" }, { "ImportPath": "github.com/spf13/viper", "Rev": "be5ff3e4840cf692388bde7a057595a474ef379e" }, { "ImportPath": "github.com/stretchr/testify/assert", "Comment": "v1.0-17-g089c718", "Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff" }, { "ImportPath": "golang.org/x/crypto/bcrypt", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/crypto/blowfish", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/crypto/nacl/secretbox", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/crypto/pbkdf2", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/crypto/poly1305", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/crypto/salsa20/salsa", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/crypto/scrypt", "Rev": "bfc286917c5fcb7420d7e3092b50bbfd31b38a98" }, { "ImportPath": "golang.org/x/net/context", "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { "ImportPath": "golang.org/x/net/http2", "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { "ImportPath": "golang.org/x/net/internal/timeseries", "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { "ImportPath": "golang.org/x/net/trace", "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" }, { "ImportPath": "google.golang.org/grpc", "Rev": "3e7b7e58f491074e9577050058fb95d2351a60b0" }, { "ImportPath": "gopkg.in/yaml.v2", "Rev": "bef53efd0c76e49e6de55ead051f886bea7e9420" } ] } notary-0.1/Godeps/Readme000066400000000000000000000002101262207326400152240ustar00rootroot00000000000000This directory tree is generated automatically by godep. Please do not edit. See https://github.com/tools/godep for more information. 
notary-0.1/LICENSE000066400000000000000000000260551262207326400137070ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
notary-0.1/MAINTAINERS000066400000000000000000000003701262207326400143670ustar00rootroot00000000000000David Lawrence (@endophage) Ying Li (@cyli) Nathan McCauley (@NathanMcCauley) Derek McGowan (@dmcgowan) Diogo Monica (@diogomonica) notary-0.1/Makefile000066400000000000000000000121171262207326400143340ustar00rootroot00000000000000# Set an output prefix, which is the local directory if not specified PREFIX?=$(shell pwd) # Populate version variables # Add to compile time flags NOTARY_PKG := github.com/docker/notary NOTARY_VERSION := $(shell cat NOTARY_VERSION) GITCOMMIT := $(shell git rev-parse --short HEAD) GITUNTRACKEDCHANGES := $(shell git status --porcelain --untracked-files=no) ifneq ($(GITUNTRACKEDCHANGES),) GITCOMMIT := $(GITCOMMIT)-dirty endif CTIMEVAR=-X $(NOTARY_PKG)/version.GitCommit=$(GITCOMMIT) -X $(NOTARY_PKG)/version.NotaryVersion=$(NOTARY_VERSION) GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)" GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static" GOOSES = darwin freebsd linux GOARCHS = amd64 NOTARY_BUILDTAGS ?= pkcs11 GO_EXC = go NOTARYDIR := /go/src/github.com/docker/notary # check to be sure pkcs11 lib is always imported with a build tag GO_LIST_PKCS11 := $(shell go list -e -f '{{join .Deps "\n"}}' ./... | xargs go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -q pkcs11) ifeq ($(GO_LIST_PKCS11),) $(info pkcs11 import was not found anywhere without a build tag, yay) else $(error You are importing pkcs11 somewhere and not using a build tag) endif _empty := _space := $(empty) $(empty) # go cover test variables COVERDIR=.cover COVERPROFILE=$(COVERDIR)/cover.out COVERMODE=count PKGS = $(shell go list ./... 
| tr '\n' ' ') GO_VERSION = $(shell go version | awk '{print $$3}') .PHONY: clean all fmt vet lint build test binaries cross cover docker-images notary-dockerfile .DELETE_ON_ERROR: cover .DEFAULT: default go_version: ifneq ("$(GO_VERSION)", "go1.5.1") $(error Requires go version 1.5.1 - found $(GO_VERSION)) else @echo endif all: AUTHORS clean fmt vet fmt lint build test binaries AUTHORS: .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ # This only needs to be generated by hand when cutting full releases. version/version.go: ./version/version.sh > $@ ${PREFIX}/bin/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go') @echo "+ $@" @godep go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary-server ${PREFIX}/bin/notary: NOTARY_VERSION $(shell find . -type f -name '*.go') @echo "+ $@" @godep go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary ${PREFIX}/bin/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go') @echo "+ $@" @godep go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary-signer vet: go_version @echo "+ $@" @test -z "$$(go tool vet -printf=false . 2>&1 | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" fmt: @echo "+ $@" @test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" lint: @echo "+ $@" @test -z "$$(golint ./... | grep -v .pb. | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" build: go_version @echo "+ $@" @go build -tags "${NOTARY_BUILDTAGS}" -v ${GO_LDFLAGS} ./... test: TESTOPTS = test: go_version @echo "+ $@ $(TESTOPTS)" go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) ./... test-full: vet lint @echo "+ $@" go test -tags "${NOTARY_BUILDTAGS}" -v ./... protos: @protoc --go_out=plugins=grpc:. proto/*.proto # This allows coverage for a package to come from tests in different package. 
# Requires that the following: # go get github.com/wadey/gocovmerge; go install github.com/wadey/gocovmerge # # be run first define gocover $(GO_EXC) test $(OPTS) $(TESTOPTS) -covermode="$(COVERMODE)" -coverprofile="$(COVERDIR)/$(subst /,-,$(1)).$(subst $(_space),.,$(NOTARY_BUILDTAGS)).cover" "$(1)" || exit 1; endef gen-cover: go_version @mkdir -p "$(COVERDIR)" $(foreach PKG,$(PKGS),$(call gocover,$(PKG))) cover: GO_EXC := go OPTS = -tags "${NOTARY_BUILDTAGS}" -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))" cover: gen-cover covmerge @go tool cover -html="$(COVERPROFILE)" # Codecov knows how to merge multiple coverage files ci: OPTS = -tags "${NOTARY_BUILDTAGS}" -race -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))" GO_EXC := godep go ci: gen-cover covmerge: @gocovmerge $(shell ls -1 $(COVERDIR)/* | tr "\n" " ") > $(COVERPROFILE) @go tool cover -func="$(COVERPROFILE)" clean-protos: @rm proto/*.pb.go binaries: go_version ${PREFIX}/bin/notary-server ${PREFIX}/bin/notary ${PREFIX}/bin/notary-signer @echo "+ $@" define template mkdir -p ${PREFIX}/cross/$(1)/$(2); GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 go build -o ${PREFIX}/cross/$(1)/$(2)/notary -a -tags "static_build netgo" -installsuffix netgo ${GO_LDFLAGS_STATIC} ./cmd/notary; endef cross: go_version $(foreach GOARCH,$(GOARCHS),$(foreach GOOS,$(GOOSES),$(call template,$(GOOS),$(GOARCH)))) notary-dockerfile: @docker build --rm --force-rm -t notary . server-dockerfile: @docker build --rm --force-rm -f Dockerfile.server -t notary-server . signer-dockerfile: @docker build --rm --force-rm -f Dockerfile.signer -t notary-signer . 
docker-images: notary-dockerfile server-dockerfile signer-dockerfile shell: notary-dockerfile docker run --rm -it -v $(CURDIR)/cross:$(NOTARYDIR)/cross -v $(CURDIR)/bin:$(NOTARYDIR)/bin notary bash clean: @echo "+ $@" @rm -rf "$(COVERDIR)" @rm -rf "${PREFIX}/bin/notary-server" "${PREFIX}/bin/notary" "${PREFIX}/bin/notary-signer" notary-0.1/NOTARY_VERSION000066400000000000000000000000101262207326400150250ustar00rootroot000000000000001.0-rc1 notary-0.1/README.md000066400000000000000000000217171262207326400141610ustar00rootroot00000000000000# Notary [![Circle CI](https://circleci.com/gh/docker/notary/tree/master.svg?style=shield)](https://circleci.com/gh/docker/notary/tree/master) The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting with trusted collections. Notary aims to make the internet more secure by making it easy for people to publish and verify content. We often rely on TLS to secure our communications with a web server which is inherently flawed, as any compromise of the server enables malicious content to be substituted for the legitimate content. With Notary, publishers can sign their content offline using keys kept highly secure. Once the publisher is ready to make the content available, they can push their signed trusted collection to a Notary Server. Consumers, having acquired the publisher's public key through a secure channel, can then communicate with any notary server or (insecure) mirror, relying only on the publisher's key to determine the validity and integrity of the received content. ## Goals Notary is based on [The Update Framework](http://theupdateframework.com/), a secure general design for the problem of software distribution and updates. By using TUF, notary achieves a number of key advantages: * **Survivable Key Compromise**: Content publishers must manage keys in order to sign their content. 
Signing keys may be compromised or lost so systems must be designed in order to be flexible and recoverable in the case of key compromise. TUF's notion of key roles is utilized to separate responsibilities across a hierarchy of keys such that loss of any particular key (except the root role) by itself is not fatal to the security of the system. * **Freshness Guarantees**: Replay attacks are a common problem in designing secure systems, where previously valid payloads are replayed to trick another system. The same problem exists in the software update systems, where old signed can be presented as the most recent. notary makes use of timestamping on publishing so that consumers can know that they are receiving the most up to date content. This is particularly important when dealing with software update where old vulnerable versions could be used to attack users. * **Configurable Trust Thresholds**: Oftentimes there are a large number of publishers that are allowed to publish a particular piece of content. For example, open source projects where there are a number of core maintainers. Trust thresholds can be used so that content consumers require a configurable number of signatures on a piece of content in order to trust it. Using thresholds increases security so that loss of individual signing keys doesn't allow publishing of malicious content. * **Signing Delegation**: To allow for flexible publishing of trusted collections, a content publisher can delegate part of their collection to another signer. This delegation is represented as signed metadata so that a consumer of the content can verify both the content and the delegation. * **Use of Existing Distribution**: Notary's trust guarantees are not tied at all to particular distribution channels from which content is delivered. Therefore, trust can be added to any existing content delivery mechanism. 
* **Untrusted Mirrors and Transport**: All of the notary metadata can be mirrored and distributed via arbitrary channels. # Notary CLI Notary is a tool for publishing and managing trusted collections of content. Publishers can digitally sign collections and consumers can verify integrity and origin of content. This ability is built on a straightforward key management and signing interface to create signed collections and configure trusted publishers. ## Using Notary Lets try using notary. Prerequisites: - Requirements from the [Compiling Notary Server](#compiling-notary-server) section (such as go 1.5.1) - [docker and docker-compose](http://docs.docker.com/compose/install/) - [Notary server configuration](#configuring-notary-server) As setup, let's build notary and then start up a local notary-server (don't forget to add `127.0.0.1 notary-server` to your `/etc/hosts`, or if using docker-machine, add `$(docker-machine ip) notary-server`). ```sh make binaries docker-compose build docker-compose up -d ``` Note: In order to have notary use the local notary server and development root CA we can load the local development configuration by appending `-c cmd/notary/config.json` to every command. If you would rather not have to use `-c` on every command, copy `cmd/notary/config.json and cmd/notary/root-ca.crt` to `~/.notary`. First, let's initiate a notary collection called `example.com/scripts` ```sh notary init example.com/scripts ``` Now, look at the keys you created as a result of initialization ```sh notary key list ``` Cool, now add a local file `install.sh` and call it `v1` ```sh notary add example.com/scripts v1 install.sh ``` Wouldn't it be nice if others could know that you've signed this content? 
Use `publish` to publish your collection to your default notary-server ```sh notary publish example.com/scripts ``` Now, others can pull your trusted collection ```sh notary list example.com/scripts ``` More importantly, they can verify the content of your script by using `notary verify`: ```sh curl example.com/install.sh | notary verify example.com/scripts v1 | sh ``` # Notary Server Notary Server manages TUF data over an HTTP API compatible with the [notary client](../notary/). It may be configured to use either JWT or HTTP Basic Auth for authentication. Currently it only supports MySQL for storage of the TUF data, we intend to expand this to other storage options. ## Setup for Development The notary repository comes with Dockerfiles and a docker-compose file to facilitate development. Simply run the following commands to start a notary server with a temporary MySQL database in containers: ``` $ docker-compose build $ docker-compose up ``` If you are on Mac OSX with boot2docker or kitematic, you'll need to update your hosts file such that the name `notary` is associated with the IP address of your VM (for boot2docker, this can be determined by running `boot2docker ip`, with kitematic, `echo $DOCKER_HOST` should show the IP of the VM). If you are using the default Linux setup, you need to add `127.0.0.1 notary` to your hosts file. ## Successfully connecting over TLS By default notary-server runs with TLS with certificates signed by a local CA. In order to be able to successfully connect to it using either `curl` or `openssl`, you will have to use the root CA file in `fixtures/root-ca.crt`. OpenSSL example: `openssl s_client -connect notary-server:4443 -CAfile fixtures/root-ca.crt` ## Compiling Notary Server Prerequisites: - Go = 1.5.1 - [godep](https://github.com/tools/godep) installed - libtool development headers installed Install dependencies by running `godep restore`. From the root of this git repository, run `make binaries`. 
This will compile the `notary`, `notary-server`, and `notary-signer` applications and place them in a `bin` directory at the root of the git repository (the `bin` directory is ignored by the .gitignore file). `notary-signer` depends upon `pkcs11`, which requires that libtool headers be installed (`libtool-dev` on Ubuntu, `libtool-ltdl-devel` on CentOS/RedHat). If you are using Mac OS, you can `brew install libtool`, and run `make binaries` with the following environment variables (assuming a standard installation of Homebrew): ```sh export CPATH=/usr/local/include:${CPATH} export LIBRARY_PATH=/usr/local/lib:${LIBRARY_PATH} ``` ## Running Notary Server The `notary-server` application has the following usage: ``` $ bin/notary-server --help usage: bin/notary-serve -config="": Path to configuration file -debug=false: Enable the debugging server on localhost:8080 ``` ## Configuring Notary Server The configuration file must be a json file with the following format: ```json { "server": { "addr": ":4443", "tls_cert_file": "./fixtures/notary-server.crt", "tls_key_file": "./fixtures/notary-server.key" }, "logging": { "level": 5 } } ``` The pem and key provided in fixtures are purely for local development and testing. For production, you must create your own keypair and certificate, either via the CA of your choice, or a self signed certificate. 
If using the pem and key provided in fixtures, either: - Add `fixtures/root-ca.crt` to your trusted root certificates - Use the default configuration for notary client that loads the CA root for you by using the flag `-c ./cmd/notary/config.json` - Disable TLS verification by adding the following option notary configuration file in `~/.notary/config.json`: "skipTLSVerify": true Otherwise, you will see TLS errors or X509 errors upon initializing the notary collection: ``` $ notary list diogomonica.com/openvpn * fatal: Get https://notary-server:4443/v2/: x509: certificate signed by unknown authority $ notary list diogomonica.com/openvpn -c cmd/notary/config.json latest b1df2ad7cbc19f06f08b69b4bcd817649b509f3e5420cdd2245a85144288e26d 4056 ``` notary-0.1/ROADMAP.md000066400000000000000000000006421262207326400143010ustar00rootroot00000000000000# Roadmap The Trust project consists of a number of moving parts of which Notary Server is one. Notary Server is the front line metadata service that clients interact with. It manages TUF metadata and interacts with a pluggable signing service to issue new TUF timestamp files. The Notary-signer is provided as our reference implementation of a signing service. It supports HSMs along with Ed25519 software signing. notary-0.1/circle.yml000066400000000000000000000040501262207326400146550ustar00rootroot00000000000000# Pony-up! 
machine: pre: # Install gvm - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) post: # Install many go versions - gvm install go1.5.1 -B --name=stable environment: # Convenient shortcuts to "common" locations CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME BASE_DIR: src/github.com/docker/notary # Trick circle brainflat "no absolute path" behavior BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" hosts: # Not used yet fancy: 127.0.0.1 dependencies: pre: # Copy the code to the gopath of all go versions - > gvm use stable && mkdir -p "$(dirname $BASE_STABLE)" && cp -R "$CHECKOUT" "$BASE_STABLE" override: # Install dependencies for every copied clone/go version - gvm use stable && go get github.com/tools/godep: pwd: $BASE_STABLE post: # For the stable go version, additionally install linting tools - > gvm use stable && go get github.com/golang/lint/golint github.com/wadey/gocovmerge && go install github.com/wadey/gocovmerge test: pre: # Output the go versions we are going to test - gvm use stable && go version # CLEAN - gvm use stable && make clean: pwd: $BASE_STABLE # FMT - gvm use stable && make fmt: pwd: $BASE_STABLE # VET - gvm use stable && make vet: pwd: $BASE_STABLE # LINT - gvm use stable && make lint: pwd: $BASE_STABLE override: # Test stable, and report # hacking this to be parallel - case $CIRCLE_NODE_INDEX in 0) gvm use stable && NOTARY_BUILDTAGS=pkcs11 make ci ;; 1) gvm use stable && NOTARY_BUILDTAGS=none make ci ;; esac: parallel: true timeout: 600 pwd: $BASE_STABLE post: - gvm use stable && make covmerge: timeout: 600 pwd: $BASE_STABLE # Report to codecov.io # - bash <(curl -s https://codecov.io/bash): # pwd: $BASE_STABLE 
notary-0.1/client/000077500000000000000000000000001262207326400141505ustar00rootroot00000000000000notary-0.1/client/changelist/000077500000000000000000000000001262207326400162715ustar00rootroot00000000000000notary-0.1/client/changelist/change.go000066400000000000000000000036431262207326400200530ustar00rootroot00000000000000package changelist import ( "github.com/docker/notary/tuf/data" ) // Scopes for TufChanges are simply the TUF roles. // Unfortunately because of targets delegations, we can only // cover the base roles. const ( ScopeRoot = "root" ScopeTargets = "targets" ScopeSnapshot = "snapshot" ScopeTimestamp = "timestamp" ) // Types for TufChanges are namespaced by the Role they // are relevant for. The Root and Targets roles are the // only ones for which user action can cause a change, as // all changes in Snapshot and Timestamp are programatically // generated base on Root and Targets changes. const ( TypeRootRole = "role" TypeTargetsTarget = "target" TypeTargetsDelegation = "delegation" ) // TufChange represents a change to a TUF repo type TufChange struct { // Abbreviated because Go doesn't permit a field and method of the same name Actn string `json:"action"` Role string `json:"role"` ChangeType string `json:"type"` ChangePath string `json:"path"` Data []byte `json:"data"` } // TufRootData represents a modification of the keys associated // with a role that appears in the root.json type TufRootData struct { Keys data.KeyList `json:"keys"` RoleName string `json:"role"` } // NewTufChange initializes a tufChange object func NewTufChange(action string, role, changeType, changePath string, content []byte) *TufChange { return &TufChange{ Actn: action, Role: role, ChangeType: changeType, ChangePath: changePath, Data: content, } } // Action return c.Actn func (c TufChange) Action() string { return c.Actn } // Scope returns c.Role func (c TufChange) Scope() string { return c.Role } // Type returns c.ChangeType func (c TufChange) Type() string { return 
c.ChangeType } // Path return c.ChangePath func (c TufChange) Path() string { return c.ChangePath } // Content returns c.Data func (c TufChange) Content() []byte { return c.Data } notary-0.1/client/changelist/changelist.go000066400000000000000000000027051262207326400207450ustar00rootroot00000000000000package changelist // memChangeList implements a simple in memory change list. type memChangelist struct { changes []Change } // NewMemChangelist instantiates a new in-memory changelist func NewMemChangelist() Changelist { return &memChangelist{} } // List returns a list of Changes func (cl memChangelist) List() []Change { return cl.changes } // Add adds a change to the in-memory change list func (cl *memChangelist) Add(c Change) error { cl.changes = append(cl.changes, c) return nil } // Clear empties the changelist file. func (cl *memChangelist) Clear(archive string) error { // appending to a nil list initializes it. cl.changes = nil return nil } // Close is a no-op in this in-memory change-list func (cl *memChangelist) Close() error { return nil } func (cl *memChangelist) NewIterator() (ChangeIterator, error) { return &MemChangeListIterator{index: 0, collection: cl.changes}, nil } // MemChangeListIterator is a concrete instance of ChangeIterator type MemChangeListIterator struct { index int collection []Change // Same type as memChangeList.changes } // Next returns the next Change func (m *MemChangeListIterator) Next() (item Change, err error) { if m.index >= len(m.collection) { return nil, IteratorBoundsError(m.index) } item = m.collection[m.index] m.index++ return item, err } // HasNext indicates whether the iterator is exhausted func (m *MemChangeListIterator) HasNext() bool { return m.index < len(m.collection) } notary-0.1/client/changelist/changelist_test.go000066400000000000000000000040551262207326400220040ustar00rootroot00000000000000package changelist import ( "testing" "github.com/stretchr/testify/assert" ) func TestMemChangelist(t *testing.T) { cl := 
memChangelist{} c := NewTufChange(ActionCreate, "targets", "target", "test/targ", []byte{1}) err := cl.Add(c) assert.Nil(t, err, "Non-nil error while adding change") cs := cl.List() assert.Equal(t, 1, len(cs), "List should have returned exactly one item") assert.Equal(t, c.Action(), cs[0].Action(), "Action mismatch") assert.Equal(t, c.Scope(), cs[0].Scope(), "Scope mismatch") assert.Equal(t, c.Type(), cs[0].Type(), "Type mismatch") assert.Equal(t, c.Path(), cs[0].Path(), "Path mismatch") assert.Equal(t, c.Content(), cs[0].Content(), "Content mismatch") err = cl.Clear("") assert.Nil(t, err, "Non-nil error while clearing") cs = cl.List() assert.Equal(t, 0, len(cs), "List should be empty") } func TestMemChangeIterator(t *testing.T) { cl := memChangelist{} it, err := cl.NewIterator() assert.Nil(t, err, "Non-nil error from NewIterator") assert.False(t, it.HasNext(), "HasNext returns false for empty ChangeList") c1 := NewTufChange(ActionCreate, "t1", "target1", "test/targ1", []byte{1}) cl.Add(c1) c2 := NewTufChange(ActionUpdate, "t2", "target2", "test/targ2", []byte{2}) cl.Add(c2) c3 := NewTufChange(ActionUpdate, "t3", "target3", "test/targ3", []byte{3}) cl.Add(c3) cs := cl.List() index := 0 it, _ = cl.NewIterator() for it.HasNext() { c, err := it.Next() assert.Nil(t, err, "Next err should be false") assert.Equal(t, c.Action(), cs[index].Action(), "Action mismatch") assert.Equal(t, c.Scope(), cs[index].Scope(), "Scope mismatch") assert.Equal(t, c.Type(), cs[index].Type(), "Type mismatch") assert.Equal(t, c.Path(), cs[index].Path(), "Path mismatch") assert.Equal(t, c.Content(), cs[index].Content(), "Content mismatch") index++ } assert.Equal(t, index, len(cs), "Iterator produced all data in ChangeList") _, err = it.Next() assert.NotNil(t, err, "Next errors gracefully when exhausted") var iterError IteratorBoundsError assert.IsType(t, iterError, err, "IteratorBoundsError type") } 
notary-0.1/client/changelist/file_changelist.go000066400000000000000000000077271262207326400217550ustar00rootroot00000000000000package changelist import ( "encoding/json" "fmt" "io/ioutil" "os" "path" "sort" "time" "github.com/Sirupsen/logrus" "github.com/docker/distribution/uuid" ) // FileChangelist stores all the changes as files type FileChangelist struct { dir string } // NewFileChangelist is a convenience method for returning FileChangeLists func NewFileChangelist(dir string) (*FileChangelist, error) { logrus.Debug("Making dir path: ", dir) err := os.MkdirAll(dir, 0700) if err != nil { return nil, err } return &FileChangelist{dir: dir}, nil } // getFileNames reads directory, filtering out child directories func getFileNames(dirName string) ([]os.FileInfo, error) { var dirListing, fileInfos []os.FileInfo dir, err := os.Open(dirName) if err != nil { return fileInfos, err } defer dir.Close() dirListing, err = dir.Readdir(0) if err != nil { return fileInfos, err } for _, f := range dirListing { if f.IsDir() { continue } fileInfos = append(fileInfos, f) } return fileInfos, nil } // Read a JSON formatted file from disk; convert to TufChange struct func unmarshalFile(dirname string, f os.FileInfo) (*TufChange, error) { c := &TufChange{} raw, err := ioutil.ReadFile(path.Join(dirname, f.Name())) if err != nil { return c, err } err = json.Unmarshal(raw, c) if err != nil { return c, err } return c, nil } // List returns a list of sorted changes func (cl FileChangelist) List() []Change { var changes []Change fileInfos, err := getFileNames(cl.dir) if err != nil { return changes } sort.Sort(fileChanges(fileInfos)) for _, f := range fileInfos { c, err := unmarshalFile(cl.dir, f) if err != nil { logrus.Warn(err.Error()) continue } changes = append(changes, c) } return changes } // Add adds a change to the file change list func (cl FileChangelist) Add(c Change) error { cJSON, err := json.Marshal(c) if err != nil { return err } filename := fmt.Sprintf("%020d_%s.change", 
time.Now().UnixNano(), uuid.Generate()) return ioutil.WriteFile(path.Join(cl.dir, filename), cJSON, 0644) } // Clear clears the change list func (cl FileChangelist) Clear(archive string) error { dir, err := os.Open(cl.dir) if err != nil { return err } defer dir.Close() files, err := dir.Readdir(0) if err != nil { return err } for _, f := range files { os.Remove(path.Join(cl.dir, f.Name())) } return nil } // Close is a no-op func (cl FileChangelist) Close() error { // Nothing to do here return nil } // NewIterator creates an iterator from FileChangelist func (cl FileChangelist) NewIterator() (ChangeIterator, error) { fileInfos, err := getFileNames(cl.dir) if err != nil { return &FileChangeListIterator{}, err } sort.Sort(fileChanges(fileInfos)) return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil } // IteratorBoundsError is an Error type used by Next() type IteratorBoundsError int // Error implements the Error interface func (e IteratorBoundsError) Error() string { return fmt.Sprintf("Iterator index (%d) out of bounds", e) } // FileChangeListIterator is a concrete instance of ChangeIterator type FileChangeListIterator struct { index int dirname string collection []os.FileInfo } // Next returns the next Change in the FileChangeList func (m *FileChangeListIterator) Next() (item Change, err error) { if m.index >= len(m.collection) { return nil, IteratorBoundsError(m.index) } f := m.collection[m.index] m.index++ item, err = unmarshalFile(m.dirname, f) return } // HasNext indicates whether iterator is exhausted func (m *FileChangeListIterator) HasNext() bool { return m.index < len(m.collection) } type fileChanges []os.FileInfo // Len returns the length of a file change list func (cs fileChanges) Len() int { return len(cs) } // Less compares the names of two different file changes func (cs fileChanges) Less(i, j int) bool { return cs[i].Name() < cs[j].Name() } // Swap swaps the position of two file changes func (cs fileChanges) Swap(i, j int) { tmp 
:= cs[i] cs[i] = cs[j] cs[j] = tmp } notary-0.1/client/changelist/file_changelist_test.go000066400000000000000000000126541262207326400230070ustar00rootroot00000000000000package changelist import ( "io/ioutil" "os" "path" "testing" "github.com/stretchr/testify/assert" ) func TestAdd(t *testing.T) { tmpDir, err := ioutil.TempDir("/tmp", "test") if err != nil { t.Fatal(err.Error()) } defer os.RemoveAll(tmpDir) cl, err := NewFileChangelist(tmpDir) assert.Nil(t, err, "Error initializing fileChangelist") c := NewTufChange(ActionCreate, "targets", "target", "test/targ", []byte{1}) err = cl.Add(c) assert.Nil(t, err, "Non-nil error while adding change") cs := cl.List() assert.Equal(t, 1, len(cs), "List should have returned exactly one item") assert.Equal(t, c.Action(), cs[0].Action(), "Action mismatch") assert.Equal(t, c.Scope(), cs[0].Scope(), "Scope mismatch") assert.Equal(t, c.Type(), cs[0].Type(), "Type mismatch") assert.Equal(t, c.Path(), cs[0].Path(), "Path mismatch") assert.Equal(t, c.Content(), cs[0].Content(), "Content mismatch") err = cl.Clear("") assert.Nil(t, err, "Non-nil error while clearing") cs = cl.List() assert.Equal(t, 0, len(cs), "List should be empty") err = os.Remove(tmpDir) // will error if anything left in dir assert.Nil(t, err, "Clear should have left the tmpDir empty") } func TestErrorConditions(t *testing.T) { tmpDir, err := ioutil.TempDir("/tmp", "test") if err != nil { t.Fatal(err.Error()) } defer os.RemoveAll(tmpDir) cl, err := NewFileChangelist(tmpDir) // Attempt to unmarshall a bad JSON file. Note: causes a WARN on the console. 
ioutil.WriteFile(path.Join(tmpDir, "broken_file.change"), []byte{5}, 0644) noItems := cl.List() assert.Len(t, noItems, 0, "List returns zero items on bad JSON file error") os.RemoveAll(tmpDir) err = cl.Clear("") assert.Error(t, err, "Clear on missing change list should return err") noItems = cl.List() assert.Len(t, noItems, 0, "List returns zero items on directory read error") } func TestListOrder(t *testing.T) { tmpDir, err := ioutil.TempDir("/tmp", "test") if err != nil { t.Fatal(err.Error()) } defer os.RemoveAll(tmpDir) cl, err := NewFileChangelist(tmpDir) assert.Nil(t, err, "Error initializing fileChangelist") c1 := NewTufChange(ActionCreate, "targets", "target", "test/targ1", []byte{1}) err = cl.Add(c1) assert.Nil(t, err, "Non-nil error while adding change") c2 := NewTufChange(ActionCreate, "targets", "target", "test/targ2", []byte{1}) err = cl.Add(c2) assert.Nil(t, err, "Non-nil error while adding change") cs := cl.List() assert.Equal(t, 2, len(cs), "List should have returned exactly one item") assert.Equal(t, c1.Action(), cs[0].Action(), "Action mismatch") assert.Equal(t, c1.Scope(), cs[0].Scope(), "Scope mismatch") assert.Equal(t, c1.Type(), cs[0].Type(), "Type mismatch") assert.Equal(t, c1.Path(), cs[0].Path(), "Path mismatch") assert.Equal(t, c1.Content(), cs[0].Content(), "Content mismatch") assert.Equal(t, c2.Action(), cs[1].Action(), "Action 2 mismatch") assert.Equal(t, c2.Scope(), cs[1].Scope(), "Scope 2 mismatch") assert.Equal(t, c2.Type(), cs[1].Type(), "Type 2 mismatch") assert.Equal(t, c2.Path(), cs[1].Path(), "Path 2 mismatch") assert.Equal(t, c2.Content(), cs[1].Content(), "Content 2 mismatch") } func TestFileChangeIterator(t *testing.T) { tmpDir, err := ioutil.TempDir("/tmp", "test") if err != nil { t.Fatal(err.Error()) } defer os.RemoveAll(tmpDir) cl, err := NewFileChangelist(tmpDir) assert.Nil(t, err, "Error initializing fileChangelist") it, err := cl.NewIterator() assert.Nil(t, err, "Error initializing iterator") assert.False(t, 
it.HasNext(), "HasNext returns false for empty ChangeList") c1 := NewTufChange(ActionCreate, "t1", "target1", "test/targ1", []byte{1}) cl.Add(c1) c2 := NewTufChange(ActionUpdate, "t2", "target2", "test/targ2", []byte{2}) cl.Add(c2) c3 := NewTufChange(ActionUpdate, "t3", "target3", "test/targ3", []byte{3}) cl.Add(c3) cs := cl.List() index := 0 it, err = cl.NewIterator() assert.Nil(t, err, "Error initializing iterator") for it.HasNext() { c, err := it.Next() assert.Nil(t, err, "Next err should be false") assert.Equal(t, c.Action(), cs[index].Action(), "Action mismatch") assert.Equal(t, c.Scope(), cs[index].Scope(), "Scope mismatch") assert.Equal(t, c.Type(), cs[index].Type(), "Type mismatch") assert.Equal(t, c.Path(), cs[index].Path(), "Path mismatch") assert.Equal(t, c.Content(), cs[index].Content(), "Content mismatch") index++ } assert.Equal(t, index, len(cs), "Iterator produced all data in ChangeList") // negative test case: index out of range _, err = it.Next() assert.Error(t, err, "Next errors gracefully when exhausted") var iterError IteratorBoundsError assert.IsType(t, iterError, err, "IteratorBoundsError type") assert.Regexp(t, "out of bounds", err, "Message for iterator bounds error") // negative test case: changelist files missing it, err = cl.NewIterator() assert.Nil(t, err, "Error initializing iterator") for it.HasNext() { cl.Clear("") _, err := it.Next() assert.Error(t, err, "Next() error for missing changelist files") } // negative test case: bad JSON file to unmarshall via Next() cl.Clear("") ioutil.WriteFile(path.Join(tmpDir, "broken_file.change"), []byte{5}, 0644) it, err = cl.NewIterator() assert.Nil(t, err, "Error initializing iterator") for it.HasNext() { _, err := it.Next() assert.Error(t, err, "Next should indicate error for bad JSON file") } // negative test case: changelist directory does not exist os.RemoveAll(tmpDir) it, err = cl.NewIterator() assert.Error(t, err, "Initializing iterator without underlying file store") } 
notary-0.1/client/changelist/interface.go000066400000000000000000000037011262207326400205610ustar00rootroot00000000000000package changelist // Changelist is the interface for all TUF change lists type Changelist interface { // List returns the ordered list of changes // currently stored List() []Change // Add change appends the provided change to // the list of changes Add(Change) error // Clear empties the current change list. // Archive may be provided as a directory path // to save a copy of the changelist in that location Clear(archive string) error // Close syncronizes any pending writes to the underlying // storage and closes the file/connection Close() error // NewIterator returns an iterator for walking through the list // of changes currently stored NewIterator() (ChangeIterator, error) } const ( // ActionCreate represents a Create action ActionCreate = "create" // ActionUpdate represents an Update action ActionUpdate = "update" // ActionDelete represents a Delete action ActionDelete = "delete" ) // Change is the interface for a TUF Change type Change interface { // "create","update", or "delete" Action() string // Where the change should be made. // For TUF this will be the role Scope() string // The content type being affected. // For TUF this will be "target", or "delegation". // If the type is "delegation", the Scope will be // used to determine if a root role is being updated // or a target delegation. Type() string // Path indicates the entry within a role to be affected by the // change. For targets, this is simply the target's path, // for delegations it's the delegated role name. Path() string // Serialized content that the interpreter of a changelist // can use to apply the change. // For TUF this will be the serialized JSON that needs // to be inserted or merged. In the case of a "delete" // action, it will be nil. 
Content() []byte } // ChangeIterator is the interface for iterating across collections of // TUF Change items type ChangeIterator interface { Next() (Change, error) HasNext() bool } notary-0.1/client/client.go000066400000000000000000000371131262207326400157620ustar00rootroot00000000000000package client import ( "bytes" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "os" "path/filepath" "github.com/Sirupsen/logrus" "github.com/docker/notary/client/changelist" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/keystoremanager" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf" tufclient "github.com/docker/notary/tuf/client" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/store" ) const ( maxSize = 5 << 20 ) func init() { data.SetDefaultExpiryTimes( map[string]int{ "root": 3650, "targets": 1095, "snapshot": 1095, }, ) } // ErrRepoNotInitialized is returned when trying to can publish on an uninitialized // notary repository type ErrRepoNotInitialized struct{} // ErrRepoNotInitialized is returned when trying to can publish on an uninitialized // notary repository func (err *ErrRepoNotInitialized) Error() string { return "Repository has not been initialized" } // ErrExpired is returned when the metadata for a role has expired type ErrExpired struct { signed.ErrExpired } const ( tufDir = "tuf" ) // ErrRepositoryNotExist gets returned when trying to make an action over a repository /// that doesn't exist. var ErrRepositoryNotExist = errors.New("repository does not exist") // NotaryRepository stores all the information needed to operate on a notary // repository. 
type NotaryRepository struct { baseDir string gun string baseURL string tufRepoPath string fileStore store.MetadataStore CryptoService signed.CryptoService tufRepo *tuf.Repo roundTrip http.RoundTripper KeyStoreManager *keystoremanager.KeyStoreManager } // Target represents a simplified version of the data TUF operates on, so external // applications don't have to depend on tuf data types. type Target struct { Name string Hashes data.Hashes Length int64 } // NewTarget is a helper method that returns a Target func NewTarget(targetName string, targetPath string) (*Target, error) { b, err := ioutil.ReadFile(targetPath) if err != nil { return nil, err } meta, err := data.NewFileMeta(bytes.NewBuffer(b)) if err != nil { return nil, err } return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length}, nil } // Initialize creates a new repository by using rootKey as the root Key for the // TUF repository. func (r *NotaryRepository) Initialize(rootKeyID string) error { privKey, _, err := r.CryptoService.GetPrivateKey(rootKeyID) if err != nil { return err } rootCert, err := cryptoservice.GenerateCertificate(privKey, r.gun) if err != nil { return err } r.KeyStoreManager.AddTrustedCert(rootCert) // The root key gets stored in the TUF metadata X509 encoded, linking // the tuf root.json to our X509 PKI. // If the key is RSA, we store it as type RSAx509, if it is ECDSA we store it // as ECDSAx509 to allow the gotuf verifiers to correctly decode the // key on verification of signatures. var rootKey data.PublicKey switch privKey.Algorithm() { case data.RSAKey: rootKey = data.NewRSAx509PublicKey(trustmanager.CertToPEM(rootCert)) case data.ECDSAKey: rootKey = data.NewECDSAx509PublicKey(trustmanager.CertToPEM(rootCert)) default: return fmt.Errorf("invalid format for root key: %s", privKey.Algorithm()) } // All the timestamp keys are generated by the remote server. 
remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip) if err != nil { return err } rawTSKey, err := remote.GetKey("timestamp") if err != nil { return err } timestampKey, err := data.UnmarshalPublicKey(rawTSKey) if err != nil { return err } logrus.Debugf("got remote %s timestamp key with keyID: %s", timestampKey.Algorithm(), timestampKey.ID()) // This is currently hardcoding the targets and snapshots keys to ECDSA // Targets and snapshot keys are always generated locally. targetsKey, err := r.CryptoService.Create("targets", data.ECDSAKey) if err != nil { return err } snapshotKey, err := r.CryptoService.Create("snapshot", data.ECDSAKey) if err != nil { return err } kdb := keys.NewDB() kdb.AddKey(rootKey) kdb.AddKey(targetsKey) kdb.AddKey(snapshotKey) kdb.AddKey(timestampKey) err = initRoles(kdb, rootKey, targetsKey, snapshotKey, timestampKey) if err != nil { return err } r.tufRepo = tuf.NewRepo(kdb, r.CryptoService) err = r.tufRepo.InitRoot(false) if err != nil { logrus.Debug("Error on InitRoot: ", err.Error()) switch err.(type) { case signed.ErrInsufficientSignatures, trustmanager.ErrPasswordInvalid: default: return err } } err = r.tufRepo.InitTargets() if err != nil { logrus.Debug("Error on InitTargets: ", err.Error()) return err } err = r.tufRepo.InitSnapshot() if err != nil { logrus.Debug("Error on InitSnapshot: ", err.Error()) return err } return r.saveMetadata() } // AddTarget adds a new target to the repository, forcing a timestamps check from TUF func (r *NotaryRepository) AddTarget(target *Target) error { cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) if err != nil { return err } defer cl.Close() logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length) meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes} metaJSON, err := json.Marshal(meta) if err != nil { return err } c := changelist.NewTufChange(changelist.ActionCreate, 
changelist.ScopeTargets, "target", target.Name, metaJSON) err = cl.Add(c) if err != nil { return err } return nil } // RemoveTarget creates a new changelist entry to remove a target from the repository // when the changelist gets applied at publish time func (r *NotaryRepository) RemoveTarget(targetName string) error { cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist")) if err != nil { return err } logrus.Debugf("Removing target \"%s\"", targetName) c := changelist.NewTufChange(changelist.ActionDelete, changelist.ScopeTargets, "target", targetName, nil) err = cl.Add(c) if err != nil { return err } return nil } // ListTargets lists all targets for the current repository func (r *NotaryRepository) ListTargets() ([]*Target, error) { c, err := r.bootstrapClient() if err != nil { return nil, err } err = c.Update() if err != nil { if err, ok := err.(signed.ErrExpired); ok { return nil, ErrExpired{err} } return nil, err } var targetList []*Target for name, meta := range r.tufRepo.Targets["targets"].Signed.Targets { target := &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length} targetList = append(targetList, target) } return targetList, nil } // GetTargetByName returns a target given a name func (r *NotaryRepository) GetTargetByName(name string) (*Target, error) { c, err := r.bootstrapClient() if err != nil { return nil, err } err = c.Update() if err != nil { if err, ok := err.(signed.ErrExpired); ok { return nil, ErrExpired{err} } return nil, err } meta, err := c.TargetMeta(name) if meta == nil { return nil, fmt.Errorf("No trust data for %s", name) } else if err != nil { return nil, err } return &Target{Name: name, Hashes: meta.Hashes, Length: meta.Length}, nil } // GetChangelist returns the list of the repository's unpublished changes func (r *NotaryRepository) GetChangelist() (changelist.Changelist, error) { changelistDir := filepath.Join(r.tufRepoPath, "changelist") cl, err := changelist.NewFileChangelist(changelistDir) if err 
!= nil { logrus.Debug("Error initializing changelist") return nil, err } return cl, nil } // Publish pushes the local changes in signed material to the remote notary-server // Conceptually it performs an operation similar to a `git rebase` func (r *NotaryRepository) Publish() error { var updateRoot bool var root *data.Signed // attempt to initialize the repo from the remote store c, err := r.bootstrapClient() if err != nil { if _, ok := err.(store.ErrMetaNotFound); ok { // if the remote store return a 404 (translated into ErrMetaNotFound), // the repo hasn't been initialized yet. Attempt to load it from disk. err := r.bootstrapRepo() if err != nil { // Repo hasn't been initialized, It must be initialized before // it can be published. Return an error and let caller determine // what it wants to do. logrus.Debug(err.Error()) logrus.Debug("Repository not initialized during Publish") return &ErrRepoNotInitialized{} } // We had local data but the server doesn't know about the repo yet, // ensure we will push the initial root file root, err = r.tufRepo.Root.ToSigned() if err != nil { return err } updateRoot = true } else { // The remote store returned an error other than 404. We're // unable to determine if the repo has been initialized or not. logrus.Error("Could not publish Repository: ", err.Error()) return err } } else { // If we were successfully able to bootstrap the client (which only pulls // root.json), update it the rest of the tuf metadata in preparation for // applying the changelist. err = c.Update() if err != nil { if err, ok := err.(signed.ErrExpired); ok { return ErrExpired{err} } return err } } cl, err := r.GetChangelist() if err != nil { return err } // apply the changelist to the repo err = applyChangelist(r.tufRepo, cl) if err != nil { logrus.Debug("Error applying changelist") return err } // check if our root file is nearing expiry. Resign if it is. 
if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty { if err != nil { return err } root, err = r.tufRepo.SignRoot(data.DefaultExpires("root")) if err != nil { return err } updateRoot = true } // we will always resign targets and snapshots targets, err := r.tufRepo.SignTargets("targets", data.DefaultExpires("targets")) if err != nil { return err } snapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot")) if err != nil { return err } remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip) if err != nil { return err } // ensure we can marshal all the json before sending anything to remote targetsJSON, err := json.Marshal(targets) if err != nil { return err } snapshotJSON, err := json.Marshal(snapshot) if err != nil { return err } update := make(map[string][]byte) // if we need to update the root, marshal it and push the update to remote if updateRoot { rootJSON, err := json.Marshal(root) if err != nil { return err } update["root"] = rootJSON } update["targets"] = targetsJSON update["snapshot"] = snapshotJSON err = remote.SetMultiMeta(update) if err != nil { return err } err = cl.Clear("") if err != nil { // This is not a critical problem when only a single host is pushing // but will cause weird behaviour if changelist cleanup is failing // and there are multiple hosts writing to the repo. logrus.Warn("Unable to clear changelist. 
You may want to manually delete the folder ", filepath.Join(r.tufRepoPath, "changelist")) } return nil } func (r *NotaryRepository) bootstrapRepo() error { kdb := keys.NewDB() tufRepo := tuf.NewRepo(kdb, r.CryptoService) logrus.Debugf("Loading trusted collection.") rootJSON, err := r.fileStore.GetMeta("root", 0) if err != nil { return err } root := &data.SignedRoot{} err = json.Unmarshal(rootJSON, root) if err != nil { return err } err = tufRepo.SetRoot(root) if err != nil { return err } targetsJSON, err := r.fileStore.GetMeta("targets", 0) if err != nil { return err } targets := &data.SignedTargets{} err = json.Unmarshal(targetsJSON, targets) if err != nil { return err } tufRepo.SetTargets("targets", targets) snapshotJSON, err := r.fileStore.GetMeta("snapshot", 0) if err != nil { return err } snapshot := &data.SignedSnapshot{} err = json.Unmarshal(snapshotJSON, snapshot) if err != nil { return err } tufRepo.SetSnapshot(snapshot) r.tufRepo = tufRepo return nil } func (r *NotaryRepository) saveMetadata() error { logrus.Debugf("Saving changes to Trusted Collection.") signedRoot, err := r.tufRepo.SignRoot(data.DefaultExpires("root")) if err != nil { return err } rootJSON, err := json.Marshal(signedRoot) if err != nil { return err } targetsToSave := make(map[string][]byte) for t := range r.tufRepo.Targets { signedTargets, err := r.tufRepo.SignTargets(t, data.DefaultExpires("targets")) if err != nil { return err } targetsJSON, err := json.Marshal(signedTargets) if err != nil { return err } targetsToSave[t] = targetsJSON } signedSnapshot, err := r.tufRepo.SignSnapshot(data.DefaultExpires("snapshot")) if err != nil { return err } snapshotJSON, err := json.Marshal(signedSnapshot) if err != nil { return err } err = r.fileStore.SetMeta("root", rootJSON) if err != nil { return err } for role, blob := range targetsToSave { parentDir := filepath.Dir(role) os.MkdirAll(parentDir, 0755) r.fileStore.SetMeta(role, blob) } return r.fileStore.SetMeta("snapshot", snapshotJSON) } func 
(r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) { var rootJSON []byte remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip) if err == nil { // if remote store successfully set up, try and get root from remote rootJSON, err = remote.GetMeta("root", maxSize) } // if remote store couldn't be setup, or we failed to get a root from it // load the root from cache (offline operation) if err != nil { if err, ok := err.(store.ErrMetaNotFound); ok { // if the error was MetaNotFound then we successfully contacted // the store and it doesn't know about the repo. return nil, err } result, cacheErr := r.fileStore.GetMeta("root", maxSize) if cacheErr != nil { // if cache didn't return a root, we cannot proceed - just return // the original error. return nil, err } rootJSON = result logrus.Debugf( "Using local cache instead of remote due to failure: %s", err.Error()) } // can't just unmarshal into SignedRoot because validate root // needs the root.Signed field to still be []byte for signature // validation root := &data.Signed{} err = json.Unmarshal(rootJSON, root) if err != nil { return nil, err } err = r.KeyStoreManager.ValidateRoot(root, r.gun) if err != nil { return nil, err } kdb := keys.NewDB() r.tufRepo = tuf.NewRepo(kdb, r.CryptoService) signedRoot, err := data.RootFromSigned(root) if err != nil { return nil, err } err = r.tufRepo.SetRoot(signedRoot) if err != nil { return nil, err } return tufclient.NewClient( r.tufRepo, remote, kdb, r.fileStore, ), nil } // RotateKeys removes all existing keys associated with role and adds // the keys specified by keyIDs to the role. These changes are staged // in a changelist until publish is called. 
func (r *NotaryRepository) RotateKeys() error {
	// generate a fresh ECDSA key per rotatable role and stage the
	// corresponding root-file change on the changelist
	for _, role := range []string{"targets", "snapshot"} {
		key, err := r.CryptoService.Create(role, data.ECDSAKey)
		if err != nil {
			return err
		}
		err = r.rootFileKeyChange(role, changelist.ActionCreate, key)
		if err != nil {
			return err
		}
	}
	return nil
}

// rootFileKeyChange stages a root-scoped change on the repository's
// changelist: `action` applied to `key` for the given role.
func (r *NotaryRepository) rootFileKeyChange(role, action string, key data.PublicKey) error {
	cl, err := changelist.NewFileChangelist(filepath.Join(r.tufRepoPath, "changelist"))
	if err != nil {
		return err
	}
	defer cl.Close()

	kl := make(data.KeyList, 0, 1)
	kl = append(kl, key)
	meta := changelist.TufRootData{
		RoleName: role,
		Keys:     kl,
	}
	metaJSON, err := json.Marshal(meta)
	if err != nil {
		return err
	}

	c := changelist.NewTufChange(
		action,
		changelist.ScopeRoot,
		changelist.TypeRootRole,
		role,
		metaJSON,
	)
	err = cl.Add(c)
	if err != nil {
		return err
	}
	return nil
}
notary-0.1/client/client_pkcs11_test.go000066400000000000000000000005201262207326400201730ustar00rootroot00000000000000
// +build pkcs11

package client

import "github.com/docker/notary/trustmanager/yubikey"

// clear out all keys from any accessible yubikey before the pkcs11 tests run
func init() {
	yubikey.SetYubikeyKeyMode(0)
	if !yubikey.YubikeyAccessible() {
		return
	}
	store, err := yubikey.NewYubiKeyStore(nil, nil)
	if err == nil {
		for k := range store.ListKeys() {
			store.RemoveKey(k)
		}
	}
}
notary-0.1/client/client_root_validation_test.go000066400000000000000000000057131262207326400222770ustar00rootroot00000000000000
package client

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/Sirupsen/logrus"
	"github.com/docker/notary/keystoremanager"
	"github.com/docker/notary/tuf/data"
	"github.com/stretchr/testify/assert"
)

// passphraseRetriever is a canned retriever shared by the tests in this package.
var passphraseRetriever = func(string, string, bool, int) (string, bool, error) { return "passphrase", false, nil }

// TestValidateRoot goes through the process of initializing a repository and makes
// sure the repository looks correct on disk.
// We test this with both an RSA and ECDSA root key
func TestValidateRoot(t *testing.T) {
	logrus.SetLevel(logrus.DebugLevel)
	validateRootSuccessfully(t, data.ECDSAKey)
	if !testing.Short() {
		validateRootSuccessfully(t, data.RSAKey)
	}
}

// validateRootSuccessfully initializes a repo against a fake server, then
// exercises both the TOFUS (trust-on-first-use) path and the
// certificate-mismatch failure path of root validation.
func validateRootSuccessfully(t *testing.T, rootType string) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	gun := "docker.com/notary"

	ts, mux := simpleTestServer(t)
	defer ts.Close()

	repo, _ := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	// tests need to manually bootstrap timestamp as client doesn't generate it
	err = repo.tufRepo.InitTimestamp()
	assert.NoError(t, err, "error creating repository: %s", err)

	// Initialize is supposed to have created new certificate for this repository
	// Let's check for it and store it for later use
	allCerts := repo.KeyStoreManager.TrustedCertificateStore().GetCertificates()
	assert.Len(t, allCerts, 1)

	fakeServerData(t, repo, mux)

	//
	// Test TOFUS logic. We remove all certs and expect a new one to be added after ListTargets
	//
	err = repo.KeyStoreManager.TrustedCertificateStore().RemoveAll()
	assert.NoError(t, err)
	assert.Len(t, repo.KeyStoreManager.TrustedCertificateStore().GetCertificates(), 0)

	// This list targets is expected to succeed and the certificate store to have the new certificate
	_, err = repo.ListTargets()
	assert.NoError(t, err)
	assert.Len(t, repo.KeyStoreManager.TrustedCertificateStore().GetCertificates(), 1)

	//
	// Test certificate mismatch logic. We remove all certs, add a different cert to the
	// same CN, and expect ValidateRoot to fail
	//

	// First, remove all certs
	err = repo.KeyStoreManager.TrustedCertificateStore().RemoveAll()
	assert.NoError(t, err)
	assert.Len(t, repo.KeyStoreManager.TrustedCertificateStore().GetCertificates(), 0)

	// Add a previously generated certificate with CN=docker.com/notary
	err = repo.KeyStoreManager.TrustedCertificateStore().AddCertFromFile(
		"../fixtures/self-signed_docker.com-notary.crt")
	assert.NoError(t, err)

	// This list targets is expected to fail, since there already exists a certificate
	// in the store for the dnsName docker.com/notary, so TOFUS doesn't apply
	_, err = repo.ListTargets()
	if assert.Error(t, err, "An error was expected") {
		assert.Equal(t, err, &keystoremanager.ErrValidationFail{
			Reason: "failed to validate data with current trusted certificates",
		})
	}
}
notary-0.1/client/client_test.go000066400000000000000000000633101262207326400170170ustar00rootroot00000000000000
package client

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/Sirupsen/logrus"
	ctxu "github.com/docker/distribution/context"
	"github.com/docker/notary/client/changelist"
	"github.com/docker/notary/cryptoservice"
	"github.com/docker/notary/server"
	"github.com/docker/notary/server/storage"
	"github.com/docker/notary/trustmanager"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/store"
	"github.com/stretchr/testify/assert"
	"golang.org/x/net/context"
)

// timestampKeyJSON is a canned RSA timestamp key (public and private halves)
// used as a test fixture only — not a real secret.
const timestampKeyJSON =
`{"keytype":"rsa","keyval":{"public":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyvBtTg2xzYS+MTTIBqSpI4V78tt8Yzqi7Jki/Z6NqjiDvcnbgcTqNR2t6B2W5NjGdp/hSaT2jyHM+kdmEGaPxg/zIuHbL3NIp4e0qwovWiEgACPIaELdn8O/kt5swsSKl1KMvLCH1sM86qMibNMAZ/hXOwd90TcHXCgZ91wHEAmsdjDC3dB0TT+FBgOac8RM01Y196QrZoOaDMTWh0EQfw7YbXAElhFVDFxBzDdYWbcIHSIogXQmq0CP+zaL/1WgcZZIClt2M6WCaxxF1S34wNn45gCvVZiZQ/iKWHerSr/2dGQeGo+7ezMSutRzvJ+01fInD86RS/CEtBCFZ1VyQIDAQAB","private":"MIIEpAIBAAKCAQEAyyvBtTg2xzYS+MTTIBqSpI4V78tt8Yzqi7Jki/Z6NqjiDvcnbgcTqNR2t6B2W5NjGdp/hSaT2jyHM+kdmEGaPxg/zIuHbL3NIp4e0qwovWiEgACPIaELdn8O/kt5swsSKl1KMvLCH1sM86qMibNMAZ/hXOwd90TcHXCgZ91wHEAmsdjDC3dB0TT+FBgOac8RM01Y196QrZoOaDMTWh0EQfw7YbXAElhFVDFxBzDdYWbcIHSIogXQmq0CP+zaL/1WgcZZIClt2M6WCaxxF1S34wNn45gCvVZiZQ/iKWHerSr/2dGQeGo+7ezMSutRzvJ+01fInD86RS/CEtBCFZ1VyQIDAQABAoIBAHar8FFxrE1gAGTeUpOF8fG8LIQMRwO4U6eVY7V9GpWiv6gOJTHXYFxU/aL0Ty3eQRxwy9tyVRo8EJz5pRex+e6ws1M+jLOviYqW4VocxQ8dZYd+zBvQfWmRfah7XXJ/HPUx2I05zrmR7VbGX6Bu4g5w3KnyIO61gfyQNKF2bm2Q3yblfupx3URvX0bl180R/+QN2Aslr4zxULFE6b+qJqBydrztq+AAP3WmskRxGa6irFnKxkspJqUpQN1mFselj6iQrzAcwkRPoCw0RwCCMq1/OOYvQtgxTJcO4zDVlbw54PvnxPZtcCWw7fO8oZ2Fvo2SDo75CDOATOGaT4Y9iqECgYEAzWZSpFbN9ZHmvq1lJQg//jFAyjsXRNn/nSvyLQILXltz6EHatImnXo3v+SivG91tfzBI1GfDvGUGaJpvKHoomB+qmhd8KIQhO5MBdAKZMf9fZqZofOPTD9xRXECCwdi+XqHBmL+l1OWz+O9Bh+Qobs2as/hQVgHaoXhQpE0NkTcCgYEA/Tjf6JBGl1+WxQDoGZDJrXoejzG9OFW19RjMdmPrg3t4fnbDtqTpZtCzXxPTCSeMrvplKbqAqZglWyq227ksKw4p7O6YfyhdtvC58oJmivlLr6sFaTsER7mDcYce8sQpqm+XQ8IPbnOk0Z1l6g56euTwTnew49uy25M6U1xL0P8CgYEAxEXv2Kw+OVhHV5PX4BBHHj6we88FiDyMfwM8cvfOJ0datekf9X7ImZkmZEAVPJpWBMD+B0J0jzU2b4SLjfFVkzBHVOH2Ob0xCH2MWPAWtekin7OKizUlPbW5ZV8b0+Kq30DQ/4a7D3rEhK8UPqeuX1tHZox1MAqrgbq3zJj4yvcCgYEAktYPKPm4pYCdmgFrlZ+bA0iEPf7Wvbsd91F5BtHsOOM5PQQ7e0bnvWIaEXEad/2CG9lBHlBy2WVLjDEZthILpa/h6e11ao8KwNGY0iKBuebT17rxOVMqqTjPGt8CuD2994IcEgOPFTpkAdUmyvG4XlkxbB8F6St17NPUB5DGuhsCgYA//Lfytk0FflXEeRQ16LT1YXgV7pcR2jsha4+4O5pxSFw/kTsOfJaYHg8StmROoyFnyE3sg76dCgLn0LENRCe5BvDhJnp5bMpQldG3XwcAxH8FGFNY4LtV/2ZKnJhxcONkfmzQPOmTyedOzrKQ+bNURsqLukCypP7/by6afBY4dA=="}}`

// timestampECDSAKeyJSON is a canned ECDSA timestamp key (public and private
// halves) used as a test fixture only — not a real secret.
const timestampECDSAKeyJSON = `
{"keytype":"ecdsa","keyval":{"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw==","private":"MHcCAQEEIDqtcdzU7H3AbIPSQaxHl9+xYECt7NpK7B1+6ep5cv9CoAoGCCqGSM49AwEHoUQDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw=="}}`

// simpleTestServer returns an httptest server that only serves the canned
// timestamp key; callers can register further handlers on the returned mux.
func simpleTestServer(t *testing.T) (*httptest.Server, *http.ServeMux) {
	mux := http.NewServeMux()
	// TUF will request /v2/docker.com/notary/_trust/tuf/timestamp.key
	// Return a canned timestamp.key
	mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/timestamp.key", func(w http.ResponseWriter, r *http.Request) {
		// Also contains the private key, but for the purpose of this
		// test, we don't care
		fmt.Fprint(w, timestampECDSAKeyJSON)
	})
	ts := httptest.NewServer(mux)
	return ts, mux
}

// fullTestServer stands up a complete in-memory notary server.
func fullTestServer(t *testing.T) *httptest.Server {
	// Set up server
	ctx := context.WithValue(
		context.Background(), "metaStore", storage.NewMemStorage())

	// Do not pass one of the const KeyAlgorithms here as the value! Passing a
	// string is in itself good test that we are handling it correctly as we
	// will be receiving a string from the configuration.
ctx = context.WithValue(ctx, "keyAlgorithm", "ecdsa")

	// Eat the logs instead of spewing them out
	var b bytes.Buffer
	l := logrus.New()
	l.Out = &b
	ctx = ctxu.WithLogger(ctx, logrus.NewEntry(l))

	cryptoService := cryptoservice.NewCryptoService(
		"", trustmanager.NewKeyMemoryStore(passphraseRetriever))
	return httptest.NewServer(server.RootHandler(nil, ctx, cryptoService))
}

// server that returns some particular error code all the time
func errorTestServer(t *testing.T, errorCode int) *httptest.Server {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(errorCode)
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	return server
}

// initializeRepo creates a NotaryRepository in tempBaseDir, generates a root
// key of the requested type, initializes the repo against the given server
// URL, and returns the repo along with the root key ID.
func initializeRepo(t *testing.T, rootType, tempBaseDir, gun, url string) (*NotaryRepository, string) {
	repo, err := NewNotaryRepository(
		tempBaseDir, gun, url, http.DefaultTransport, passphraseRetriever)
	assert.NoError(t, err, "error creating repo: %s", err)

	rootPubKey, err := repo.CryptoService.Create("root", rootType)
	assert.NoError(t, err, "error generating root key: %s", err)

	err = repo.Initialize(rootPubKey.ID())
	assert.NoError(t, err, "error creating repository: %s", err)

	return repo, rootPubKey.ID()
}

// TestInitRepo runs through the process of initializing a repository and makes
// sure the repository looks correct on disk.
// We test this with both an RSA and ECDSA root key
func TestInitRepo(t *testing.T) {
	testInitRepo(t, data.ECDSAKey)
	if !testing.Short() {
		testInitRepo(t, data.RSAKey)
	}
}

func testInitRepo(t *testing.T, rootType string) {
	gun := "docker.com/notary"
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	ts, _ := simpleTestServer(t)
	defer ts.Close()

	repo, rootKeyID := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	// Inspect contents of the temporary directory
	expectedDirs := []string{
		"private",
		filepath.Join("private", "tuf_keys", filepath.FromSlash(gun)),
		filepath.Join("private", "root_keys"),
		"trusted_certificates",
		filepath.Join("trusted_certificates", filepath.FromSlash(gun)),
		"tuf",
		filepath.Join("tuf", filepath.FromSlash(gun), "metadata"),
	}
	for _, dir := range expectedDirs {
		fi, err := os.Stat(filepath.Join(tempBaseDir, dir))
		assert.NoError(t, err, "missing directory in base directory: %s", dir)
		assert.True(t, fi.Mode().IsDir(), "%s is not a directory", dir)
	}

	// Look for keys in private. The filenames should match the key IDs
	// in the private key store.
	keyFileStore, err := trustmanager.NewKeyFileStore(tempBaseDir, passphraseRetriever)
	assert.NoError(t, err)
	privKeyList := keyFileStore.ListFiles()
	for _, privKeyName := range privKeyList {
		privKeyFileName := filepath.Join(keyFileStore.BaseDir(), privKeyName)
		_, err := os.Stat(privKeyFileName)
		assert.NoError(t, err, "missing private key: %s", privKeyName)
	}

	// Look for keys in root_keys
	// There should be a file named after the key ID of the root key we
	// passed in.
	rootKeyFilename := rootKeyID + "_root.key"
	_, err = os.Stat(filepath.Join(tempBaseDir, "private", "root_keys", rootKeyFilename))
	assert.NoError(t, err, "missing root key")

	certificates := repo.KeyStoreManager.TrustedCertificateStore().GetCertificates()
	assert.Len(t, certificates, 1, "unexpected number of certificates")

	certID, err := trustmanager.FingerprintCert(certificates[0])
	assert.NoError(t, err, "unable to fingerprint the certificate")

	// There should be a trusted certificate
	_, err = os.Stat(filepath.Join(tempBaseDir, "trusted_certificates", filepath.FromSlash(gun), certID+".crt"))
	assert.NoError(t, err, "missing trusted certificate")

	// Sanity check the TUF metadata files. Verify that they exist, the JSON is
	// well-formed, and the signatures exist. For the root.json file, also check
	// that the root, snapshot, and targets key IDs are present.
	expectedTUFMetadataFiles := []string{
		filepath.Join("tuf", filepath.FromSlash(gun), "metadata", "root.json"),
		filepath.Join("tuf", filepath.FromSlash(gun), "metadata", "snapshot.json"),
		filepath.Join("tuf", filepath.FromSlash(gun), "metadata", "targets.json"),
	}
	for _, filename := range expectedTUFMetadataFiles {
		fullPath := filepath.Join(tempBaseDir, filename)
		_, err := os.Stat(fullPath)
		assert.NoError(t, err, "missing TUF metadata file: %s", filename)

		jsonBytes, err := ioutil.ReadFile(fullPath)
		assert.NoError(t, err, "error reading TUF metadata file %s: %s", filename, err)

		var decoded data.Signed
		err = json.Unmarshal(jsonBytes, &decoded)
		assert.NoError(t, err, "error parsing TUF metadata file %s: %s", filename, err)

		assert.Len(t, decoded.Signatures, 1, "incorrect number of signatures in TUF metadata file %s", filename)

		assert.NotEmpty(t, decoded.Signatures[0].KeyID, "empty key ID field in TUF metadata file %s", filename)
		assert.NotEmpty(t, decoded.Signatures[0].Method, "empty method field in TUF metadata file %s", filename)
		assert.NotEmpty(t, decoded.Signatures[0].Signature, "empty signature in TUF metadata file %s", filename)

		// Special case for root.json: also check that the signed
		// content for keys and roles
		if strings.HasSuffix(filename, "root.json") {
			var decodedRoot data.Root
			err := json.Unmarshal(decoded.Signed, &decodedRoot)
			assert.NoError(t, err, "error parsing root.json signed section: %s", err)

			assert.Equal(t, "Root", decodedRoot.Type, "_type mismatch in root.json")

			// Expect 4 keys in the Keys map: root, targets, snapshot, timestamp
			assert.Len(t, decodedRoot.Keys, 4, "wrong number of keys in root.json")

			roleCount := 0
			for role := range decodedRoot.Roles {
				roleCount++
				if role != "root" && role != "snapshot" && role != "targets" && role != "timestamp" {
					t.Fatalf("unexpected role %s in root.json", role)
				}
			}
			assert.Equal(t, 4, roleCount, "wrong number of roles (%d) in root.json", roleCount)
		}
	}
}

// TestAddTarget adds a target to the repo and confirms that the changelist
// is updated correctly.
// We test this with both an RSA and ECDSA root key
func TestAddTarget(t *testing.T) {
	testAddTarget(t, data.ECDSAKey)
	if !testing.Short() {
		testAddTarget(t, data.RSAKey)
	}
}

// addTarget builds a Target from targetFile and stages it on repo's changelist.
func addTarget(t *testing.T, repo *NotaryRepository, targetName, targetFile string) *Target {
	target, err := NewTarget(targetName, targetFile)
	assert.NoError(t, err, "error creating target")
	err = repo.AddTarget(target)
	assert.NoError(t, err, "error adding target")
	return target
}

// calls GetChangelist and gets the actual changes out
func getChanges(t *testing.T, repo *NotaryRepository) []changelist.Change {
	changeList, err := repo.GetChangelist()
	assert.NoError(t, err)
	return changeList.List()
}

func testAddTarget(t *testing.T, rootType string) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	gun := "docker.com/notary"

	ts, _ := simpleTestServer(t)
	defer ts.Close()

	repo, _ := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	// tests need to manually bootstrap timestamp as client doesn't generate it
	err = repo.tufRepo.InitTimestamp()
	assert.NoError(t, err, "error creating repository: %s", err)

	assert.Len(t, getChanges(t, repo), 0, "should start with zero changes")

	// Add fixtures/intermediate-ca.crt as a target. There's no particular
	// reason for using this file except that it happens to be available as
	// a fixture.
	addTarget(t, repo, "latest", "../fixtures/intermediate-ca.crt")

	changes := getChanges(t, repo)
	assert.Len(t, changes, 1, "wrong number of changes files found")

	for _, c := range changes { // there is only one
		assert.EqualValues(t, changelist.ActionCreate, c.Action())
		assert.Equal(t, "targets", c.Scope())
		assert.Equal(t, "target", c.Type())
		assert.Equal(t, "latest", c.Path())
		assert.NotEmpty(t, c.Content())
	}

	// Create a second target
	addTarget(t, repo, "current", "../fixtures/intermediate-ca.crt")

	changes = getChanges(t, repo)
	assert.Len(t, changes, 2, "wrong number of changelist files found")

	newFileFound := false
	for _, c := range changes {
		if c.Path() != "latest" {
			assert.EqualValues(t, changelist.ActionCreate, c.Action())
			assert.Equal(t, "targets", c.Scope())
			assert.Equal(t, "target", c.Type())
			assert.Equal(t, "current", c.Path())
			assert.NotEmpty(t, c.Content())

			newFileFound = true
		}
	}
	assert.True(t, newFileFound, "second changelist file not found")
}

// TestListTarget fakes serving signed metadata files over the test's
// internal HTTP server to ensure that ListTargets returns the correct number
// of listed targets.
// We test this with both an RSA and ECDSA root key
func TestListTarget(t *testing.T) {
	testListEmptyTargets(t, data.ECDSAKey)
	testListTarget(t, data.ECDSAKey)
	if !testing.Short() {
		testListEmptyTargets(t, data.RSAKey)
		testListTarget(t, data.RSAKey)
	}
}

func testListEmptyTargets(t *testing.T, rootType string) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	gun := "docker.com/notary"

	ts := fullTestServer(t)
	defer ts.Close()

	repo, _ := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	// tests need to manually bootstrap timestamp as client doesn't generate it
	err = repo.tufRepo.InitTimestamp()
	assert.NoError(t, err, "error creating repository: %s", err)

	_, err = repo.ListTargets()
	assert.Error(t, err) // no trust data
}

// reads data from the repository in order to fake data being served via
// the ServeMux.
func fakeServerData(t *testing.T, repo *NotaryRepository, mux *http.ServeMux) {
	tempKey, err := data.UnmarshalPrivateKey([]byte(timestampECDSAKeyJSON))
	assert.NoError(t, err)

	savedTUFRepo := repo.tufRepo // in case this is overwritten

	fileStore, err := trustmanager.NewKeyFileStore(repo.baseDir, passphraseRetriever)
	assert.NoError(t, err)
	fileStore.AddKey(
		filepath.Join(filepath.FromSlash(repo.gun), tempKey.ID()),
		"nonroot", tempKey)

	rootJSONFile := filepath.Join(repo.baseDir, "tuf",
		filepath.FromSlash(repo.gun), "metadata", "root.json")
	rootFileBytes, err := ioutil.ReadFile(rootJSONFile)

	signedTargets, err := savedTUFRepo.SignTargets(
		"targets", data.DefaultExpires("targets"))
	assert.NoError(t, err)

	signedSnapshot, err := savedTUFRepo.SignSnapshot(
		data.DefaultExpires("snapshot"))
	assert.NoError(t, err)

	signedTimestamp, err := savedTUFRepo.SignTimestamp(
		data.DefaultExpires("timestamp"))
	assert.NoError(t, err)

	mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/root.json", func(w http.ResponseWriter, r *http.Request) {
		assert.NoError(t, err)
		fmt.Fprint(w, string(rootFileBytes))
	})

	mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/timestamp.json", func(w http.ResponseWriter, r *http.Request) {
		timestampJSON, _ := json.Marshal(signedTimestamp)
		fmt.Fprint(w, string(timestampJSON))
	})

	mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/snapshot.json", func(w http.ResponseWriter, r *http.Request) {
		snapshotJSON, _ := json.Marshal(signedSnapshot)
		fmt.Fprint(w, string(snapshotJSON))
	})

	mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/targets.json", func(w http.ResponseWriter, r *http.Request) {
		targetsJSON, _ := json.Marshal(signedTargets)
		fmt.Fprint(w, string(targetsJSON))
	})
}

func testListTarget(t *testing.T, rootType string) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	gun := "docker.com/notary"

	ts, mux := simpleTestServer(t)
	defer ts.Close()

	repo, _ := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	// tests need to manually bootstrap timestamp as client doesn't generate it
	err = repo.tufRepo.InitTimestamp()
	assert.NoError(t, err, "error creating repository: %s", err)

	latestTarget := addTarget(t, repo, "latest", "../fixtures/intermediate-ca.crt")
	currentTarget := addTarget(t, repo, "current", "../fixtures/intermediate-ca.crt")

	// Apply the changelist. Normally, this would be done by Publish

	// load the changelist for this repo
	cl, err := changelist.NewFileChangelist(
		filepath.Join(tempBaseDir, "tuf", filepath.FromSlash(gun), "changelist"))
	assert.NoError(t, err, "could not open changelist")

	// apply the changelist to the repo
	err = applyChangelist(repo.tufRepo, cl)
	assert.NoError(t, err, "could not apply changelist")

	fakeServerData(t, repo, mux)

	targets, err := repo.ListTargets()
	assert.NoError(t, err)

	// Should be two targets
	assert.Len(t, targets, 2, "unexpected number of targets returned by ListTargets")

	if targets[0].Name == "latest" {
		assert.Equal(t, latestTarget, targets[0], "latest target does not match")
		assert.Equal(t, currentTarget, targets[1], "current target does not match")
	} else if targets[0].Name == "current" {
		assert.Equal(t, currentTarget, targets[0], "current target does not match")
		assert.Equal(t, latestTarget, targets[1], "latest target does not match")
	} else {
		t.Fatalf("unexpected target name: %s", targets[0].Name)
	}

	// Also test GetTargetByName
	newLatestTarget, err := repo.GetTargetByName("latest")
	assert.NoError(t, err)
	assert.Equal(t, latestTarget, newLatestTarget, "latest target does not match")

	newCurrentTarget, err := repo.GetTargetByName("current")
	assert.NoError(t, err)
	assert.Equal(t, currentTarget, newCurrentTarget, "current target does not match")
}

// TestValidateRootKey verifies that the public data in root.json for the root
// key is a valid x509 certificate.
func TestValidateRootKey(t *testing.T) {
	testValidateRootKey(t, data.ECDSAKey)
	if !testing.Short() {
		testValidateRootKey(t, data.RSAKey)
	}
}

func testValidateRootKey(t *testing.T, rootType string) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	gun := "docker.com/notary"

	ts, _ := simpleTestServer(t)
	defer ts.Close()

	initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	rootJSONFile := filepath.Join(tempBaseDir, "tuf", filepath.FromSlash(gun), "metadata", "root.json")

	jsonBytes, err := ioutil.ReadFile(rootJSONFile)
	assert.NoError(t, err, "error reading TUF metadata file %s: %s", rootJSONFile, err)

	var decoded data.Signed
	err = json.Unmarshal(jsonBytes, &decoded)
	assert.NoError(t, err, "error parsing TUF metadata file %s: %s", rootJSONFile, err)

	var decodedRoot data.Root
	err = json.Unmarshal(decoded.Signed, &decodedRoot)
	assert.NoError(t, err, "error parsing root.json signed section: %s", err)

	// collect the key IDs listed for the root role
	keyids := []string{}
	for role, roleData := range decodedRoot.Roles {
		if role == "root" {
			keyids = append(keyids, roleData.KeyIDs...)
		}
	}
	assert.NotEmpty(t, keyids)

	// every root key ID must resolve to a key whose public data parses
	// as a PEM x509 certificate
	for _, keyid := range keyids {
		if key, ok := decodedRoot.Keys[keyid]; !ok {
			t.Fatal("key id not found in keys")
		} else {
			_, err := trustmanager.LoadCertFromPEM(key.Public())
			assert.NoError(t, err, "key is not a valid cert")
		}
	}
}

// TestGetChangelist ensures that the changelist returned matches the changes
// added.
// We test this with both an RSA and ECDSA root key
func TestGetChangelist(t *testing.T) {
	testGetChangelist(t, data.ECDSAKey)
	if !testing.Short() {
		testGetChangelist(t, data.RSAKey)
	}
}

func testGetChangelist(t *testing.T, rootType string) {
	// Temporary directory where test files will be created
	tempBaseDir, err := ioutil.TempDir("", "notary-test-")
	defer os.RemoveAll(tempBaseDir)
	assert.NoError(t, err, "failed to create a temporary directory: %s", err)

	gun := "docker.com/notary"

	ts, _ := simpleTestServer(t)
	defer ts.Close()

	repo, _ := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL)

	assert.Len(t, getChanges(t, repo), 0, "No changes should be in changelist yet")

	// Create 2 targets
	addTarget(t, repo, "latest", "../fixtures/intermediate-ca.crt")
	addTarget(t, repo, "current", "../fixtures/intermediate-ca.crt")

	// Test loading changelist
	chgs := getChanges(t, repo)
	assert.Len(t, chgs, 2, "Wrong number of changes returned from changelist")

	changes := make(map[string]changelist.Change)
	for _, ch := range chgs {
		changes[ch.Path()] = ch
	}

	currentChange := changes["current"]
	assert.NotNil(t, currentChange, "Expected changelist to contain a change for path 'current'")
	assert.EqualValues(t, changelist.ActionCreate, currentChange.Action())
	assert.Equal(t, "targets", currentChange.Scope())
	assert.Equal(t, "target", currentChange.Type())
	assert.Equal(t, "current", currentChange.Path())

	latestChange := changes["latest"]
	assert.NotNil(t, latestChange, "Expected changelist to contain a change for path 'latest'")
	assert.EqualValues(t, changelist.ActionCreate, latestChange.Action())
	assert.Equal(t, "targets", latestChange.Scope())
	assert.Equal(t, "target", latestChange.Type())
	assert.Equal(t, "latest", latestChange.Path())
}

// TestPublish creates a repo, instantiates a notary server, and publishes
// the repo to the server.
// We test this with both an RSA and ECDSA root key func TestPublish(t *testing.T) { testPublish(t, data.ECDSAKey) if !testing.Short() { testPublish(t, data.RSAKey) } } func testPublish(t *testing.T, rootType string) { // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) gun := "docker.com/notary" ts := fullTestServer(t) defer ts.Close() repo, _ := initializeRepo(t, rootType, tempBaseDir, gun, ts.URL) // Create 2 targets latestTarget := addTarget(t, repo, "latest", "../fixtures/intermediate-ca.crt") currentTarget := addTarget(t, repo, "current", "../fixtures/intermediate-ca.crt") assert.Len(t, getChanges(t, repo), 2, "wrong number of changelist files found") // Now test Publish err = repo.Publish() assert.NoError(t, err) assert.Len(t, getChanges(t, repo), 0, "wrong number of changelist files found") // Create a new repo and pull from the server tempBaseDir2, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir2) assert.NoError(t, err, "failed to create a temporary directory: %s", err) repo2, err := NewNotaryRepository(tempBaseDir, gun, ts.URL, http.DefaultTransport, passphraseRetriever) assert.NoError(t, err, "error creating repository: %s", err) targets, err := repo2.ListTargets() assert.NoError(t, err) // Should be two targets assert.Len(t, targets, 2, "unexpected number of targets returned by ListTargets") if targets[0].Name == "latest" { assert.Equal(t, latestTarget, targets[0], "latest target does not match") assert.Equal(t, currentTarget, targets[1], "current target does not match") } else if targets[0].Name == "current" { assert.Equal(t, currentTarget, targets[0], "current target does not match") assert.Equal(t, latestTarget, targets[1], "latest target does not match") } else { t.Fatalf("unexpected target name: %s", targets[0].Name) } // Also test GetTargetByName 
newLatestTarget, err := repo2.GetTargetByName("latest") assert.NoError(t, err) assert.Equal(t, latestTarget, newLatestTarget, "latest target does not match") newCurrentTarget, err := repo2.GetTargetByName("current") assert.NoError(t, err) assert.Equal(t, currentTarget, newCurrentTarget, "current target does not match") } func TestRotate(t *testing.T) { // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) gun := "docker.com/notary" ts := fullTestServer(t) defer ts.Close() repo, _ := initializeRepo(t, data.ECDSAKey, tempBaseDir, gun, ts.URL) // Adding a target will allow us to confirm the repository is still valid after // rotating the keys. addTarget(t, repo, "latest", "../fixtures/intermediate-ca.crt") // Publish err = repo.Publish() assert.NoError(t, err) // Get root.json and capture targets + snapshot key IDs repo.GetTargetByName("latest") // force a pull targetsKeyIDs := repo.tufRepo.Root.Signed.Roles["targets"].KeyIDs snapshotKeyIDs := repo.tufRepo.Root.Signed.Roles["snapshot"].KeyIDs assert.Len(t, targetsKeyIDs, 1) assert.Len(t, snapshotKeyIDs, 1) // Do rotation repo.RotateKeys() // Publish err = repo.Publish() assert.NoError(t, err) // Get root.json. Check targets + snapshot keys have changed // and that they match those found in the changelist. 
_, err = repo.GetTargetByName("latest") // force a pull assert.NoError(t, err) newTargetsKeyIDs := repo.tufRepo.Root.Signed.Roles["targets"].KeyIDs newSnapshotKeyIDs := repo.tufRepo.Root.Signed.Roles["snapshot"].KeyIDs assert.Len(t, newTargetsKeyIDs, 1) assert.Len(t, newSnapshotKeyIDs, 1) assert.NotEqual(t, targetsKeyIDs[0], newTargetsKeyIDs[0]) assert.NotEqual(t, snapshotKeyIDs[0], newSnapshotKeyIDs[0]) // Confirm changelist dir empty after publishing changes changes := getChanges(t, repo) assert.Len(t, changes, 0, "wrong number of changelist files found") } // If there is no local cache, notary operations return the remote error code func TestRemoteServerUnavailableNoLocalCache(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory: %s", err) defer os.RemoveAll(tempBaseDir) ts := errorTestServer(t, 500) defer ts.Close() repo, err := NewNotaryRepository(tempBaseDir, "docker.com/notary", ts.URL, http.DefaultTransport, passphraseRetriever) assert.NoError(t, err, "error creating repo: %s", err) _, err = repo.ListTargets() assert.Error(t, err) assert.IsType(t, store.ErrServerUnavailable{}, err) _, err = repo.GetTargetByName("targetName") assert.Error(t, err) assert.IsType(t, store.ErrServerUnavailable{}, err) err = repo.Publish() assert.Error(t, err) assert.IsType(t, store.ErrServerUnavailable{}, err) } notary-0.1/client/helpers.go000066400000000000000000000065531262207326400161520ustar00rootroot00000000000000package client import ( "encoding/json" "net/http" "time" "github.com/Sirupsen/logrus" "github.com/docker/notary/client/changelist" tuf "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/store" ) // Use this to initialize remote HTTPStores from the config settings func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStore, error) { return store.NewHTTPStore( 
baseURL+"/v2/"+gun+"/_trust/tuf/",
		"",
		"json",
		"",
		"key",
		rt,
	)
}

// applyChangelist walks the changes in cl and applies each one to repo,
// dispatching on the change's scope. Changes with an unsupported scope are
// logged at debug level and skipped; the first error aborts the walk.
func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
	it, err := cl.NewIterator()
	if err != nil {
		return err
	}
	applied := 0
	for it.HasNext() {
		change, err := it.Next()
		if err != nil {
			return err
		}
		switch change.Scope() {
		case changelist.ScopeTargets:
			err = applyTargetsChange(repo, change)
		case changelist.ScopeRoot:
			err = applyRootChange(repo, change)
		default:
			logrus.Debug("scope not supported: ", change.Scope())
		}
		applied++
		if err != nil {
			return err
		}
	}
	logrus.Debugf("applied %d change(s)", applied)
	return nil
}

// applyTargetsChange adds or removes a single target file entry in repo
// according to the change's action. Unknown actions are logged and ignored.
func applyTargetsChange(repo *tuf.Repo, c changelist.Change) error {
	switch c.Action() {
	case changelist.ActionCreate:
		logrus.Debug("changelist add: ", c.Path())
		meta := &data.FileMeta{}
		if err := json.Unmarshal(c.Content(), meta); err != nil {
			return err
		}
		_, err := repo.AddTargets(c.Scope(), data.Files{c.Path(): *meta})
		return err
	case changelist.ActionDelete:
		logrus.Debug("changelist remove: ", c.Path())
		return repo.RemoveTargets(c.Scope(), c.Path())
	default:
		logrus.Debug("action not yet supported: ", c.Action())
		return nil
	}
}

// applyRootChange dispatches root-scoped changes by type; currently only
// root-role changes are understood.
func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
	switch c.Type() {
	case changelist.TypeRootRole:
		return applyRootRoleChange(repo, c)
	default:
		logrus.Debug("type of root change not yet supported: ", c.Type())
		return nil
	}
}

// applyRootRoleChange replaces all of a base role's keys with the keys
// carried in the change payload.
func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
	switch c.Action() {
	case changelist.ActionCreate:
		// replaces all keys for a role
		d := &changelist.TufRootData{}
		err := json.Unmarshal(c.Content(), d)
		if err != nil {
			return err
		}
		err = repo.ReplaceBaseKeys(d.RoleName, d.Keys...)
if err != nil { return err } default: logrus.Debug("action not yet supported for root: ", c.Action()) } return nil } func nearExpiry(r *data.SignedRoot) bool { plus6mo := time.Now().AddDate(0, 6, 0) return r.Signed.Expires.Before(plus6mo) } func initRoles(kdb *keys.KeyDB, rootKey, targetsKey, snapshotKey, timestampKey data.PublicKey) error { rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil) if err != nil { return err } targetsRole, err := data.NewRole("targets", 1, []string{targetsKey.ID()}, nil, nil) if err != nil { return err } snapshotRole, err := data.NewRole("snapshot", 1, []string{snapshotKey.ID()}, nil, nil) if err != nil { return err } timestampRole, err := data.NewRole("timestamp", 1, []string{timestampKey.ID()}, nil, nil) if err != nil { return err } if err := kdb.AddRole(rootRole); err != nil { return err } if err := kdb.AddRole(targetsRole); err != nil { return err } if err := kdb.AddRole(snapshotRole); err != nil { return err } if err := kdb.AddRole(timestampRole); err != nil { return err } return nil } notary-0.1/client/helpers_test.go000066400000000000000000000067331262207326400172110ustar00rootroot00000000000000package client import ( "crypto/sha256" "encoding/json" "testing" "github.com/docker/notary/client/changelist" tuf "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" "github.com/stretchr/testify/assert" ) func TestApplyTargetsChange(t *testing.T) { kdb := keys.NewDB() role, err := data.NewRole("targets", 1, nil, nil, nil) assert.NoError(t, err) kdb.AddRole(role) repo := tuf.NewRepo(kdb, nil) err = repo.InitTargets() assert.NoError(t, err) hash := sha256.Sum256([]byte{}) f := &data.FileMeta{ Length: 1, Hashes: map[string][]byte{ "sha256": hash[:], }, } fjson, err := json.Marshal(f) assert.NoError(t, err) addChange := &changelist.TufChange{ Actn: changelist.ActionCreate, Role: changelist.ScopeTargets, ChangeType: "target", ChangePath: "latest", Data: fjson, } err = 
applyTargetsChange(repo, addChange) assert.NoError(t, err) assert.NotNil(t, repo.Targets["targets"].Signed.Targets["latest"]) removeChange := &changelist.TufChange{ Actn: changelist.ActionDelete, Role: changelist.ScopeTargets, ChangeType: "target", ChangePath: "latest", Data: nil, } err = applyTargetsChange(repo, removeChange) assert.NoError(t, err) _, ok := repo.Targets["targets"].Signed.Targets["latest"] assert.False(t, ok) } func TestApplyChangelist(t *testing.T) { kdb := keys.NewDB() role, err := data.NewRole("targets", 1, nil, nil, nil) assert.NoError(t, err) kdb.AddRole(role) repo := tuf.NewRepo(kdb, nil) err = repo.InitTargets() assert.NoError(t, err) hash := sha256.Sum256([]byte{}) f := &data.FileMeta{ Length: 1, Hashes: map[string][]byte{ "sha256": hash[:], }, } fjson, err := json.Marshal(f) assert.NoError(t, err) cl := changelist.NewMemChangelist() addChange := &changelist.TufChange{ Actn: changelist.ActionCreate, Role: changelist.ScopeTargets, ChangeType: "target", ChangePath: "latest", Data: fjson, } cl.Add(addChange) err = applyChangelist(repo, cl) assert.NoError(t, err) assert.NotNil(t, repo.Targets["targets"].Signed.Targets["latest"]) cl.Clear("") removeChange := &changelist.TufChange{ Actn: changelist.ActionDelete, Role: changelist.ScopeTargets, ChangeType: "target", ChangePath: "latest", Data: nil, } cl.Add(removeChange) err = applyChangelist(repo, cl) assert.NoError(t, err) _, ok := repo.Targets["targets"].Signed.Targets["latest"] assert.False(t, ok) } func TestApplyChangelistMulti(t *testing.T) { kdb := keys.NewDB() role, err := data.NewRole("targets", 1, nil, nil, nil) assert.NoError(t, err) kdb.AddRole(role) repo := tuf.NewRepo(kdb, nil) err = repo.InitTargets() assert.NoError(t, err) hash := sha256.Sum256([]byte{}) f := &data.FileMeta{ Length: 1, Hashes: map[string][]byte{ "sha256": hash[:], }, } fjson, err := json.Marshal(f) assert.NoError(t, err) cl := changelist.NewMemChangelist() addChange := &changelist.TufChange{ Actn: 
changelist.ActionCreate, Role: changelist.ScopeTargets, ChangeType: "target", ChangePath: "latest", Data: fjson, } removeChange := &changelist.TufChange{ Actn: changelist.ActionDelete, Role: changelist.ScopeTargets, ChangeType: "target", ChangePath: "latest", Data: nil, } cl.Add(addChange) cl.Add(removeChange) err = applyChangelist(repo, cl) assert.NoError(t, err) _, ok := repo.Targets["targets"].Signed.Targets["latest"] assert.False(t, ok) } notary-0.1/client/repo.go000066400000000000000000000026441262207326400154520ustar00rootroot00000000000000// +build !pkcs11 package client import ( "fmt" "net/http" "path/filepath" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/keystoremanager" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/store" ) // NewNotaryRepository is a helper method that returns a new notary repository. // It takes the base directory under where all the trust files will be stored // (usually ~/.docker/trust/). 
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, retriever passphrase.Retriever) (*NotaryRepository, error) { fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever) if err != nil { return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir) } keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir) if err != nil { return nil, err } cryptoService := cryptoservice.NewCryptoService(gun, fileKeyStore) nRepo := &NotaryRepository{ gun: gun, baseDir: baseDir, baseURL: baseURL, tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)), CryptoService: cryptoService, roundTrip: rt, KeyStoreManager: keyStoreManager, } fileStore, err := store.NewFilesystemStore( nRepo.tufRepoPath, "metadata", "json", "", ) if err != nil { return nil, err } nRepo.fileStore = fileStore return nRepo, nil } notary-0.1/client/repo_pkcs11.go000066400000000000000000000032731262207326400166330ustar00rootroot00000000000000// +build pkcs11 package client import ( "fmt" "net/http" "path/filepath" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/keystoremanager" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustmanager/yubikey" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/store" ) // NewNotaryRepository is a helper method that returns a new notary repository. // It takes the base directory under where all the trust files will be stored // (usually ~/.docker/trust/). 
func NewNotaryRepository(baseDir, gun, baseURL string, rt http.RoundTripper, retriever passphrase.Retriever) (*NotaryRepository, error) { fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever) if err != nil { return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir) } keyStoreManager, err := keystoremanager.NewKeyStoreManager(baseDir) yubiKeyStore, _ := yubikey.NewYubiKeyStore(fileKeyStore, retriever) var cryptoService signed.CryptoService if yubiKeyStore == nil { cryptoService = cryptoservice.NewCryptoService(gun, fileKeyStore) } else { cryptoService = cryptoservice.NewCryptoService(gun, yubiKeyStore, fileKeyStore) } nRepo := &NotaryRepository{ gun: gun, baseDir: baseDir, baseURL: baseURL, tufRepoPath: filepath.Join(baseDir, tufDir, filepath.FromSlash(gun)), CryptoService: cryptoService, roundTrip: rt, KeyStoreManager: keyStoreManager, } fileStore, err := store.NewFilesystemStore( nRepo.tufRepoPath, "metadata", "json", "", ) if err != nil { return nil, err } nRepo.fileStore = fileStore return nRepo, nil } notary-0.1/cmd/000077500000000000000000000000001262207326400134355ustar00rootroot00000000000000notary-0.1/cmd/notary-server/000077500000000000000000000000001262207326400162555ustar00rootroot00000000000000notary-0.1/cmd/notary-server/config.json000066400000000000000000000010701262207326400204130ustar00rootroot00000000000000{ "server": { "addr": ":4443", "tls_key_file": "./fixtures/notary-server.key", "tls_cert_file": "./fixtures/notary-server.crt" }, "trust_service": { "type": "remote", "hostname": "notarysigner", "port": "7899", "tls_ca_file": "./fixtures/root-ca.crt", "key_algorithm": "ecdsa", "tls_client_cert": "./fixtures/notary-server.crt", "tls_client_key": "./fixtures/notary-server.key" }, "logging": { "level": "debug" }, "storage": { "backend": "mysql", "db_url": "dockercondemo:dockercondemo@tcp(notarymysql:3306)/dockercondemo" } } 
notary-0.1/cmd/notary-server/main.go000066400000000000000000000157741262207326400175460ustar00rootroot00000000000000package main import ( "crypto/tls" _ "expvar" "flag" "fmt" "net/http" _ "net/http/pprof" "os" "path/filepath" "strings" "time" "github.com/Sirupsen/logrus" "github.com/bugsnag/bugsnag-go" "github.com/docker/distribution/health" _ "github.com/docker/distribution/registry/auth/htpasswd" _ "github.com/docker/distribution/registry/auth/token" "github.com/docker/notary/signer/client" "github.com/docker/notary/tuf/signed" _ "github.com/go-sql-driver/mysql" "golang.org/x/net/context" bugsnag_hook "github.com/Sirupsen/logrus/hooks/bugsnag" "github.com/docker/notary/server" "github.com/docker/notary/server/storage" "github.com/docker/notary/utils" "github.com/docker/notary/version" "github.com/spf13/viper" ) // DebugAddress is the debug server address to listen on const DebugAddress = "localhost:8080" var ( debug bool configFile string mainViper = viper.New() ) func init() { // set default log level to Error mainViper.SetDefault("logging", map[string]interface{}{"level": 2}) // Setup flags flag.StringVar(&configFile, "config", "", "Path to configuration file") flag.BoolVar(&debug, "debug", false, "Enable the debugging server on localhost:8080") } // optionally sets up TLS for the server - if no TLS configuration is // specified, TLS is not enabled. func serverTLS(configuration *viper.Viper) (*tls.Config, error) { tlsCertFile := configuration.GetString("server.tls_cert_file") tlsKeyFile := configuration.GetString("server.tls_key_file") if tlsCertFile == "" && tlsKeyFile == "" { return nil, nil } else if tlsCertFile == "" || tlsKeyFile == "" { return nil, fmt.Errorf("Partial TLS configuration found. 
Either include both a cert and key file in the configuration, or include neither to disable TLS.") } tlsConfig, err := utils.ConfigureServerTLS(&utils.ServerTLSOpts{ ServerCertFile: tlsCertFile, ServerKeyFile: tlsKeyFile, }) if err != nil { return nil, fmt.Errorf("Unable to set up TLS: %s", err.Error()) } return tlsConfig, nil } // sets up TLS for the GRPC connection to notary-signer func grpcTLS(configuration *viper.Viper) (*tls.Config, error) { rootCA := configuration.GetString("trust_service.tls_ca_file") serverName := configuration.GetString("trust_service.hostname") clientCert := configuration.GetString("trust_service.tls_client_cert") clientKey := configuration.GetString("trust_service.tls_client_key") if (clientCert == "" && clientKey != "") || (clientCert != "" && clientKey == "") { return nil, fmt.Errorf("Partial TLS configuration found. Either include both a client cert and client key file in the configuration, or include neither.") } tlsConfig, err := utils.ConfigureClientTLS(&utils.ClientTLSOpts{ RootCAFile: rootCA, ServerName: serverName, ClientCertFile: clientCert, ClientKeyFile: clientKey, }) if err != nil { return nil, fmt.Errorf( "Unable to configure TLS to the trust service: %s", err.Error()) } return tlsConfig, nil } func main() { flag.Usage = usage flag.Parse() if debug { go debugServer(DebugAddress) } // when the server starts print the version for debugging and issue logs later logrus.Infof("Version: %s, Git commit: %s", version.NotaryVersion, version.GitCommit) ctx := context.Background() filename := filepath.Base(configFile) ext := filepath.Ext(configFile) configPath := filepath.Dir(configFile) mainViper.SetConfigType(strings.TrimPrefix(ext, ".")) mainViper.SetConfigName(strings.TrimSuffix(filename, ext)) mainViper.AddConfigPath(configPath) // Automatically accept configuration options from the environment mainViper.SetEnvPrefix("NOTARY_SERVER") mainViper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) mainViper.AutomaticEnv() err := 
mainViper.ReadInConfig() if err != nil { logrus.Error("Viper Error: ", err.Error()) logrus.Error("Could not read config at ", configFile) os.Exit(1) } lvl, err := logrus.ParseLevel(mainViper.GetString("logging.level")) if err != nil { lvl = logrus.ErrorLevel logrus.Error("Could not parse log level from config. Defaulting to ErrorLevel") } logrus.SetLevel(lvl) // set up bugsnag and attach to logrus bugs := mainViper.GetString("reporting.bugsnag") if bugs != "" { apiKey := mainViper.GetString("reporting.bugsnag_api_key") releaseStage := mainViper.GetString("reporting.bugsnag_release_stage") bugsnag.Configure(bugsnag.Configuration{ APIKey: apiKey, ReleaseStage: releaseStage, }) hook, err := bugsnag_hook.NewBugsnagHook() if err != nil { logrus.Error("Could not attach bugsnag to logrus: ", err.Error()) } else { logrus.AddHook(hook) } } keyAlgo := mainViper.GetString("trust_service.key_algorithm") if keyAlgo == "" { logrus.Fatal("no key algorithm configured.") os.Exit(1) } ctx = context.WithValue(ctx, "keyAlgorithm", keyAlgo) var trust signed.CryptoService if mainViper.GetString("trust_service.type") == "remote" { logrus.Info("Using remote signing service") clientTLS, err := grpcTLS(mainViper) if err != nil { logrus.Fatal(err.Error()) } notarySigner := client.NewNotarySigner( mainViper.GetString("trust_service.hostname"), mainViper.GetString("trust_service.port"), clientTLS, ) trust = notarySigner minute := 1 * time.Minute health.RegisterPeriodicFunc( "Trust operational", // If the trust service fails, the server is degraded but not // exactly unheatlthy, so always return healthy and just log an // error. 
func() error { err := notarySigner.CheckHealth(minute) if err != nil { logrus.Error("Trust not fully operational: ", err.Error()) } return nil }, minute) } else { logrus.Info("Using local signing service") trust = signed.NewEd25519() } if mainViper.GetString("storage.backend") == "mysql" { logrus.Info("Using mysql backend") dbURL := mainViper.GetString("storage.db_url") store, err := storage.NewSQLStorage("mysql", dbURL) if err != nil { logrus.Fatal("Error starting DB driver: ", err.Error()) return // not strictly needed but let's be explicit } health.RegisterPeriodicFunc( "DB operational", store.CheckHealth, time.Second*60) ctx = context.WithValue(ctx, "metaStore", store) } else { logrus.Debug("Using memory backend") ctx = context.WithValue(ctx, "metaStore", storage.NewMemStorage()) } tlsConfig, err := serverTLS(mainViper) if err != nil { logrus.Fatal(err.Error()) } logrus.Info("Starting Server") err = server.Run( ctx, mainViper.GetString("server.addr"), tlsConfig, trust, mainViper.GetString("auth.type"), mainViper.Get("auth.options"), ) logrus.Error(err.Error()) return } func usage() { fmt.Println("usage:", os.Args[0]) flag.PrintDefaults() } // debugServer starts the debug server with pprof, expvar among other // endpoints. The addr should not be exposed externally. For most of these to // work, tls cannot be enabled on the endpoint, so it is generally separate. 
func debugServer(addr string) { logrus.Info("Debug server listening on", addr) if err := http.ListenAndServe(addr, nil); err != nil { logrus.Fatal("error listening on debug interface: ", err) } } notary-0.1/cmd/notary-server/main_test.go000066400000000000000000000111461262207326400205720ustar00rootroot00000000000000package main import ( "bytes" "crypto/tls" "fmt" "strings" "testing" "github.com/spf13/viper" "github.com/stretchr/testify/assert" ) const ( Cert = "../../fixtures/notary-server.crt" Key = "../../fixtures/notary-server.key" Root = "../../fixtures/root-ca.crt" ) // initializes a viper object with test configuration func configure(jsonConfig []byte) *viper.Viper { config := viper.New() config.SetConfigType("json") config.ReadConfig(bytes.NewBuffer(jsonConfig)) return config } // If neither the cert nor the key are provided, a nil tls config is returned. func TestServerTLSMissingCertAndKey(t *testing.T) { tlsConfig, err := serverTLS(configure([]byte(`{"server": {}}`))) assert.NoError(t, err) assert.Nil(t, tlsConfig) } // Cert and Key either both have to be empty or both have to be provided. 
func TestServerTLSMissingCertAndOrKey(t *testing.T) { configs := []string{ fmt.Sprintf(`{"tls_cert_file": "%s"}`, Cert), fmt.Sprintf(`{"tls_key_file": "%s"}`, Key), } for _, serverConfig := range configs { config := configure( []byte(fmt.Sprintf(`{"server": %s}`, serverConfig))) tlsConfig, err := serverTLS(config) assert.Error(t, err) assert.Nil(t, tlsConfig) assert.True(t, strings.Contains(err.Error(), "Partial TLS configuration found.")) } } // The rest of the functionality of serverTLS depends upon // utils.ConfigureServerTLS, so this test just asserts that if successful, // the correct tls.Config is returned based on all the configuration parameters func TestServerTLSSuccess(t *testing.T) { keypair, err := tls.LoadX509KeyPair(Cert, Key) assert.NoError(t, err, "Unable to load cert and key for testing") config := fmt.Sprintf( `{"server": {"tls_cert_file": "%s", "tls_key_file": "%s"}}`, Cert, Key) tlsConfig, err := serverTLS(configure([]byte(config))) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) } // The rest of the functionality of serverTLS depends upon // utils.ConfigureServerTLS, so this test just asserts that if it fails, // the error is propogated. func TestServerTLSFailure(t *testing.T) { config := fmt.Sprintf( `{"server": {"tls_cert_file": "non-exist", "tls_key_file": "%s"}}`, Key) tlsConfig, err := serverTLS(configure([]byte(config))) assert.Error(t, err) assert.Nil(t, tlsConfig) assert.True(t, strings.Contains(err.Error(), "Unable to set up TLS")) } // Client cert and Key either both have to be empty or both have to be // provided. 
func TestGrpcTLSMissingCertOrKey(t *testing.T) { configs := []string{ fmt.Sprintf(`"tls_client_cert": "%s"`, Cert), fmt.Sprintf(`"tls_client_key": "%s"`, Key), } for _, trustConfig := range configs { jsonConfig := fmt.Sprintf( `{"trust_service": {"hostname": "notary-signer", %s}}`, trustConfig) config := configure([]byte(jsonConfig)) tlsConfig, err := grpcTLS(config) assert.Error(t, err) assert.Nil(t, tlsConfig) assert.True(t, strings.Contains(err.Error(), "Partial TLS configuration found.")) } } // If no TLS configuration is provided for the host server, a tls config with // the provided serverName is still returned. func TestGrpcTLSNoConfig(t *testing.T) { tlsConfig, err := grpcTLS( configure([]byte(`{"trust_service": {"hostname": "notary-signer"}}`))) assert.NoError(t, err) assert.Equal(t, "notary-signer", tlsConfig.ServerName) assert.Nil(t, tlsConfig.RootCAs) assert.Nil(t, tlsConfig.Certificates) } // The rest of the functionality of grpcTLS depends upon // utils.ConfigureClientTLS, so this test just asserts that if successful, // the correct tls.Config is returned based on all the configuration parameters func TestGrpcTLSSuccess(t *testing.T) { keypair, err := tls.LoadX509KeyPair(Cert, Key) assert.NoError(t, err, "Unable to load cert and key for testing") config := fmt.Sprintf( `{"trust_service": { "hostname": "notary-server", "tls_client_cert": "%s", "tls_client_key": "%s"}}`, Cert, Key) tlsConfig, err := grpcTLS(configure([]byte(config))) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) } // The rest of the functionality of grpcTLS depends upon // utils.ConfigureServerTLS, so this test just asserts that if it fails, // the error is propogated. 
func TestGrpcTLSFailure(t *testing.T) { config := fmt.Sprintf( `{"trust_service": { "hostname": "notary-server", "tls_client_cert": "no-exist", "tls_client_key": "%s"}}`, Key) tlsConfig, err := grpcTLS(configure([]byte(config))) assert.Error(t, err) assert.Nil(t, tlsConfig) assert.True(t, strings.Contains(err.Error(), "Unable to configure TLS to the trust service")) } notary-0.1/cmd/notary-signer/000077500000000000000000000000001262207326400162365ustar00rootroot00000000000000notary-0.1/cmd/notary-signer/config.json000066400000000000000000000006571262207326400204060ustar00rootroot00000000000000{ "server": { "http_addr": ":4444", "grpc_addr": ":7899", "cert_file": "./fixtures/notary-signer.crt", "key_file": "./fixtures/notary-signer.key", "client_ca_file": "./fixtures/notary-server.crt" }, "crypto": { "pkcslib": "/usr/local/lib/softhsm/libsofthsm2.so" }, "logging": { "level": 5 }, "storage": { "backend": "mysql", "db_url": "dockercondemo:dockercondemo@tcp(notarymysql:3306)/dockercondemo" } } notary-0.1/cmd/notary-signer/main.go000066400000000000000000000157461262207326400175260ustar00rootroot00000000000000// +build pkcs11 package main import ( "crypto/tls" "database/sql" "errors" _ "expvar" "flag" "fmt" "log" "net" "net/http" "os" "path/filepath" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "github.com/docker/distribution/health" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/signer" "github.com/docker/notary/signer/api" "github.com/docker/notary/signer/keydbstore" "github.com/docker/notary/tuf/data" "github.com/docker/notary/utils" "github.com/docker/notary/version" _ "github.com/go-sql-driver/mysql" "github.com/miekg/pkcs11" "github.com/spf13/viper" "github.com/Sirupsen/logrus" pb "github.com/docker/notary/proto" ) const ( debugAddr = "localhost:8080" dbType = "mysql" envPrefix = "NOTARY_SIGNER" defaultAliasEnv = "DEFAULT_ALIAS" pinCode = "PIN" ) var ( debug bool configFile string mainViper = viper.New() ) func 
init() { // set default log level to Error mainViper.SetDefault("logging", map[string]interface{}{"level": 2}) mainViper.SetEnvPrefix(envPrefix) mainViper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) mainViper.AutomaticEnv() // Setup flags flag.StringVar(&configFile, "config", "", "Path to configuration file") flag.BoolVar(&debug, "debug", false, "show the version and exit") } func passphraseRetriever(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) { passphrase = mainViper.GetString(strings.ToUpper(alias)) if passphrase == "" { return "", false, errors.New("expected env variable to not be empty: " + alias) } return passphrase, false, nil } // parses and sets up the TLS for the signer http + grpc server func signerTLS(configuration *viper.Viper, printUsage bool) (*tls.Config, error) { certFile := configuration.GetString("server.cert_file") keyFile := configuration.GetString("server.key_file") if certFile == "" || keyFile == "" { if printUsage { usage() } return nil, fmt.Errorf("Certificate and key are mandatory") } clientCAFile := configuration.GetString("server.client_ca_file") tlsConfig, err := utils.ConfigureServerTLS(&utils.ServerTLSOpts{ ServerCertFile: certFile, ServerKeyFile: keyFile, RequireClientAuth: clientCAFile != "", ClientCAFile: clientCAFile, }) if err != nil { return nil, fmt.Errorf("Unable to set up TLS: %s", err.Error()) } return tlsConfig, nil } func main() { flag.Usage = usage flag.Parse() if debug { go debugServer(debugAddr) } // when the signer starts print the version for debugging and issue logs later logrus.Infof("Version: %s, Git commit: %s", version.NotaryVersion, version.GitCommit) filename := filepath.Base(configFile) ext := filepath.Ext(configFile) configPath := filepath.Dir(configFile) mainViper.SetConfigType(strings.TrimPrefix(ext, ".")) mainViper.SetConfigName(strings.TrimSuffix(filename, ext)) mainViper.AddConfigPath(configPath) err := mainViper.ReadInConfig() if err != nil { 
logrus.Error("Viper Error: ", err.Error()) logrus.Error("Could not read config at ", configFile) os.Exit(1) } logrus.SetLevel(logrus.Level(mainViper.GetInt("logging.level"))) tlsConfig, err := signerTLS(mainViper, true) if err != nil { logrus.Fatalf(err.Error()) } cryptoServices := make(signer.CryptoServiceIndex) configDBType := strings.ToLower(mainViper.GetString("storage.backend")) dbURL := mainViper.GetString("storage.db_url") if configDBType != dbType || dbURL == "" { usage() log.Fatalf("Currently only a MySQL database backend is supported.") } dbSQL, err := sql.Open(configDBType, dbURL) if err != nil { log.Fatalf("failed to open the database: %s, %v", dbURL, err) } defaultAlias := mainViper.GetString(defaultAliasEnv) logrus.Debug("Default Alias: ", defaultAlias) keyStore, err := keydbstore.NewKeyDBStore(passphraseRetriever, defaultAlias, configDBType, dbSQL) if err != nil { log.Fatalf("failed to create a new keydbstore: %v", err) } health.RegisterPeriodicFunc( "DB operational", keyStore.HealthCheck, time.Second*60) cryptoService := cryptoservice.NewCryptoService("", keyStore) cryptoServices[data.ED25519Key] = cryptoService cryptoServices[data.ECDSAKey] = cryptoService //RPC server setup kms := &api.KeyManagementServer{CryptoServices: cryptoServices, HealthChecker: health.CheckStatus} ss := &api.SignerServer{CryptoServices: cryptoServices, HealthChecker: health.CheckStatus} rpcAddr := mainViper.GetString("server.grpc_addr") lis, err := net.Listen("tcp", rpcAddr) if err != nil { log.Fatalf("failed to listen %v", err) } creds := credentials.NewTLS(tlsConfig) opts := []grpc.ServerOption{grpc.Creds(creds)} grpcServer := grpc.NewServer(opts...) 
pb.RegisterKeyManagementServer(grpcServer, kms) pb.RegisterSignerServer(grpcServer, ss) go grpcServer.Serve(lis) httpAddr := mainViper.GetString("server.http_addr") if httpAddr == "" { log.Fatalf("Server address is required") } //HTTP server setup server := http.Server{ Addr: httpAddr, Handler: api.Handlers(cryptoServices), TLSConfig: tlsConfig, } if debug { log.Println("RPC server listening on", rpcAddr) log.Println("HTTP server listening on", httpAddr) } err = server.ListenAndServeTLS("", "") if err != nil { log.Fatal("HTTPS server failed to start:", err) } } func usage() { log.Println("usage:", os.Args[0], "") flag.PrintDefaults() } // debugServer starts the debug server with pprof, expvar among other // endpoints. The addr should not be exposed externally. For most of these to // work, tls cannot be enabled on the endpoint, so it is generally separate. func debugServer(addr string) { log.Println("Debug server listening on", addr) if err := http.ListenAndServe(addr, nil); err != nil { log.Fatalf("error listening on debug interface: %v", err) } } // SetupHSMEnv is a method that depends on the existences func SetupHSMEnv(libraryPath, pin string) (*pkcs11.Ctx, pkcs11.SessionHandle) { p := pkcs11.New(libraryPath) if p == nil { log.Fatalf("Failed to init library") } if err := p.Initialize(); err != nil { log.Fatalf("Initialize error %s\n", err.Error()) } slots, err := p.GetSlotList(true) if err != nil { log.Fatalf("Failed to list HSM slots %s", err) } // Check to see if we got any slots from the HSM. if len(slots) < 1 { log.Fatalln("No HSM Slots found") } // CKF_SERIAL_SESSION: TRUE if cryptographic functions are performed in serial with the application; FALSE if the functions may be performed in parallel with the application. 
// CKF_RW_SESSION: TRUE if the session is read/write; FALSE if the session is read-only session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) if err != nil { log.Fatalf("Failed to Start Session with HSM %s", err) } if err = p.Login(session, pkcs11.CKU_USER, pin); err != nil { log.Fatalf("User PIN %s\n", err.Error()) } return p, session } func cleanup(ctx *pkcs11.Ctx, session pkcs11.SessionHandle) { ctx.Destroy() ctx.Finalize() ctx.CloseSession(session) ctx.Logout(session) } notary-0.1/cmd/notary-signer/main_test.go000066400000000000000000000042141262207326400205510ustar00rootroot00000000000000// +build pkcs11 package main import ( "bytes" "crypto/tls" "fmt" "strings" "testing" "github.com/spf13/viper" "github.com/stretchr/testify/assert" ) const ( Cert = "../../fixtures/notary-signer.crt" Key = "../../fixtures/notary-signer.key" Root = "../../fixtures/root-ca.crt" ) // initializes a viper object with test configuration func configure(jsonConfig []byte) *viper.Viper { config := viper.New() config.SetConfigType("json") config.ReadConfig(bytes.NewBuffer(jsonConfig)) return config } func TestSignerTLSMissingCertAndOrKey(t *testing.T) { configs := []string{ "{}", fmt.Sprintf(`{"cert_file": "%s"}`, Cert), fmt.Sprintf(`{"key_file": "%s"}`, Key), } for _, serverConfig := range configs { config := configure( []byte(fmt.Sprintf(`{"server": %s}`, serverConfig))) tlsConfig, err := signerTLS(config, false) assert.Error(t, err) assert.Nil(t, tlsConfig) assert.Equal(t, "Certificate and key are mandatory", err.Error()) } } // The rest of the functionality of signerTLS depends upon // utils.ConfigureServerTLS, so this test just asserts that if successful, // the correct tls.Config is returned based on all the configuration parameters func TestSignerTLSSuccess(t *testing.T) { keypair, err := tls.LoadX509KeyPair(Cert, Key) assert.NoError(t, err, "Unable to load cert and key for testing") config := fmt.Sprintf( `{"server": {"cert_file": "%s", 
"key_file": "%s", "client_ca_file": "%s"}}`, Cert, Key, Cert) tlsConfig, err := signerTLS(configure([]byte(config)), false) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) assert.NotNil(t, tlsConfig.ClientCAs) } // The rest of the functionality of signerTLS depends upon // utils.ConfigureServerTLS, so this test just asserts that if it fails, // the error is propogated. func TestSignerTLSFailure(t *testing.T) { config := fmt.Sprintf( `{"server": {"cert_file": "%s", "key_file": "%s", "client_ca_file": "%s"}}`, Cert, Key, "non-existant") tlsConfig, err := signerTLS(configure([]byte(config)), false) assert.Error(t, err) assert.Nil(t, tlsConfig) assert.True(t, strings.Contains(err.Error(), "Unable to set up TLS")) } notary-0.1/cmd/notary/000077500000000000000000000000001262207326400147515ustar00rootroot00000000000000notary-0.1/cmd/notary/cert.go000066400000000000000000000101601262207326400162330ustar00rootroot00000000000000package main import ( "crypto/x509" "math" "os" "time" "github.com/docker/notary/keystoremanager" "github.com/docker/notary/trustmanager" "github.com/spf13/cobra" ) func init() { cmdCert.AddCommand(cmdCertList) cmdCertRemove.Flags().StringVarP(&certRemoveGUN, "gun", "g", "", "Globally unique name to delete certificates for") cmdCertRemove.Flags().BoolVarP(&certRemoveYes, "yes", "y", false, "Answer yes to the removal question (no confirmation)") cmdCert.AddCommand(cmdCertRemove) } var cmdCert = &cobra.Command{ Use: "cert", Short: "Operates on certificates.", Long: `Operations on certificates.`, } var cmdCertList = &cobra.Command{ Use: "list", Short: "Lists certificates.", Long: "Lists root certificates known to notary.", Run: certList, } var certRemoveGUN string var certRemoveYes bool var cmdCertRemove = &cobra.Command{ Use: "remove [ certID ]", Short: "Removes the certificate with the given cert ID.", Long: "Remove the certificate with the given cert ID from the local host.", Run: certRemove, } // certRemove 
deletes a certificate given a cert ID or a gun func certRemove(cmd *cobra.Command, args []string) { // If the user hasn't provided -g with a gun, or a cert ID, show usage // If the user provided -g and a cert ID, also show usage if (len(args) < 1 && certRemoveGUN == "") || (len(args) > 0 && certRemoveGUN != "") { cmd.Usage() fatalf("Must specify the cert ID or the GUN of the certificates to remove") } parseConfig() trustDir := mainViper.GetString("trust_dir") keyStoreManager, err := keystoremanager.NewKeyStoreManager(trustDir) if err != nil { fatalf("Failed to create a new truststore manager with directory: %s", trustDir) } var certsToRemove []*x509.Certificate // If there is no GUN, we expect a cert ID if certRemoveGUN == "" { certID := args[0] // This is an invalid ID if len(certID) != idSize { fatalf("Invalid certificate ID provided: %s", certID) } // Attempt to find this certificates cert, err := keyStoreManager.TrustedCertificateStore().GetCertificateByCertID(certID) if err != nil { fatalf("Unable to retrieve certificate with cert ID: %s", certID) } certsToRemove = append(certsToRemove, cert) } else { // We got the -g flag, it's a GUN certs, err := keyStoreManager.TrustedCertificateStore().GetCertificatesByCN(certRemoveGUN) if err != nil { fatalf("%v", err) } certsToRemove = append(certsToRemove, certs...) } // List all the keys about to be removed cmd.Printf("The following certificates will be removed:\n\n") for _, cert := range certsToRemove { // This error can't occur because we're getting certs off of an // x509 store that indexes by ID. certID, _ := trustmanager.FingerprintCert(cert) cmd.Printf("%s - %s\n", cert.Subject.CommonName, certID) } cmd.Println("\nAre you sure you want to remove these certificates? 
(yes/no)") // Ask for confirmation before removing certificates, unless -y is provided if !certRemoveYes { confirmed := askConfirm() if !confirmed { fatalf("Aborting action.") } } // Remove all the certs for _, cert := range certsToRemove { err = keyStoreManager.TrustedCertificateStore().RemoveCert(cert) if err != nil { fatalf("Failed to remove root certificate for %s", cert.Subject.CommonName) } } } func certList(cmd *cobra.Command, args []string) { if len(args) > 0 { cmd.Usage() os.Exit(1) } parseConfig() trustDir := mainViper.GetString("trust_dir") keyStoreManager, err := keystoremanager.NewKeyStoreManager(trustDir) if err != nil { fatalf("Failed to create a new truststore manager with directory: %s", trustDir) } cmd.Println("") cmd.Println("# Trusted Certificates:") trustedCerts := keyStoreManager.TrustedCertificateStore().GetCertificates() for _, c := range trustedCerts { printCert(cmd, c) } } func printCert(cmd *cobra.Command, cert *x509.Certificate) { timeDifference := cert.NotAfter.Sub(time.Now()) certID, err := trustmanager.FingerprintCert(cert) if err != nil { fatalf("Could not fingerprint certificate: %v", err) } cmd.Printf("%s %s (expires in: %v days)\n", cert.Subject.CommonName, certID, math.Floor(timeDifference.Hours()/24)) } notary-0.1/cmd/notary/config.json000066400000000000000000000001361262207326400171110ustar00rootroot00000000000000{ "remote_server": { "url": "https://notary-server:4443", "root_ca": "root-ca.crt" } } notary-0.1/cmd/notary/integration_nonpkcs11_test.go000066400000000000000000000012701262207326400225570ustar00rootroot00000000000000// +build !pkcs11 package main import ( "testing" "github.com/docker/notary/passphrase" ) func rootOnHardware() bool { return false } // Per-test set up that returns a cleanup function. 
This set up changes the // passphrase retriever to always produce a constant passphrase func setUp(t *testing.T) func() { oldRetriever := retriever var fake = func(k, a string, c bool, n int) (string, bool, error) { return testPassphrase, false, nil } retriever = fake getRetriever = func() passphrase.Retriever { return fake } return func() { retriever = oldRetriever getRetriever = getPassphraseRetriever } } // no-op func verifyRootKeyOnHardware(t *testing.T, rootKeyID string) {} notary-0.1/cmd/notary/integration_pkcs11_test.go000066400000000000000000000034311262207326400220450ustar00rootroot00000000000000// +build pkcs11 package main import ( "testing" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager/yubikey" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) var rootOnHardware = yubikey.YubikeyAccessible // Per-test set up that returns a cleanup function. This set up: // - changes the passphrase retriever to always produce a constant passphrase // - disables touch on yubikeys // - deletes all keys on the yubikey func setUp(t *testing.T) func() { oldRetriever := retriever var fake = func(k, a string, c bool, n int) (string, bool, error) { if k == "Yubikey" { return oldRetriever(k, a, c, n) } return testPassphrase, false, nil } retriever = fake getRetriever = func() passphrase.Retriever { return fake } yubikey.SetYubikeyKeyMode(yubikey.KeymodeNone) // //we're just removing keys here, so nil is fine s, err := yubikey.NewYubiKeyStore(nil, retriever) assert.NoError(t, err) for k := range s.ListKeys() { err := s.RemoveKey(k) assert.NoError(t, err) } return func() { retriever = oldRetriever getRetriever = getPassphraseRetriever yubikey.SetYubikeyKeyMode(yubikey.KeymodeTouch | yubikey.KeymodePinOnce) } } // ensures that the root is actually on the yubikey - this makes sure the // commands are hooked up to interact with the yubikey, rather than right files // on disk func verifyRootKeyOnHardware(t *testing.T, rootKeyID 
string) { // do not bother verifying if there is no yubikey available if yubikey.YubikeyAccessible() { // //we're just getting keys here, so nil is fine s, err := yubikey.NewYubiKeyStore(nil, retriever) assert.NoError(t, err) privKey, role, err := s.GetKey(rootKeyID) assert.NoError(t, err) assert.NotNil(t, privKey) assert.Equal(t, data.CanonicalRootRole, role) } } notary-0.1/cmd/notary/integration_test.go000066400000000000000000000403741262207326400206720ustar00rootroot00000000000000// Actually start up a notary server and run through basic TUF and key // interactions via the client. // Note - if using Yubikey, retrieving pins/touch doesn't seem to work right // when running in the midst of all tests. package main import ( "bytes" "crypto/rand" "io/ioutil" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/Sirupsen/logrus" ctxu "github.com/docker/distribution/context" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/server" "github.com/docker/notary/server/storage" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) var testPassphrase = "passphrase" // run a command and return the output as a string func runCommand(t *testing.T, tempDir string, args ...string) (string, error) { // Using a new viper and Command so we don't have state between command invocations mainViper = viper.New() cmd := &cobra.Command{} setupCommand(cmd) b := new(bytes.Buffer) // Create an empty config file so we don't load the default on ~/.notary/config.json configFile := filepath.Join(tempDir, "config.json") cmd.SetArgs(append([]string{"-c", configFile, "-d", tempDir}, args...)) cmd.SetOutput(b) retErr := cmd.Execute() output, err := ioutil.ReadAll(b) assert.NoError(t, err) return string(output), retErr } // makes a testing notary-server func setupServer() *httptest.Server { // Set up server ctx := 
context.WithValue( context.Background(), "metaStore", storage.NewMemStorage()) ctx = context.WithValue(ctx, "keyAlgorithm", data.ECDSAKey) // Eat the logs instead of spewing them out var b bytes.Buffer l := logrus.New() l.Out = &b ctx = ctxu.WithLogger(ctx, logrus.NewEntry(l)) cryptoService := cryptoservice.NewCryptoService( "", trustmanager.NewKeyMemoryStore(retriever)) return httptest.NewServer(server.RootHandler(nil, ctx, cryptoService)) } // Initializes a repo, adds a target, publishes the target, lists the target, // verifies the target, and then removes the target. func TestClientTufInteraction(t *testing.T) { // -- setup -- cleanup := setUp(t) defer cleanup() tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) server := setupServer() defer server.Close() tempFile, err := ioutil.TempFile("/tmp", "targetfile") assert.NoError(t, err) tempFile.Close() defer os.Remove(tempFile.Name()) var ( output string target = "sdgkadga" ) // -- tests -- // init repo _, err = runCommand(t, tempDir, "-s", server.URL, "init", "gun") assert.NoError(t, err) // add a target _, err = runCommand(t, tempDir, "add", "gun", target, tempFile.Name()) assert.NoError(t, err) // check status - see target output, err = runCommand(t, tempDir, "status", "gun") assert.NoError(t, err) assert.True(t, strings.Contains(output, target)) // publish repo _, err = runCommand(t, tempDir, "-s", server.URL, "publish", "gun") assert.NoError(t, err) // check status - no targets output, err = runCommand(t, tempDir, "status", "gun") assert.NoError(t, err) assert.False(t, strings.Contains(string(output), target)) // list repo - see target output, err = runCommand(t, tempDir, "-s", server.URL, "list", "gun") assert.NoError(t, err) assert.True(t, strings.Contains(string(output), target)) // lookup target and repo - see target output, err = runCommand(t, tempDir, "-s", server.URL, "lookup", "gun", target) assert.NoError(t, err) assert.True(t, strings.Contains(string(output), target)) // verify repo - 
empty file output, err = runCommand(t, tempDir, "-s", server.URL, "verify", "gun", target) assert.NoError(t, err) // remove target _, err = runCommand(t, tempDir, "remove", "gun", target) assert.NoError(t, err) // publish repo _, err = runCommand(t, tempDir, "-s", server.URL, "publish", "gun") assert.NoError(t, err) // list repo - don't see target output, err = runCommand(t, tempDir, "-s", server.URL, "list", "gun") assert.NoError(t, err) assert.False(t, strings.Contains(string(output), target)) } // Splits a string into lines, and returns any lines that are not empty ( // striped of whitespace) func splitLines(chunk string) []string { splitted := strings.Split(strings.TrimSpace(chunk), "\n") var results []string for _, line := range splitted { line := strings.TrimSpace(line) if line != "" { results = append(results, line) } } return results } // List keys, parses the output, and returns the unique key IDs as an array // of root key IDs and an array of signing key IDs. Output expected looks like: // ROLE GUN KEY ID LOCATION // ---------------------------------------------------------------- // root 8bd63a896398b558ac... file (.../private) // snapshot repo e9e9425cd9a85fc7a5... file (.../private) // targets repo f5b84e2d92708c5acb... file (.../private) func getUniqueKeys(t *testing.T, tempDir string) ([]string, []string) { output, err := runCommand(t, tempDir, "key", "list") assert.NoError(t, err) lines := splitLines(output) if len(lines) == 1 && lines[0] == "No signing keys found." 
{ return []string{}, []string{} } if len(lines) < 3 { // 2 lines of header, at least 1 line with keys t.Logf("This output is not what is expected by the test:\n%s", output) } var ( rootMap = make(map[string]bool) nonrootMap = make(map[string]bool) root []string nonroot []string ) // first two lines are header for _, line := range lines[2:] { parts := strings.Fields(line) var ( placeToGo map[string]bool keyID string ) if strings.TrimSpace(parts[0]) == "root" { // no gun, so there are only 3 fields placeToGo, keyID = rootMap, parts[1] } else { // gun comes between role and key ID placeToGo, keyID = nonrootMap, parts[2] } // keys are 32-chars long (32 byte shasum, hex-encoded) assert.Len(t, keyID, 64) placeToGo[keyID] = true } for k := range rootMap { root = append(root, k) } for k := range nonrootMap { nonroot = append(nonroot, k) } return root, nonroot } // List keys, parses the output, and asserts something about the number of root // keys and number of signing keys, as well as returning them. func assertNumKeys(t *testing.T, tempDir string, numRoot, numSigning int, rootOnDisk bool) ([]string, []string) { root, signing := getUniqueKeys(t, tempDir) assert.Len(t, root, numRoot) assert.Len(t, signing, numSigning) for _, rootKeyID := range root { _, err := os.Stat(filepath.Join( tempDir, "private", "root_keys", rootKeyID+"_root.key")) // os.IsExist checks to see if the error is because a file already // exist, and hence doesn't actually the right funciton to use here assert.Equal(t, rootOnDisk, !os.IsNotExist(err)) // this function is declared is in the build-tagged setup files verifyRootKeyOnHardware(t, rootKeyID) } return root, signing } // Adds the given target to the gun, publishes it, and lists it to ensure that // it appears. Returns the listing output. 
func assertSuccessfullyPublish( t *testing.T, tempDir, url, gun, target, fname string) string { _, err := runCommand(t, tempDir, "add", gun, target, fname) assert.NoError(t, err) _, err = runCommand(t, tempDir, "-s", url, "publish", gun) assert.NoError(t, err) output, err := runCommand(t, tempDir, "-s", url, "list", gun) assert.NoError(t, err) assert.True(t, strings.Contains(string(output), target)) return output } // Tests root key generation and key rotation func TestClientKeyGenerationRotation(t *testing.T) { // -- setup -- cleanup := setUp(t) defer cleanup() tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) tempfiles := make([]string, 2) for i := 0; i < 2; i++ { tempFile, err := ioutil.TempFile("/tmp", "targetfile") assert.NoError(t, err) tempFile.Close() tempfiles[i] = tempFile.Name() defer os.Remove(tempFile.Name()) } server := setupServer() defer server.Close() var target = "sdgkadga" // -- tests -- // starts out with no keys assertNumKeys(t, tempDir, 0, 0, true) // generate root key produces a single root key and no other keys _, err := runCommand(t, tempDir, "key", "generate", data.ECDSAKey) assert.NoError(t, err) assertNumKeys(t, tempDir, 1, 0, true) // initialize a repo, should have signing keys and no new root key _, err = runCommand(t, tempDir, "-s", server.URL, "init", "gun") assert.NoError(t, err) origRoot, origSign := assertNumKeys(t, tempDir, 1, 2, true) // publish using the original keys assertSuccessfullyPublish(t, tempDir, server.URL, "gun", target, tempfiles[0]) // rotate the signing keys _, err = runCommand(t, tempDir, "key", "rotate", "gun") assert.NoError(t, err) root, sign := assertNumKeys(t, tempDir, 1, 4, true) assert.Equal(t, origRoot[0], root[0]) // there should be the new keys and the old keys for _, origKey := range origSign { found := false for _, key := range sign { if key == origKey { found = true } } assert.True(t, found, "Old key not found in list of old and new keys") } // publish the key rotation _, err = 
runCommand(t, tempDir, "-s", server.URL, "publish", "gun") assert.NoError(t, err) root, sign = assertNumKeys(t, tempDir, 1, 2, true) assert.Equal(t, origRoot[0], root[0]) // just do a cursory rotation check that the keys aren't equal anymore for _, origKey := range origSign { for _, key := range sign { assert.NotEqual( t, key, origKey, "One of the signing keys was not removed") } } // publish using the new keys output := assertSuccessfullyPublish( t, tempDir, server.URL, "gun", target+"2", tempfiles[1]) // assert that the previous target is sitll there assert.True(t, strings.Contains(string(output), target)) } // Tests backup/restore root+signing keys - repo with restored keys should be // able to publish successfully func TestClientKeyBackupAndRestore(t *testing.T) { // -- setup -- cleanup := setUp(t) defer cleanup() dirs := make([]string, 3) for i := 0; i < 3; i++ { tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) dirs[i] = tempDir } tempfiles := make([]string, 2) for i := 0; i < 2; i++ { tempFile, err := ioutil.TempFile("/tmp", "tempfile") assert.NoError(t, err) tempFile.Close() tempfiles[i] = tempFile.Name() defer os.Remove(tempFile.Name()) } server := setupServer() defer server.Close() var ( target = "sdgkadga" err error ) // create two repos and publish a target for _, gun := range []string{"gun1", "gun2"} { _, err = runCommand(t, dirs[0], "-s", server.URL, "init", gun) assert.NoError(t, err) assertSuccessfullyPublish( t, dirs[0], server.URL, gun, target, tempfiles[0]) } assertNumKeys(t, dirs[0], 1, 4, true) // -- tests -- zipfile := tempfiles[0] + ".zip" defer os.Remove(zipfile) // backup then restore all keys _, err = runCommand(t, dirs[0], "key", "backup", zipfile) assert.NoError(t, err) _, err = runCommand(t, dirs[1], "key", "restore", zipfile) assert.NoError(t, err) assertNumKeys(t, dirs[1], 1, 4, !rootOnHardware()) // all keys should be there // can list and publish to both repos using restored keys for _, gun := range []string{"gun1", 
"gun2"} { output, err := runCommand(t, dirs[1], "-s", server.URL, "list", gun) assert.NoError(t, err) assert.True(t, strings.Contains(string(output), target)) assertSuccessfullyPublish( t, dirs[1], server.URL, gun, target+"2", tempfiles[1]) } // backup and restore keys for one gun _, err = runCommand(t, dirs[0], "key", "backup", zipfile, "-g", "gun1") assert.NoError(t, err) _, err = runCommand(t, dirs[2], "key", "restore", zipfile) assert.NoError(t, err) // this function is declared is in the build-tagged setup files if rootOnHardware() { // hardware root is still present, and the key will ONLY be on hardware // and not on disk assertNumKeys(t, dirs[2], 1, 2, false) } else { // only 2 signing keys should be there, and no root key assertNumKeys(t, dirs[2], 0, 2, true) } } // Generate a root key and export the root key only. Return the key ID // exported. func exportRoot(t *testing.T, exportTo string) string { tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) // generate root key produces a single root key and no other keys _, err := runCommand(t, tempDir, "key", "generate", data.ECDSAKey) assert.NoError(t, err) oldRoot, _ := assertNumKeys(t, tempDir, 1, 0, true) // export does not require a password oldRetriever := retriever retriever = nil defer func() { // but import will, later retriever = oldRetriever }() _, err = runCommand( t, tempDir, "key", "export", oldRoot[0], exportTo) assert.NoError(t, err) return oldRoot[0] } // Tests import/export root key only func TestClientKeyImportExportRootOnly(t *testing.T) { // -- setup -- cleanup := setUp(t) defer cleanup() tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) server := setupServer() defer server.Close() var ( target = "sdgkadga" rootKeyID string ) tempFile, err := ioutil.TempFile("/tmp", "pemfile") assert.NoError(t, err) // close later, because we might need to write to it defer os.Remove(tempFile.Name()) // -- tests -- if rootOnHardware() { t.Log("Cannot export a key from hardware. 
Will generate one to import.") privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err) pemBytes, err := trustmanager.EncryptPrivateKey(privKey, testPassphrase) assert.NoError(t, err) nBytes, err := tempFile.Write(pemBytes) assert.NoError(t, err) tempFile.Close() assert.Equal(t, len(pemBytes), nBytes) rootKeyID = privKey.ID() } else { tempFile.Close() rootKeyID = exportRoot(t, tempFile.Name()) } // import the key _, err = runCommand(t, tempDir, "key", "import", tempFile.Name()) assert.NoError(t, err) // if there is hardware available, root will only be on hardware, and not // on disk newRoot, _ := assertNumKeys(t, tempDir, 1, 0, !rootOnHardware()) assert.Equal(t, rootKeyID, newRoot[0]) // Just to make sure, init a repo and publish _, err = runCommand(t, tempDir, "-s", server.URL, "init", "gun") assert.NoError(t, err) assertNumKeys(t, tempDir, 1, 2, !rootOnHardware()) assertSuccessfullyPublish( t, tempDir, server.URL, "gun", target, tempFile.Name()) } func assertNumCerts(t *testing.T, tempDir string, expectedNum int) []string { output, err := runCommand(t, tempDir, "cert", "list") assert.NoError(t, err) certs := splitLines( strings.TrimPrefix(strings.TrimSpace(output), "# Trusted Certificates:")) assert.Len(t, certs, expectedNum) return certs } // TestClientCertInteraction func TestClientCertInteraction(t *testing.T) { // -- setup -- cleanup := setUp(t) defer cleanup() tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) server := setupServer() defer server.Close() // -- tests -- _, err := runCommand(t, tempDir, "-s", server.URL, "init", "gun1") assert.NoError(t, err) _, err = runCommand(t, tempDir, "-s", server.URL, "init", "gun2") assert.NoError(t, err) certs := assertNumCerts(t, tempDir, 2) // remove certs for one gun _, err = runCommand(t, tempDir, "cert", "remove", "-g", "gun1", "-y") assert.NoError(t, err) certs = assertNumCerts(t, tempDir, 1) // remove a single cert certID := strings.TrimSpace(strings.Split(certs[0], " 
")[1]) // passing an empty gun here because the string for the previous gun has // has already been stored (a drawback of running these commands without) // shelling out _, err = runCommand(t, tempDir, "cert", "remove", certID, "-y", "-g", "") assert.NoError(t, err) assertNumCerts(t, tempDir, 0) } // Tests default root key generation func TestDefaultRootKeyGeneration(t *testing.T) { // -- setup -- cleanup := setUp(t) defer cleanup() tempDir := tempDirWithConfig(t, "{}") defer os.RemoveAll(tempDir) // -- tests -- // starts out with no keys assertNumKeys(t, tempDir, 0, 0, true) // generate root key with no algorithm produces a single ECDSA root key and no other keys _, err := runCommand(t, tempDir, "key", "generate") assert.NoError(t, err) assertNumKeys(t, tempDir, 1, 0, true) } func tempDirWithConfig(t *testing.T, config string) string { tempDir, err := ioutil.TempDir("/tmp", "repo") assert.NoError(t, err) err = ioutil.WriteFile(filepath.Join(tempDir, "config.json"), []byte(config), 0644) assert.NoError(t, err) return tempDir } func TestMain(m *testing.M) { if testing.Short() { // skip os.Exit(0) } os.Exit(m.Run()) } notary-0.1/cmd/notary/keys.go000066400000000000000000000350461262207326400162630ustar00rootroot00000000000000package main import ( "archive/zip" "bufio" "fmt" "io" "os" "path/filepath" "sort" "strconv" "strings" notaryclient "github.com/docker/notary/client" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" ) func init() { cmdKey.AddCommand(cmdKeyList) cmdKey.AddCommand(cmdKeyGenerateRootKey) cmdKeysBackup.Flags().StringVarP(&keysExportGUN, "gun", "g", "", "Globally Unique Name to export keys for") cmdKey.AddCommand(cmdKeysBackup) cmdKey.AddCommand(cmdKeyExportRoot) cmdKeyExportRoot.Flags().BoolVarP(&keysExportRootChangePassphrase, "change-passphrase", "p", false, "Set a new passphrase for 
the key being exported") cmdKey.AddCommand(cmdKeysRestore) cmdKey.AddCommand(cmdKeyImportRoot) cmdKey.AddCommand(cmdRotateKey) cmdKey.AddCommand(cmdKeyRemove) } var cmdKey = &cobra.Command{ Use: "key", Short: "Operates on keys.", Long: `Operations on private keys.`, } var cmdKeyList = &cobra.Command{ Use: "list", Short: "Lists keys.", Long: "Lists all keys known to notary.", Run: keysList, } var cmdRotateKey = &cobra.Command{ Use: "rotate [ GUN ]", Short: "Rotate all the signing (non-root) keys for the given Globally Unique Name.", Long: "Removes all old signing (non-root) keys for the given Globally Unique Name, and generates new ones. This only makes local changes - please use then `notary publish` to push the key rotation changes to the remote server.", Run: keysRotate, } var cmdKeyGenerateRootKey = &cobra.Command{ Use: "generate [ algorithm ]", Short: "Generates a new root key with a given algorithm.", Long: "Generates a new root key with a given algorithm. If hardware key storage (e.g. a Yubikey) is available, the key will be stored both on hardware and on disk (so that it can be backed up). Please make sure to back up and then remove this on-key disk immediately afterwards.", Run: keysGenerateRootKey, } var keysExportGUN string var cmdKeysBackup = &cobra.Command{ Use: "backup [ zipfilename ]", Short: "Backs up all your on-disk keys to a ZIP file.", Long: "Backs up all of your accessible of keys. The keys are reencrypted with a new passphrase. The output is a ZIP file. If the --gun option is passed, only signing keys and no root keys will be backed up. Does not work on keys that are only in hardware (e.g. Yubikeys).", Run: keysBackup, } var keysExportRootChangePassphrase bool var cmdKeyExportRoot = &cobra.Command{ Use: "export [ keyID ] [ pemfilename ]", Short: "Export a root key on disk to a PEM file.", Long: "Exports a single root key on disk, without reencrypting. The output is a PEM file. Does not work on keys that are only in hardware (e.g. 
Yubikeys).", Run: keysExportRoot, } var cmdKeysRestore = &cobra.Command{ Use: "restore [ zipfilename ]", Short: "Restore multiple keys from a ZIP file.", Long: "Restores one or more keys from a ZIP file. If hardware key storage (e.g. a Yubikey) is available, root keys will be imported into the hardware, but not backed up to disk in the same location as the other, non-root keys.", Run: keysRestore, } var cmdKeyImportRoot = &cobra.Command{ Use: "import [ pemfilename ]", Short: "Imports a root key from a PEM file.", Long: "Imports a single root key from a PEM file. If a hardware key storage (e.g. Yubikey) is available, the root key will be imported into the hardware but not backed up on disk again.", Run: keysImportRoot, } var cmdKeyRemove = &cobra.Command{ Use: "remove [ keyID ]", Short: "Removes the key with the given keyID.", Long: "Removes the key with the given keyID. If the key is stored in more than one location, you will be asked which one to remove.", Run: keyRemove, } func truncateWithEllipsis(str string, maxWidth int, leftTruncate bool) string { if len(str) <= maxWidth { return str } if leftTruncate { return fmt.Sprintf("...%s", str[len(str)-(maxWidth-3):]) } return fmt.Sprintf("%s...", str[:maxWidth-3]) } const ( maxGUNWidth = 25 maxLocWidth = 40 ) type keyInfo struct { gun string // assumption that this is "" if role is root role string keyID string location string } // We want to sort by gun, then by role, then by keyID, then by location // In the case of a root role, then there is no GUN, and a root role comes // first. 
type keyInfoSorter []keyInfo func (k keyInfoSorter) Len() int { return len(k) } func (k keyInfoSorter) Swap(i, j int) { k[i], k[j] = k[j], k[i] } func (k keyInfoSorter) Less(i, j int) bool { // special-case role if k[i].role != k[j].role { if k[i].role == data.CanonicalRootRole { return true } if k[j].role == data.CanonicalRootRole { return false } // otherwise, neither of them are root, they're just different, so // go with the traditional sort order. } // sort order is GUN, role, keyID, location. orderedI := []string{k[i].gun, k[i].role, k[i].keyID, k[i].location} orderedJ := []string{k[j].gun, k[j].role, k[j].keyID, k[j].location} for x := 0; x < 4; x++ { switch { case orderedI[x] < orderedJ[x]: return true case orderedI[x] > orderedJ[x]: return false } // continue on and evalulate the next item } // this shouldn't happen - that means two values are exactly equal return false } // Given a list of KeyStores in order of listing preference, pretty-prints the // root keys and then the signing keys. 
func prettyPrintKeys(keyStores []trustmanager.KeyStore, writer io.Writer) { var info []keyInfo for _, store := range keyStores { for keyPath, role := range store.ListKeys() { gun := "" if role != data.CanonicalRootRole { gun = filepath.Dir(keyPath) } info = append(info, keyInfo{ role: role, location: store.Name(), gun: gun, keyID: filepath.Base(keyPath), }) } } if len(info) == 0 { writer.Write([]byte("No signing keys found.\n")) return } sort.Stable(keyInfoSorter(info)) table := tablewriter.NewWriter(writer) table.SetHeader([]string{"ROLE", "GUN", "KEY ID", "LOCATION"}) table.SetBorder(false) table.SetColumnSeparator(" ") table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("-") table.SetAutoWrapText(false) for _, oneKeyInfo := range info { table.Append([]string{ oneKeyInfo.role, truncateWithEllipsis(oneKeyInfo.gun, maxGUNWidth, true), oneKeyInfo.keyID, truncateWithEllipsis(oneKeyInfo.location, maxLocWidth, true), }) } table.Render() } func keysList(cmd *cobra.Command, args []string) { if len(args) > 0 { cmd.Usage() os.Exit(1) } parseConfig() stores := getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, true) cmd.Println("") prettyPrintKeys(stores, cmd.Out()) cmd.Println("") } func keysGenerateRootKey(cmd *cobra.Command, args []string) { // We require one or no arguments (since we have a default value), but if the // user passes in more than one argument, we error out. 
if len(args) > 1 { cmd.Usage() fatalf("Please provide only one Algorithm as an argument to generate (rsa, ecdsa)") } parseConfig() // If no param is given to generate, generates an ecdsa key by default algorithm := data.ECDSAKey // If we were provided an argument lets attempt to use it as an algorithm if len(args) > 0 { algorithm = args[0] } allowedCiphers := map[string]bool{ data.ECDSAKey: true, data.RSAKey: true, } if !allowedCiphers[strings.ToLower(algorithm)] { fatalf("Algorithm not allowed, possible values are: RSA, ECDSA") } parseConfig() cs := cryptoservice.NewCryptoService( "", getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, true)..., ) pubKey, err := cs.Create(data.CanonicalRootRole, algorithm) if err != nil { fatalf("Failed to create a new root key: %v", err) } cmd.Printf("Generated new %s root key with keyID: %s\n", algorithm, pubKey.ID()) } // keysBackup exports a collection of keys to a ZIP file func keysBackup(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify output filename for export") } parseConfig() exportFilename := args[0] cs := cryptoservice.NewCryptoService( "", getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, false)..., ) exportFile, err := os.Create(exportFilename) if err != nil { fatalf("Error creating output file: %v", err) } // Must use a different passphrase retriever to avoid caching the // unlocking passphrase and reusing that. 
exportRetriever := getRetriever() if keysExportGUN != "" { err = cs.ExportKeysByGUN(exportFile, keysExportGUN, exportRetriever) } else { err = cs.ExportAllKeys(exportFile, exportRetriever) } exportFile.Close() if err != nil { os.Remove(exportFilename) fatalf("Error exporting keys: %v", err) } } // keysExportRoot exports a root key by ID to a PEM file func keysExportRoot(cmd *cobra.Command, args []string) { if len(args) < 2 { cmd.Usage() fatalf("Must specify key ID and output filename for export") } parseConfig() keyID := args[0] exportFilename := args[1] if len(keyID) != idSize { fatalf("Please specify a valid root key ID") } parseConfig() cs := cryptoservice.NewCryptoService( "", getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, false)..., ) exportFile, err := os.Create(exportFilename) if err != nil { fatalf("Error creating output file: %v", err) } if keysExportRootChangePassphrase { // Must use a different passphrase retriever to avoid caching the // unlocking passphrase and reusing that. 
exportRetriever := getRetriever() err = cs.ExportRootKeyReencrypt(exportFile, keyID, exportRetriever) } else { err = cs.ExportRootKey(exportFile, keyID) } exportFile.Close() if err != nil { os.Remove(exportFilename) fatalf("Error exporting root key: %v", err) } } // keysRestore imports keys from a ZIP file func keysRestore(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify input filename for import") } importFilename := args[0] parseConfig() cs := cryptoservice.NewCryptoService( "", getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, true)..., ) zipReader, err := zip.OpenReader(importFilename) if err != nil { fatalf("Opening file for import: %v", err) } defer zipReader.Close() err = cs.ImportKeysZip(zipReader.Reader) if err != nil { fatalf("Error importing keys: %v", err) } } // keysImportRoot imports a root key from a PEM file func keysImportRoot(cmd *cobra.Command, args []string) { if len(args) != 1 { cmd.Usage() fatalf("Must specify input filename for import") } parseConfig() cs := cryptoservice.NewCryptoService( "", getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, true)..., ) importFilename := args[0] importFile, err := os.Open(importFilename) if err != nil { fatalf("Opening file for import: %v", err) } defer importFile.Close() err = cs.ImportRootKey(importFile) if err != nil { fatalf("Error importing root key: %v", err) } } func printKey(cmd *cobra.Command, keyPath, alias, loc string) { keyID := filepath.Base(keyPath) gun := filepath.Dir(keyPath) cmd.Printf("%s - %s - %s - %s\n", gun, alias, keyID, loc) } func keysRotate(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify a GUN and target") } parseConfig() gun := args[0] nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, remoteTrustServer, nil, retriever) if err != nil { fatalf(err.Error()) } if err := nRepo.RotateKeys(); err != nil { fatalf(err.Error()) } } func 
removeKeyInteractively(keyStores []trustmanager.KeyStore, keyID string, in io.Reader, out io.Writer) error { var foundKeys [][]string var storesByIndex []trustmanager.KeyStore for _, store := range keyStores { for keypath, role := range store.ListKeys() { if filepath.Base(keypath) == keyID { foundKeys = append(foundKeys, []string{keypath, role, store.Name()}) storesByIndex = append(storesByIndex, store) } } } if len(foundKeys) == 0 { return fmt.Errorf("No key with ID %s found.", keyID) } readIn := bufio.NewReader(in) if len(foundKeys) > 1 { for { // ask the user for which key to delete fmt.Fprintf(out, "Found the following matching keys:\n") for i, info := range foundKeys { fmt.Fprintf(out, "\t%d. %s: %s (%s)\n", i+1, info[0], info[1], info[2]) } fmt.Fprint(out, "Which would you like to delete? Please enter a number: ") result, err := readIn.ReadBytes('\n') if err != nil { return err } index, err := strconv.Atoi(strings.TrimSpace(string(result))) if err != nil || index > len(foundKeys) || index < 1 { fmt.Fprintf(out, "\nInvalid choice: %s\n", string(result)) continue } foundKeys = [][]string{foundKeys[index-1]} storesByIndex = []trustmanager.KeyStore{storesByIndex[index-1]} fmt.Fprintln(out, "") break } } // Now the length must be 1 - ask for confirmation. keyDescription := fmt.Sprintf("%s (role %s) from %s", foundKeys[0][0], foundKeys[0][1], foundKeys[0][2]) fmt.Fprintf(out, "Are you sure you want to remove %s? 
[Y/n] ", keyDescription) result, err := readIn.ReadBytes('\n') if err != nil { return err } yesno := strings.ToLower(strings.TrimSpace(string(result))) if !strings.HasPrefix("yes", yesno) && yesno != "" { fmt.Fprintln(out, "\nAborting action.") return nil } err = storesByIndex[0].RemoveKey(foundKeys[0][0]) if err != nil { return err } fmt.Fprintf(out, "\nDeleted %s.\n", keyDescription) return nil } // keyRemove deletes a private key based on ID func keyRemove(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("must specify the key ID of the key to remove") } parseConfig() keyID := args[0] // This is an invalid ID if len(keyID) != idSize { fatalf("invalid key ID provided: %s", keyID) } stores := getKeyStores(cmd, mainViper.GetString("trust_dir"), retriever, true) cmd.Println("") err := removeKeyInteractively(stores, keyID, os.Stdin, cmd.Out()) cmd.Println("") if err != nil { fatalf(err.Error()) } } func getKeyStores(cmd *cobra.Command, directory string, ret passphrase.Retriever, withHardware bool) []trustmanager.KeyStore { fileKeyStore, err := trustmanager.NewKeyFileStore(directory, ret) if err != nil { fatalf("Failed to create private key store in directory: %s", directory) } ks := []trustmanager.KeyStore{fileKeyStore} if withHardware { yubiStore, err := getYubiKeyStore(fileKeyStore, ret) if err == nil && yubiStore != nil { // Note that the order is important, since we want to prioritize // the yubikey store ks = []trustmanager.KeyStore{yubiStore, fileKeyStore} } } return ks } notary-0.1/cmd/notary/keys_nonpkcs11.go000066400000000000000000000004711262207326400201520ustar00rootroot00000000000000// +build !pkcs11 package main import ( "errors" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" ) func getYubiKeyStore(fileKeyStore trustmanager.KeyStore, ret passphrase.Retriever) (trustmanager.KeyStore, error) { return nil, errors.New("Not built with hardware support") } 
notary-0.1/cmd/notary/keys_pkcs11.go000066400000000000000000000005261262207326400174400ustar00rootroot00000000000000// +build pkcs11 package main import ( "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/trustmanager/yubikey" ) func getYubiKeyStore(fileKeyStore trustmanager.KeyStore, ret passphrase.Retriever) (trustmanager.KeyStore, error) { return yubikey.NewYubiKeyStore(fileKeyStore, ret) } notary-0.1/cmd/notary/keys_test.go000066400000000000000000000245071262207326400173220ustar00rootroot00000000000000package main import ( "bytes" "crypto/rand" "fmt" "io/ioutil" "reflect" "sort" "strings" "testing" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) var ret = passphrase.ConstantRetriever("pass") func TestTruncateWithEllipsis(t *testing.T) { digits := "1234567890" // do not truncate assert.Equal(t, truncateWithEllipsis(digits, 10, true), digits) assert.Equal(t, truncateWithEllipsis(digits, 10, false), digits) assert.Equal(t, truncateWithEllipsis(digits, 11, true), digits) assert.Equal(t, truncateWithEllipsis(digits, 11, false), digits) // left and right truncate assert.Equal(t, truncateWithEllipsis(digits, 8, true), "...67890") assert.Equal(t, truncateWithEllipsis(digits, 8, false), "12345...") } func TestKeyInfoSorter(t *testing.T) { expected := []keyInfo{ {role: data.CanonicalRootRole, gun: "", keyID: "a", location: "i"}, {role: data.CanonicalRootRole, gun: "", keyID: "a", location: "j"}, {role: data.CanonicalRootRole, gun: "", keyID: "z", location: "z"}, {role: "a", gun: "a", keyID: "a", location: "y"}, {role: "b", gun: "a", keyID: "a", location: "y"}, {role: "b", gun: "a", keyID: "b", location: "y"}, {role: "b", gun: "a", keyID: "b", location: "z"}, {role: "a", gun: "b", keyID: "a", location: "z"}, } jumbled := make([]keyInfo, len(expected)) // randomish indices for j, e := range []int{3, 6, 1, 4, 0, 7, 5, 
2} { jumbled[j] = expected[e] } sort.Sort(keyInfoSorter(jumbled)) assert.True(t, reflect.DeepEqual(expected, jumbled), fmt.Sprintf("Expected %v, Got %v", expected, jumbled)) } type otherMemoryStore struct { trustmanager.KeyMemoryStore } func (l *otherMemoryStore) Name() string { return strings.Repeat("z", 70) } // Given a list of key stores, the keys should be pretty-printed with their // roles, locations, IDs, and guns first in sorted order in the key store func TestPrettyPrintRootAndSigningKeys(t *testing.T) { ret := passphrase.ConstantRetriever("pass") keyStores := []trustmanager.KeyStore{ trustmanager.NewKeyMemoryStore(ret), &otherMemoryStore{KeyMemoryStore: *trustmanager.NewKeyMemoryStore(ret)}, } longNameShortened := "..." + strings.Repeat("z", 37) // just use the same key for testing key, err := trustmanager.GenerateED25519Key(rand.Reader) assert.NoError(t, err) root := data.CanonicalRootRole // add keys to the key stores err = keyStores[0].AddKey(key.ID(), root, key) assert.NoError(t, err) err = keyStores[1].AddKey(key.ID(), root, key) assert.NoError(t, err) err = keyStores[0].AddKey(strings.Repeat("a/", 30)+key.ID(), "targets", key) assert.NoError(t, err) err = keyStores[1].AddKey("short/gun/"+key.ID(), "snapshot", key) assert.NoError(t, err) expected := [][]string{ {root, key.ID(), keyStores[0].Name()}, {root, key.ID(), longNameShortened}, {"targets", "..." 
+ strings.Repeat("/a", 11), key.ID(), keyStores[0].Name()}, {"snapshot", "short/gun", key.ID(), longNameShortened}, } var b bytes.Buffer prettyPrintKeys(keyStores, &b) text, err := ioutil.ReadAll(&b) assert.NoError(t, err) lines := strings.Split(strings.TrimSpace(string(text)), "\n") assert.Len(t, lines, len(expected)+2) // starts with headers assert.True(t, reflect.DeepEqual(strings.Fields(lines[0]), []string{"ROLE", "GUN", "KEY", "ID", "LOCATION"})) assert.Equal(t, "----", lines[1][:4]) for i, line := range lines[2:] { // we are purposely not putting spaces in test data so easier to split splitted := strings.Fields(line) for j, v := range splitted { assert.Equal(t, expected[i][j], strings.TrimSpace(v)) } } } // If there are no keys in any of the key stores, a message that there are no // signing keys should be displayed. func TestPrettyPrintZeroKeys(t *testing.T) { ret := passphrase.ConstantRetriever("pass") emptyKeyStore := trustmanager.NewKeyMemoryStore(ret) var b bytes.Buffer prettyPrintKeys([]trustmanager.KeyStore{emptyKeyStore}, &b) text, err := ioutil.ReadAll(&b) assert.NoError(t, err) lines := strings.Split(strings.TrimSpace(string(text)), "\n") assert.Len(t, lines, 1) assert.Equal(t, "No signing keys found.", lines[0]) } // If there are no keys, removeKeyInteractively will just return an error about // there not being any key func TestRemoveIfNoKey(t *testing.T) { var buf bytes.Buffer stores := []trustmanager.KeyStore{trustmanager.NewKeyMemoryStore(nil)} err := removeKeyInteractively(stores, "12345", &buf, &buf) assert.Error(t, err) assert.Contains(t, err.Error(), "No key with ID") } // If there is one key, asking to remove it will ask for confirmation. Passing // anything other than 'yes'/'y'/'' response will abort the deletion and // not delete the key. 
func TestRemoveOneKeyAbort(t *testing.T) { nos := []string{"no", "NO", "AAAARGH", " N "} store := trustmanager.NewKeyMemoryStore(ret) key, err := trustmanager.GenerateED25519Key(rand.Reader) assert.NoError(t, err) err = store.AddKey(key.ID(), "root", key) assert.NoError(t, err) stores := []trustmanager.KeyStore{store} for _, noAnswer := range nos { var out bytes.Buffer in := bytes.NewBuffer([]byte(noAnswer + "\n")) err := removeKeyInteractively(stores, key.ID(), in, &out) assert.NoError(t, err) text, err := ioutil.ReadAll(&out) assert.NoError(t, err) output := string(text) assert.Contains(t, output, "Are you sure") assert.Contains(t, output, "Aborting action") assert.Len(t, store.ListKeys(), 1) } } // If there is one key, asking to remove it will ask for confirmation. Passing // 'yes'/'y'/'' response will continue the deletion. func TestRemoveOneKeyConfirm(t *testing.T) { yesses := []string{"yes", " Y ", "yE", " ", ""} for _, yesAnswer := range yesses { store := trustmanager.NewKeyMemoryStore(ret) key, err := trustmanager.GenerateED25519Key(rand.Reader) assert.NoError(t, err) err = store.AddKey(key.ID(), "root", key) assert.NoError(t, err) var out bytes.Buffer in := bytes.NewBuffer([]byte(yesAnswer + "\n")) err = removeKeyInteractively( []trustmanager.KeyStore{store}, key.ID(), in, &out) assert.NoError(t, err) text, err := ioutil.ReadAll(&out) assert.NoError(t, err) output := string(text) assert.Contains(t, output, "Are you sure") assert.Contains(t, output, "Deleted "+key.ID()) assert.Len(t, store.ListKeys(), 0) } } // If there is more than one key, removeKeyInteractively will ask which key to // delete and will do so over and over until the user quits if the answer is // invalid. 
func TestRemoveMultikeysInvalidInput(t *testing.T) { in := bytes.NewBuffer([]byte("nota number\n9999\n-3\n0")) key, err := trustmanager.GenerateED25519Key(rand.Reader) assert.NoError(t, err) stores := []trustmanager.KeyStore{ trustmanager.NewKeyMemoryStore(ret), trustmanager.NewKeyMemoryStore(ret), } err = stores[0].AddKey(key.ID(), "root", key) assert.NoError(t, err) err = stores[1].AddKey("gun/"+key.ID(), "target", key) assert.NoError(t, err) var out bytes.Buffer err = removeKeyInteractively(stores, key.ID(), in, &out) assert.Error(t, err) text, err := ioutil.ReadAll(&out) assert.NoError(t, err) assert.Len(t, stores[0].ListKeys(), 1) assert.Len(t, stores[1].ListKeys(), 1) // It should have listed the keys over and over, asking which key the user // wanted to delete output := string(text) assert.Contains(t, output, "Found the following matching keys") var rootCount, targetCount int for _, line := range strings.Split(output, "\n") { if strings.Contains(line, key.ID()) { if strings.Contains(line, "target") { targetCount++ } else { rootCount++ } } } assert.Equal(t, rootCount, targetCount) assert.Equal(t, 4, rootCount) // for each of the 4 invalid inputs } // If there is more than one key, removeKeyInteractively will ask which key to // delete. Then it will confirm whether they want to delete, and the user can // abort at that confirmation. 
func TestRemoveMultikeysAbortChoice(t *testing.T) { in := bytes.NewBuffer([]byte("1\nn\n")) key, err := trustmanager.GenerateED25519Key(rand.Reader) assert.NoError(t, err) stores := []trustmanager.KeyStore{ trustmanager.NewKeyMemoryStore(ret), trustmanager.NewKeyMemoryStore(ret), } err = stores[0].AddKey(key.ID(), "root", key) assert.NoError(t, err) err = stores[1].AddKey("gun/"+key.ID(), "target", key) assert.NoError(t, err) var out bytes.Buffer err = removeKeyInteractively(stores, key.ID(), in, &out) assert.NoError(t, err) // no error to abort deleting text, err := ioutil.ReadAll(&out) assert.NoError(t, err) assert.Len(t, stores[0].ListKeys(), 1) assert.Len(t, stores[1].ListKeys(), 1) // It should have listed the keys, asked whether the user really wanted to // delete, and then aborted. output := string(text) assert.Contains(t, output, "Found the following matching keys") assert.Contains(t, output, "Are you sure") assert.Contains(t, output, "Aborting action") } // If there is more than one key, removeKeyInteractively will ask which key to // delete. Then it will confirm whether they want to delete, and if the user // confirms, will remove it from the correct key store. func TestRemoveMultikeysRemoveOnlyChosenKey(t *testing.T) { in := bytes.NewBuffer([]byte("1\ny\n")) key, err := trustmanager.GenerateED25519Key(rand.Reader) assert.NoError(t, err) stores := []trustmanager.KeyStore{ trustmanager.NewKeyMemoryStore(ret), trustmanager.NewKeyMemoryStore(ret), } err = stores[0].AddKey(key.ID(), "root", key) assert.NoError(t, err) err = stores[1].AddKey("gun/"+key.ID(), "target", key) assert.NoError(t, err) var out bytes.Buffer err = removeKeyInteractively(stores, key.ID(), in, &out) assert.NoError(t, err) text, err := ioutil.ReadAll(&out) assert.NoError(t, err) // It should have listed the keys, asked whether the user really wanted to // delete, and then deleted. 
output := string(text) assert.Contains(t, output, "Found the following matching keys") assert.Contains(t, output, "Are you sure") assert.Contains(t, output, "Deleted "+key.ID()) // figure out which one we picked to delete, and assert it was deleted for _, line := range strings.Split(output, "\n") { if strings.HasPrefix(line, "\t1.") { // we picked the first item if strings.Contains(line, "root") { // first key store assert.Len(t, stores[0].ListKeys(), 0) assert.Len(t, stores[1].ListKeys(), 1) } else { assert.Len(t, stores[0].ListKeys(), 1) assert.Len(t, stores[1].ListKeys(), 0) } } } } notary-0.1/cmd/notary/main.go000066400000000000000000000121511262207326400162240ustar00rootroot00000000000000package main import ( "fmt" "os" "path/filepath" "strings" "github.com/Sirupsen/logrus" "github.com/docker/notary/passphrase" "github.com/docker/notary/version" homedir "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( configDir = ".notary/" defaultServerURL = "https://notary-server:4443" idSize = 64 ) var ( verbose bool trustDir string configFile string remoteTrustServer string configPath string configFileName = "config" configFileExt = "json" retriever passphrase.Retriever getRetriever = getPassphraseRetriever mainViper = viper.New() ) func init() { retriever = getPassphraseRetriever() } func parseConfig() { if verbose { logrus.SetLevel(logrus.DebugLevel) logrus.SetOutput(os.Stderr) } // Get home directory for current user homeDir, err := homedir.Dir() if err != nil { fatalf("Cannot get current user home directory: %v", err) } if homeDir == "" { fatalf("Cannot get current user home directory") } // By default our trust directory (where keys are stored) is in ~/.notary/ mainViper.SetDefault("trust_dir", filepath.Join(homeDir, filepath.Dir(configDir))) // If there was a commandline configFile set, we parse that. 
// If there wasn't we attempt to find it on the default location ~/.notary/config if configFile != "" { configFileExt = strings.TrimPrefix(filepath.Ext(configFile), ".") configFileName = strings.TrimSuffix(filepath.Base(configFile), filepath.Ext(configFile)) configPath = filepath.Dir(configFile) } else { configPath = filepath.Join(homeDir, filepath.Dir(configDir)) } // Setup the configuration details into viper mainViper.SetConfigName(configFileName) mainViper.SetConfigType(configFileExt) mainViper.AddConfigPath(configPath) // Find and read the config file err = mainViper.ReadInConfig() if err != nil { logrus.Debugf("Configuration file not found, using defaults") // If we were passed in a configFile via -c, bail if it doesn't exist, // otherwise ignore it: we can use the defaults if configFile != "" || !os.IsNotExist(err) { fatalf("error opening config file %v", err) } } // At this point we either have the default value or the one set by the config. // Either way, the command-line flag has precedence and overwrites the value if trustDir != "" { mainViper.Set("trust_dir", trustDir) } // Expands all the possible ~/ that have been given, either through -d or config // If there is no error, use it, if not, attempt to use whatever the user gave us expandedTrustDir, err := homedir.Expand(mainViper.GetString("trust_dir")) if err == nil { mainViper.Set("trust_dir", expandedTrustDir) } logrus.Debugf("Using the following trust directory: %s", mainViper.GetString("trust_dir")) } func setupCommand(notaryCmd *cobra.Command) { var versionCmd = &cobra.Command{ Use: "version", Short: "Print the version number of notary", Long: `print the version number of notary`, Run: func(cmd *cobra.Command, args []string) { fmt.Printf("notary\n Version: %s\n Git commit: %s\n", version.NotaryVersion, version.GitCommit) }, } notaryCmd.AddCommand(versionCmd) notaryCmd.PersistentFlags().StringVarP(&trustDir, "trustDir", "d", "", "Directory where the trust data is persisted to") 
notaryCmd.PersistentFlags().StringVarP(&configFile, "configFile", "c", "", "Path to the configuration file to use") notaryCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output") notaryCmd.PersistentFlags().StringVarP(&remoteTrustServer, "server", "s", "", "Remote trust server location") notaryCmd.AddCommand(cmdKey) notaryCmd.AddCommand(cmdCert) notaryCmd.AddCommand(cmdTufInit) notaryCmd.AddCommand(cmdTufList) notaryCmd.AddCommand(cmdTufAdd) notaryCmd.AddCommand(cmdTufRemove) notaryCmd.AddCommand(cmdTufStatus) notaryCmd.AddCommand(cmdTufPublish) notaryCmd.AddCommand(cmdTufLookup) notaryCmd.AddCommand(cmdVerify) } func main() { var notaryCmd = &cobra.Command{ Use: "notary", Short: "Notary allows the creation of trusted collections.", Long: "Notary allows the creation and management of collections of signed targets, allowing the signing and validation of arbitrary content.", } notaryCmd.SetOutput(os.Stdout) setupCommand(notaryCmd) notaryCmd.Execute() } func fatalf(format string, args ...interface{}) { fmt.Printf("* fatal: "+format+"\n", args...) 
os.Exit(1) } func askConfirm() bool { var res string _, err := fmt.Scanln(&res) if err != nil { return false } if strings.EqualFold(res, "y") || strings.EqualFold(res, "yes") { return true } return false } func getPassphraseRetriever() passphrase.Retriever { baseRetriever := passphrase.PromptRetriever() env := map[string]string{ "root": os.Getenv("NOTARY_ROOT_PASSPHRASE"), "targets": os.Getenv("NOTARY_TARGETS_PASSPHRASE"), "snapshot": os.Getenv("NOTARY_SNAPSHOT_PASSPHRASE"), } return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { if v := env[alias]; v != "" { return v, numAttempts > 1, nil } return baseRetriever(keyName, alias, createNew, numAttempts) } } notary-0.1/cmd/notary/root-ca.crt000066400000000000000000000036711262207326400170360ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFhjCCA26gAwIBAgIJAMJ4Mtt6YhNLMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0G A1UECgwGRG9ja2VyMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTAeFw0xNTA3 MTYwNDI1MDBaFw0yNTA3MTMwNDI1MDBaMF8xCzAJBgNVBAYTAlVTMQswCQYDVQQI DAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2VyMRow GAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP ADCCAgoCggIBAMzUzq2O07tm3A/4emCN/294jUBnNeGlM4TgsB8W9ingw9CU7oBn CRTK94cGDHTb5ofcj9Kt4/dSL52uJpkZshmAga4fDDhtntnUHaKYzjoZSKZtq7qV hC1Dah7s3zftZn4NHiRe82loXH/W0//0MWdQCaLc8E0rd/amrd6EO+5SUwF4dXSk nWoo3oxtOEnb1uQcWWIiwLRmd1pw3PW/bt/SHssD5dJ+78/nR1qCHhJyLVpylMiy WijkMKW7mbQFefuCOsQ0QvGG3BrTLu+fVs9GYNzHC+L1bSQbfts4nOSodcB/klhd mbgVW8mrgeHww/jgb2WJW9Y3RFNp/VEuhVrHiz/NW2qE3nPLEnu0vd50jYIXbvBm fbhCoJntYAiCY0l8v+POgP3ACtsS41rcn8VyD3Ho4u4186ki71+QRQTsUk2MXRV6 AKQ9u4Cl4d0tV1oHjVyiKDv8PNakNrI48KmnF9R9wMgzDHIoBVQZraVTyPwW9HvS 8K3Lsm6QAE7pErideOyBViOiiqvW7rUaLERTkhGirX2RChwhYLtYIj0LitgzdaT4 JD1JxonqN30g2jk1+mJKMEeWBMTjFqtzuQPYH3HkHKxoNfvEuL5fsZSmhV/mR+yW lSe1f8r1qpAACj/K3mome/z8UhNxzEW8TCYkwamLkAPF485W64KIYI1tAgMBAAGj 
RTBDMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgFGMB0GA1UdDgQW BBR1DNVNxOFsi9Z7xXfnT2PH+DtoWTANBgkqhkiG9w0BAQsFAAOCAgEAUbbrI3OQ 5XO8HHpoTwVqFzSzKOuSSrcMGrv67rn+2HvVJYfxtusZBS6+Rw7QVG3daPS+pSNX NM1qyin3BjpNR2lI771yyK/yjjNH9pZPR+8ThJ8/77roLJudTCCPt49PoYgSQQsp IB75PlqnTWVwccW9pm2zSdqDxFeZpTpwEvgyX8MNCfYeynxp5+S81593z8iav16u t2I38NyFJKuxin9zNkxkpf/a9Pr/Gk56gw1OfHXp+sW/6KIzx8fjQuL6P8HEpwVG zXXA8fMX91cIFI4+DTc8mPjtYvT6/PzDWE/q6FZZnbHJ50Ngg5D8uFN5lLgZFNtf ITeoNjTk2koq8vvTW8FDpMkb50zqGdBoIdDtRFd3oot+MEg+6mba+Kttwg05aJ9a SIIxjvU4NH6qOXBSgzaI1hMr7DTBnaXxMEBiaNaPg2nqi6uhaUOcVw3F01yBfGfX aGsNLKpFiKFYQfOR1M2ho/7AL19GYQD3IFWDJqk0/eQLfFR74iKVMz6ndwt9F7A8 0xxGXGpw2NJQTWLQui4Wzt33q541ihzL7EDtybBScUdIOIEO20mHr2czFoTL9IKx rU0Ck5BMyMBB+DOppP+TeKjutAI1yRVsNoabOuK4oo/FmqysgQoHEE+gVUThrrpE wV1EBILkX6O4GiMqu1+x92/yCmlKEg0Q6MM= -----END CERTIFICATE----- notary-0.1/cmd/notary/tuf.go000066400000000000000000000323571262207326400161100ustar00rootroot00000000000000package main import ( "bufio" "crypto/sha256" "encoding/hex" "fmt" "io" "io/ioutil" "net" "net/http" "net/url" "os" "path/filepath" "sort" "strings" "time" "crypto/subtle" "github.com/Sirupsen/logrus" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/pkg/term" notaryclient "github.com/docker/notary/client" "github.com/docker/notary/tuf/data" "github.com/docker/notary/utils" "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" ) var cmdTufList = &cobra.Command{ Use: "list [ GUN ]", Short: "Lists targets for a remote trusted collection.", Long: "Lists all targets for a remote trusted collection identified by the Globally Unique Name. This is an online operation.", Run: tufList, } var cmdTufAdd = &cobra.Command{ Use: "add [ GUN ] ", Short: "Adds the file as a target to the trusted collection.", Long: "Adds the file as a target to the local trusted collection identified by the Globally Unique Name. This is an offline operation. 
Please then use `publish` to push the changes to the remote trusted collection.", Run: tufAdd, } var cmdTufRemove = &cobra.Command{ Use: "remove [ GUN ] ", Short: "Removes a target from a trusted collection.", Long: "Removes a target from the local trusted collection identified by the Globally Unique Name. This is an offline operation. Please then use `publish` to push the changes to the remote trusted collection.", Run: tufRemove, } var cmdTufInit = &cobra.Command{ Use: "init [ GUN ]", Short: "Initializes a local trusted collection.", Long: "Initializes a local trusted collection identified by the Globally Unique Name. This is an online operation.", Run: tufInit, } var cmdTufLookup = &cobra.Command{ Use: "lookup [ GUN ] ", Short: "Looks up a specific target in a remote trusted collection.", Long: "Looks up a specific target in a remote trusted collection identified by the Globally Unique Name.", Run: tufLookup, } var cmdTufPublish = &cobra.Command{ Use: "publish [ GUN ]", Short: "Publishes the local trusted collection.", Long: "Publishes the local trusted collection identified by the Globally Unique Name, sending the local changes to a remote trusted server.", Run: tufPublish, } var cmdTufStatus = &cobra.Command{ Use: "status [ GUN ]", Short: "Displays status of unpublished changes to the local trusted collection.", Long: "Displays status of unpublished changes to the local trusted collection identified by the Globally Unique Name.", Run: tufStatus, } var cmdVerify = &cobra.Command{ Use: "verify [ GUN ] ", Short: "Verifies if the content is included in the remote trusted collection", Long: "Verifies if the data passed in STDIN is included in the remote trusted collection identified by the Global Unique Name.", Run: verify, } func tufAdd(cmd *cobra.Command, args []string) { if len(args) < 3 { cmd.Usage() fatalf("Must specify a GUN, target, and path to target data") } parseConfig() gun := args[0] targetName := args[1] targetPath := args[2] // no online operations 
are performed by add so the transport argument // should be nil nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), nil, retriever) if err != nil { fatalf(err.Error()) } target, err := notaryclient.NewTarget(targetName, targetPath) if err != nil { fatalf(err.Error()) } err = nRepo.AddTarget(target) if err != nil { fatalf(err.Error()) } cmd.Printf( "Addition of target \"%s\" to repository \"%s\" staged for next publish.\n", targetName, gun) } func tufInit(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify a GUN") } parseConfig() gun := args[0] nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), getTransport(gun, false), retriever) if err != nil { fatalf(err.Error()) } rootKeyList := nRepo.CryptoService.ListKeys(data.CanonicalRootRole) var rootKeyID string if len(rootKeyList) < 1 { cmd.Println("No root keys found. Generating a new root key...") rootPublicKey, err := nRepo.CryptoService.Create(data.CanonicalRootRole, data.ECDSAKey) rootKeyID = rootPublicKey.ID() if err != nil { fatalf(err.Error()) } } else { // Choses the first root key available, which is initialization specific // but should return the HW one first. 
rootKeyID = rootKeyList[0] cmd.Printf("Root key found, using: %s\n", rootKeyID) } err = nRepo.Initialize(rootKeyID) if err != nil { fatalf(err.Error()) } } func tufList(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify a GUN") } parseConfig() gun := args[0] nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), getTransport(gun, true), retriever) if err != nil { fatalf(err.Error()) } // Retreive the remote list of signed targets targetList, err := nRepo.ListTargets() if err != nil { fatalf(err.Error()) } prettyPrintTargets(targetList, cmd.Out()) } func tufLookup(cmd *cobra.Command, args []string) { if len(args) < 2 { cmd.Usage() fatalf("Must specify a GUN and target") } parseConfig() gun := args[0] targetName := args[1] nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), getTransport(gun, true), retriever) if err != nil { fatalf(err.Error()) } target, err := nRepo.GetTargetByName(targetName) if err != nil { fatalf(err.Error()) } cmd.Println(target.Name, fmt.Sprintf("sha256:%x", target.Hashes["sha256"]), target.Length) } func tufStatus(cmd *cobra.Command, args []string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify a GUN") } parseConfig() gun := args[0] nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), nil, retriever) if err != nil { fatalf(err.Error()) } cl, err := nRepo.GetChangelist() if err != nil { fatalf(err.Error()) } if len(cl.List()) == 0 { cmd.Printf("No unpublished changes for %s\n", gun) return } cmd.Printf("Unpublished changes for %s:\n\n", gun) cmd.Printf("%-10s%-10s%-12s%s\n", "action", "scope", "type", "path") cmd.Println("----------------------------------------------------") for _, ch := range cl.List() { cmd.Printf("%-10s%-10s%-12s%s\n", ch.Action(), ch.Scope(), ch.Type(), ch.Path()) } } func tufPublish(cmd *cobra.Command, args 
[]string) { if len(args) < 1 { cmd.Usage() fatalf("Must specify a GUN") } parseConfig() gun := args[0] cmd.Println("Pushing changes to", gun) nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), getTransport(gun, false), retriever) if err != nil { fatalf(err.Error()) } err = nRepo.Publish() if err != nil { fatalf(err.Error()) } } func tufRemove(cmd *cobra.Command, args []string) { if len(args) < 2 { cmd.Usage() fatalf("Must specify a GUN and target") } parseConfig() gun := args[0] targetName := args[1] // no online operation are performed by remove so the transport argument // should be nil. repo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), nil, retriever) if err != nil { fatalf(err.Error()) } err = repo.RemoveTarget(targetName) if err != nil { fatalf(err.Error()) } cmd.Printf("Removal of %s from %s staged for next publish.\n", targetName, gun) } func verify(cmd *cobra.Command, args []string) { if len(args) < 2 { cmd.Usage() fatalf("Must specify a GUN and target") } parseConfig() // Reads all of the data on STDIN payload, err := ioutil.ReadAll(os.Stdin) if err != nil { fatalf("Error reading content from STDIN: %v", err) } gun := args[0] targetName := args[1] nRepo, err := notaryclient.NewNotaryRepository(mainViper.GetString("trust_dir"), gun, getRemoteTrustServer(), getTransport(gun, true), retriever) if err != nil { fatalf(err.Error()) } target, err := nRepo.GetTargetByName(targetName) if err != nil { logrus.Error("notary: data not present in the trusted collection.") os.Exit(-11) } // Create hasher and hash data stdinHash := sha256.Sum256(payload) serverHash := target.Hashes["sha256"] if subtle.ConstantTimeCompare(stdinHash[:], serverHash) == 0 { logrus.Error("notary: data not present in the trusted collection.") os.Exit(1) } else { _, _ = os.Stdout.Write(payload) } return } type passwordStore struct { anonymous bool } func (ps passwordStore) 
Basic(u *url.URL) (string, string) { if ps.anonymous { return "", "" } stdin := bufio.NewReader(os.Stdin) fmt.Fprintf(os.Stdout, "Enter username: ") userIn, err := stdin.ReadBytes('\n') if err != nil { logrus.Errorf("error processing username input: %s", err) return "", "" } username := strings.TrimSpace(string(userIn)) state, err := term.SaveState(0) if err != nil { logrus.Errorf("error saving terminal state, cannot retrieve password: %s", err) return "", "" } term.DisableEcho(0, state) defer term.RestoreTerminal(0, state) fmt.Fprintf(os.Stdout, "Enter password: ") userIn, err = stdin.ReadBytes('\n') fmt.Fprintln(os.Stdout) if err != nil { logrus.Errorf("error processing password input: %s", err) return "", "" } password := strings.TrimSpace(string(userIn)) return username, password } func getTransport(gun string, readOnly bool) http.RoundTripper { // Attempt to get a root CA from the config file. Nil is the host defaults. rootCAFile := mainViper.GetString("remote_server.root_ca") if rootCAFile != "" { // If we haven't been given an Absolute path, we assume it's relative // from the configuration directory (~/.notary by default) if !filepath.IsAbs(rootCAFile) { rootCAFile = filepath.Join(configPath, rootCAFile) } } insecureSkipVerify := false if mainViper.IsSet("remote_server.skipTLSVerify") { insecureSkipVerify = mainViper.GetBool("remote_server.skipTLSVerify") } tlsConfig, err := utils.ConfigureClientTLS(&utils.ClientTLSOpts{ RootCAFile: rootCAFile, InsecureSkipVerify: insecureSkipVerify, }) if err != nil { logrus.Fatal("Unable to configure TLS: ", err.Error()) } base := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, }).Dial, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, DisableKeepAlives: true, } return tokenAuth(base, gun, readOnly) } func tokenAuth(baseTransport *http.Transport, gun string, readOnly bool) http.RoundTripper { // TODO(dmcgowan): 
add notary specific headers authTransport := transport.NewTransport(baseTransport) pingClient := &http.Client{ Transport: authTransport, Timeout: 5 * time.Second, } trustServerURL := getRemoteTrustServer() endpoint, err := url.Parse(trustServerURL) if err != nil { fatalf("Could not parse remote trust server url (%s): %s", trustServerURL, err.Error()) } if endpoint.Scheme == "" { fatalf("Trust server url has to be in the form of http(s)://URL:PORT. Got: %s", trustServerURL) } subPath, err := url.Parse("v2/") if err != nil { fatalf("Failed to parse v2 subpath. This error should not have been reached. Please report it as an issue at https://github.com/docker/notary/issues: %s", err.Error()) } endpoint = endpoint.ResolveReference(subPath) req, err := http.NewRequest("GET", endpoint.String(), nil) if err != nil { fatalf(err.Error()) } resp, err := pingClient.Do(req) if err != nil { fatalf(err.Error()) } defer resp.Body.Close() challengeManager := auth.NewSimpleChallengeManager() if err := challengeManager.AddResponse(resp); err != nil { fatalf(err.Error()) } ps := passwordStore{anonymous: readOnly} tokenHandler := auth.NewTokenHandler(authTransport, ps, gun, "push", "pull") basicHandler := auth.NewBasicHandler(ps) modifier := transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) return transport.NewTransport(baseTransport, modifier) } func getRemoteTrustServer() string { if remoteTrustServer == "" { configRemote := mainViper.GetString("remote_server.url") if configRemote != "" { remoteTrustServer = configRemote } else { remoteTrustServer = defaultServerURL } } return remoteTrustServer } type targetsSorter []*notaryclient.Target func (t targetsSorter) Len() int { return len(t) } func (t targetsSorter) Swap(i, j int) { t[i], t[j] = t[j], t[i] } func (t targetsSorter) Less(i, j int) bool { return t[i].Name < t[j].Name } // Given a list of KeyStores in order of listing preference, pretty-prints the // root keys and then the signing 
keys. func prettyPrintTargets(ts []*notaryclient.Target, writer io.Writer) { if len(ts) == 0 { writer.Write([]byte("\nNo targets present in this repository.\n\n")) return } sort.Stable(targetsSorter(ts)) table := tablewriter.NewWriter(writer) table.SetHeader([]string{"Name", "Digest", "Size (bytes)"}) table.SetBorder(false) table.SetColumnSeparator(" ") table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("-") table.SetAutoWrapText(false) for _, t := range ts { table.Append([]string{ t.Name, hex.EncodeToString(t.Hashes["sha256"]), fmt.Sprintf("%d", t.Length), }) } table.Render() } notary-0.1/const.go000066400000000000000000000001421262207326400143440ustar00rootroot00000000000000package notary // application wide constants const ( PrivKeyPerms = 0700 PubCertPerms = 0755 ) notary-0.1/coverpkg.sh000077500000000000000000000006271262207326400150560ustar00rootroot00000000000000#!/usr/bin/env bash # Given a subpackage and the containing package, figures out which packages # need to be passed to `go test -coverpkg`: this includes all of the # subpackage's dependencies within the containing package, as well as the # subpackage itself. 
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})" echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' notary-0.1/cryptoservice/000077500000000000000000000000001262207326400155735ustar00rootroot00000000000000notary-0.1/cryptoservice/certificate.go000066400000000000000000000020561262207326400204070ustar00rootroot00000000000000package cryptoservice import ( "crypto/rand" "crypto/x509" "fmt" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" ) // GenerateCertificate generates an X509 Certificate from a template, given a GUN func GenerateCertificate(rootKey data.PrivateKey, gun string) (*x509.Certificate, error) { signer := rootKey.CryptoSigner() if signer == nil { return nil, fmt.Errorf("key type not supported for Certificate generation: %s\n", rootKey.Algorithm()) } template, err := trustmanager.NewCertificate(gun) if err != nil { return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err) } derBytes, err := x509.CreateCertificate(rand.Reader, template, template, signer.Public(), signer) if err != nil { return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err) } // Encode the new certificate into PEM cert, err := x509.ParseCertificate(derBytes) if err != nil { return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err) } return cert, nil } notary-0.1/cryptoservice/certificate_test.go000066400000000000000000000016471262207326400214530ustar00rootroot00000000000000package cryptoservice import ( "crypto/rand" "crypto/x509" "testing" "github.com/docker/notary/trustmanager" "github.com/stretchr/testify/assert" ) func TestGenerateCertificate(t *testing.T) { privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate key") keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) err = keyStore.AddKey(privKey.ID(), "root", privKey) assert.NoError(t, err, "could not add key to store") 
// Check GenerateCertificate method gun := "docker.com/notary" cert, err := GenerateCertificate(privKey, gun) assert.NoError(t, err, "could not generate certificate") // Check public key ecdsaPrivateKey, err := x509.ParseECPrivateKey(privKey.Private()) assert.NoError(t, err) ecdsaPublicKey := ecdsaPrivateKey.Public() assert.Equal(t, ecdsaPublicKey, cert.PublicKey) // Check CommonName assert.Equal(t, cert.Subject.CommonName, gun) } notary-0.1/cryptoservice/crypto_service.go000066400000000000000000000115021262207326400211610ustar00rootroot00000000000000package cryptoservice import ( "crypto/rand" "fmt" "path/filepath" "github.com/Sirupsen/logrus" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" ) const ( rsaKeySize = 2048 // Used for snapshots and targets keys ) // CryptoService implements Sign and Create, holding a specific GUN and keystore to // operate on type CryptoService struct { gun string keyStores []trustmanager.KeyStore } // NewCryptoService returns an instance of CryptoService func NewCryptoService(gun string, keyStores ...trustmanager.KeyStore) *CryptoService { return &CryptoService{gun: gun, keyStores: keyStores} } // Create is used to generate keys for targets, snapshots and timestamps func (cs *CryptoService) Create(role, algorithm string) (data.PublicKey, error) { var privKey data.PrivateKey var err error switch algorithm { case data.RSAKey: privKey, err = trustmanager.GenerateRSAKey(rand.Reader, rsaKeySize) if err != nil { return nil, fmt.Errorf("failed to generate RSA key: %v", err) } case data.ECDSAKey: privKey, err = trustmanager.GenerateECDSAKey(rand.Reader) if err != nil { return nil, fmt.Errorf("failed to generate EC key: %v", err) } case data.ED25519Key: privKey, err = trustmanager.GenerateED25519Key(rand.Reader) if err != nil { return nil, fmt.Errorf("failed to generate ED25519 key: %v", err) } default: return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm) } 
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role, privKey.ID()) // Store the private key into our keystore with the name being: /GUN/ID.key with an alias of role var keyPath string if role == data.CanonicalRootRole { keyPath = privKey.ID() } else { keyPath = filepath.Join(cs.gun, privKey.ID()) } for _, ks := range cs.keyStores { err = ks.AddKey(keyPath, role, privKey) if err == nil { return data.PublicKeyFromPrivate(privKey), nil } } if err != nil { return nil, fmt.Errorf("failed to add key to filestore: %v", err) } return nil, fmt.Errorf("keystores would not accept new private keys for unknown reasons") } // GetPrivateKey returns a private key by ID. It tries to get the key first // without a GUN (in which case it's a root key). If that fails, try to get // the key with the GUN (non-root key). // If that fails, then we don't have the key. func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role string, err error) { keyPaths := []string{keyID, filepath.Join(cs.gun, keyID)} for _, ks := range cs.keyStores { for _, keyPath := range keyPaths { k, role, err = ks.GetKey(keyPath) if err != nil { continue } return } } return // returns whatever the final values were } // GetKey returns a key by ID func (cs *CryptoService) GetKey(keyID string) data.PublicKey { privKey, _, err := cs.GetPrivateKey(keyID) if err != nil { return nil } return data.PublicKeyFromPrivate(privKey) } // RemoveKey deletes a key by ID func (cs *CryptoService) RemoveKey(keyID string) (err error) { keyPaths := []string{keyID, filepath.Join(cs.gun, keyID)} for _, ks := range cs.keyStores { for _, keyPath := range keyPaths { ks.RemoveKey(keyPath) } } return // returns whatever the final values were } // Sign returns the signatures for the payload with a set of keyIDs. It ignores // errors to sign and expects the called to validate if the number of returned // signatures is adequate. 
func (cs *CryptoService) Sign(keyIDs []string, payload []byte) ([]data.Signature, error) { signatures := make([]data.Signature, 0, len(keyIDs)) for _, keyID := range keyIDs { privKey, _, err := cs.GetPrivateKey(keyID) if err != nil { logrus.Debugf("error attempting to retrieve private key: %s, %v", keyID, err) continue } sigAlgo := privKey.SignatureAlgorithm() sig, err := privKey.Sign(rand.Reader, payload, nil) if err != nil { logrus.Debugf("ignoring error attempting to %s sign with keyID: %s, %v", privKey.Algorithm(), keyID, err) continue } logrus.Debugf("appending %s signature with Key ID: %s", privKey.Algorithm(), keyID) // Append signatures to result array signatures = append(signatures, data.Signature{ KeyID: keyID, Method: sigAlgo, Signature: sig[:], }) } return signatures, nil } // ListKeys returns a list of key IDs valid for the given role func (cs *CryptoService) ListKeys(role string) []string { var res []string for _, ks := range cs.keyStores { for k, r := range ks.ListKeys() { if r == role { res = append(res, k) } } } return res } // ListAllKeys returns a map of key IDs to role func (cs *CryptoService) ListAllKeys() map[string]string { res := make(map[string]string) for _, ks := range cs.keyStores { for k, r := range ks.ListKeys() { res[k] = r // keys are content addressed so don't care about overwrites } } return res } notary-0.1/cryptoservice/crypto_service_test.go000066400000000000000000000235541262207326400222320ustar00rootroot00000000000000package cryptoservice import ( "crypto/rand" "fmt" "path/filepath" "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" ) var algoToSigType = map[string]data.SigAlgorithm{ data.ECDSAKey: data.ECDSASignature, data.ED25519Key: data.EDDSASignature, data.RSAKey: data.RSAPSSSignature, } var passphraseRetriever = func(string, string, bool, int) (string, bool, error) { return "", false, nil } type 
CryptoServiceTester struct { cryptoServiceFactory func() *CryptoService role string keyAlgo string } // asserts that created key exists func (c CryptoServiceTester) TestCreateAndGetKey(t *testing.T) { cryptoService := c.cryptoServiceFactory() // Test Create tufKey, err := cryptoService.Create(c.role, c.keyAlgo) assert.NoError(t, err, c.errorMsg("error creating key")) // Test GetKey retrievedKey := cryptoService.GetKey(tufKey.ID()) assert.NotNil(t, retrievedKey, c.errorMsg("Could not find key ID %s", tufKey.ID())) assert.Equal(t, tufKey.Public(), retrievedKey.Public(), c.errorMsg("retrieved public key didn't match")) // Test GetPrivateKey retrievedKey, alias, err := cryptoService.GetPrivateKey(tufKey.ID()) assert.NoError(t, err) assert.Equal(t, tufKey.ID(), retrievedKey.ID(), c.errorMsg("retrieved private key didn't have the right ID")) assert.Equal(t, c.role, alias) } // If there are multiple keystores, ensure that a key is only added to one - // the first in the list of keyStores (which is in order of preference) func (c CryptoServiceTester) TestCreateAndGetWhenMultipleKeystores(t *testing.T) { cryptoService := c.cryptoServiceFactory() cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) // Test Create tufKey, err := cryptoService.Create(c.role, c.keyAlgo) assert.NoError(t, err, c.errorMsg("error creating key")) // Only the first keystore should have the key keyPath := tufKey.ID() if c.role != data.CanonicalRootRole && cryptoService.gun != "" { keyPath = filepath.Join(cryptoService.gun, keyPath) } _, _, err = cryptoService.keyStores[0].GetKey(keyPath) assert.NoError(t, err, c.errorMsg( "First keystore does not have the key %s", keyPath)) _, _, err = cryptoService.keyStores[1].GetKey(keyPath) assert.Error(t, err, c.errorMsg( "Second keystore has the key %s", keyPath)) // GetKey works across multiple keystores retrievedKey := cryptoService.GetKey(tufKey.ID()) assert.NotNil(t, retrievedKey, c.errorMsg("Could 
not find key ID %s", tufKey.ID())) } // asserts that getting key fails for a non-existent key func (c CryptoServiceTester) TestGetNonexistentKey(t *testing.T) { cryptoService := c.cryptoServiceFactory() assert.Nil(t, cryptoService.GetKey("boguskeyid"), c.errorMsg("non-nil result for bogus keyid")) _, _, err := cryptoService.GetPrivateKey("boguskeyid") assert.NotNil(t, err) } // asserts that signing with a created key creates a valid signature func (c CryptoServiceTester) TestSignWithKey(t *testing.T) { cryptoService := c.cryptoServiceFactory() content := []byte("this is a secret") tufKey, err := cryptoService.Create(c.role, c.keyAlgo) assert.NoError(t, err, c.errorMsg("error creating key")) // Test Sign signatures, err := cryptoService.Sign([]string{tufKey.ID()}, content) assert.NoError(t, err, c.errorMsg("signing failed")) assert.Len(t, signatures, 1, c.errorMsg("wrong number of signatures")) verifier, ok := signed.Verifiers[algoToSigType[c.keyAlgo]] assert.True(t, ok, c.errorMsg("Unknown verifier for algorithm")) err = verifier.Verify(tufKey, signatures[0].Signature, content) assert.NoError(t, err, c.errorMsg("verification failed for %s key type", c.keyAlgo)) } // asserts that signing, if there are no matching keys, produces no signatures func (c CryptoServiceTester) TestSignNoMatchingKeys(t *testing.T) { cryptoService := c.cryptoServiceFactory() content := []byte("this is a secret") privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err, c.errorMsg("error creating key")) // Test Sign with key that is not in the cryptoservice signatures, err := cryptoService.Sign([]string{privKey.ID()}, content) assert.NoError(t, err, c.errorMsg("signing failed")) assert.Len(t, signatures, 0, c.errorMsg("wrong number of signatures")) } // If there are multiple keystores, even if all of them have the same key, // only one signature is returned. 
func (c CryptoServiceTester) TestSignWhenMultipleKeystores(t *testing.T) { cryptoService := c.cryptoServiceFactory() cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) content := []byte("this is a secret") privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err, c.errorMsg("error creating key")) for _, store := range cryptoService.keyStores { err := store.AddKey(privKey.ID(), "root", privKey) assert.NoError(t, err) } signatures, err := cryptoService.Sign([]string{privKey.ID()}, content) assert.NoError(t, err, c.errorMsg("signing failed")) assert.Len(t, signatures, 1, c.errorMsg("wrong number of signatures")) } // asserts that removing key that exists succeeds func (c CryptoServiceTester) TestRemoveCreatedKey(t *testing.T) { cryptoService := c.cryptoServiceFactory() tufKey, err := cryptoService.Create(c.role, c.keyAlgo) assert.NoError(t, err, c.errorMsg("error creating key")) assert.NotNil(t, cryptoService.GetKey(tufKey.ID())) // Test RemoveKey err = cryptoService.RemoveKey(tufKey.ID()) assert.NoError(t, err, c.errorMsg("could not remove key")) retrievedKey := cryptoService.GetKey(tufKey.ID()) assert.Nil(t, retrievedKey, c.errorMsg("remove didn't work")) } // asserts that removing key will remove it from all keystores func (c CryptoServiceTester) TestRemoveFromMultipleKeystores(t *testing.T) { cryptoService := c.cryptoServiceFactory() cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err, c.errorMsg("error creating key")) for _, store := range cryptoService.keyStores { err := store.AddKey(privKey.ID(), "root", privKey) assert.NoError(t, err) } assert.NotNil(t, cryptoService.GetKey(privKey.ID())) // Remove removes it from all key stores err = cryptoService.RemoveKey(privKey.ID()) assert.NoError(t, err, c.errorMsg("could not remove key")) for _, 
store := range cryptoService.keyStores { _, _, err := store.GetKey(privKey.ID()) assert.Error(t, err) } } // asserts that listing keys works with multiple keystores, and that the // same keys are deduplicated func (c CryptoServiceTester) TestListFromMultipleKeystores(t *testing.T) { cryptoService := c.cryptoServiceFactory() cryptoService.keyStores = append(cryptoService.keyStores, trustmanager.NewKeyMemoryStore(passphraseRetriever)) expectedKeysIDs := make(map[string]bool) // just want to be able to index by key for i := 0; i < 3; i++ { privKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err, c.errorMsg("error creating key")) expectedKeysIDs[privKey.ID()] = true // adds one different key to each keystore, and then one key to // both keystores for j, store := range cryptoService.keyStores { if i == j || i == 2 { store.AddKey(privKey.ID(), "root", privKey) } } } // sanity check - each should have 2 for _, store := range cryptoService.keyStores { assert.Len(t, store.ListKeys(), 2, c.errorMsg("added keys wrong")) } keyList := cryptoService.ListKeys("root") assert.Len(t, keyList, 4, c.errorMsg( "ListKeys should have 4 keys (not necesarily unique) but does not: %v", keyList)) for _, k := range keyList { _, ok := expectedKeysIDs[k] assert.True(t, ok, c.errorMsg("Unexpected key %s", k)) } keyMap := cryptoService.ListAllKeys() assert.Len(t, keyMap, 3, c.errorMsg("ListAllKeys should have 3 unique keys but does not: %v", keyMap)) for k, role := range keyMap { _, ok := expectedKeysIDs[k] assert.True(t, ok) assert.Equal(t, "root", role) } } // Prints out an error message with information about the key algorithm, // role, and test name. Ideally we could generate different tests given // data, without having to put for loops in one giant test function, but // that involves a lot of boilerplate. 
So as a compromise, everything will // still be run in for loops in one giant test function, but we can at // least provide an error message stating what data/helper test function // failed. func (c CryptoServiceTester) errorMsg(message string, args ...interface{}) string { pc := make([]uintptr, 10) // at least 1 entry needed runtime.Callers(2, pc) // the caller of errorMsg f := runtime.FuncForPC(pc[0]) return fmt.Sprintf("%s (role: %s, keyAlgo: %s): %s", f.Name(), c.role, c.keyAlgo, fmt.Sprintf(message, args...)) } func testCryptoService(t *testing.T, gun string) { getTestingCryptoService := func() *CryptoService { return NewCryptoService( gun, trustmanager.NewKeyMemoryStore(passphraseRetriever)) } roles := []string{ data.CanonicalRootRole, data.CanonicalTargetsRole, data.CanonicalSnapshotRole, data.CanonicalTimestampRole, } for _, role := range roles { for algo := range algoToSigType { cst := CryptoServiceTester{ cryptoServiceFactory: getTestingCryptoService, role: role, keyAlgo: algo, } cst.TestCreateAndGetKey(t) cst.TestCreateAndGetWhenMultipleKeystores(t) cst.TestGetNonexistentKey(t) cst.TestSignWithKey(t) cst.TestSignNoMatchingKeys(t) cst.TestSignWhenMultipleKeystores(t) cst.TestRemoveCreatedKey(t) cst.TestRemoveFromMultipleKeystores(t) cst.TestListFromMultipleKeystores(t) } } } func TestCryptoServiceWithNonEmptyGUN(t *testing.T) { testCryptoService(t, "org/repo") } func TestCryptoServiceWithEmptyGUN(t *testing.T) { testCryptoService(t, "") } notary-0.1/cryptoservice/import_export.go000066400000000000000000000175241262207326400210460ustar00rootroot00000000000000package cryptoservice import ( "archive/zip" "crypto/x509" "encoding/pem" "errors" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" ) const zipMadeByUNIX = 3 << 8 var ( // ErrNoValidPrivateKey is returned if a key being imported doesn't // look like a private key ErrNoValidPrivateKey = errors.New("no valid private key found") 
// ErrRootKeyNotEncrypted is returned if a root key being imported is // unencrypted ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported") // ErrNoKeysFoundForGUN is returned if no keys are found for the // specified GUN during export ErrNoKeysFoundForGUN = errors.New("no keys found for specified GUN") ) // ExportRootKey exports the specified root key to an io.Writer in PEM format. // The key's existing encryption is preserved. func (cs *CryptoService) ExportRootKey(dest io.Writer, keyID string) error { var ( pemBytes []byte err error ) for _, ks := range cs.keyStores { pemBytes, err = ks.ExportKey(keyID) if err != nil { continue } } if err != nil { return err } nBytes, err := dest.Write(pemBytes) if err != nil { return err } if nBytes != len(pemBytes) { return errors.New("Unable to finish writing exported key.") } return nil } // ExportRootKeyReencrypt exports the specified root key to an io.Writer in // PEM format. The key is reencrypted with a new passphrase. func (cs *CryptoService) ExportRootKeyReencrypt(dest io.Writer, keyID string, newPassphraseRetriever passphrase.Retriever) error { privateKey, role, err := cs.GetPrivateKey(keyID) if err != nil { return err } // Create temporary keystore to use as a staging area tempBaseDir, err := ioutil.TempDir("", "notary-key-export-") defer os.RemoveAll(tempBaseDir) tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever) if err != nil { return err } err = tempKeyStore.AddKey(keyID, role, privateKey) if err != nil { return err } pemBytes, err := tempKeyStore.ExportKey(keyID) if err != nil { return err } nBytes, err := dest.Write(pemBytes) if err != nil { return err } if nBytes != len(pemBytes) { return errors.New("Unable to finish writing exported key.") } return nil } // ImportRootKey imports a root in PEM format key from an io.Reader // It prompts for the key's passphrase to verify the data and to determine // the key ID. 
func (cs *CryptoService) ImportRootKey(source io.Reader) error { pemBytes, err := ioutil.ReadAll(source) if err != nil { return err } if err = checkRootKeyIsEncrypted(pemBytes); err != nil { return err } for _, ks := range cs.keyStores { // don't redeclare err, we want the value carried out of the loop if err = ks.ImportKey(pemBytes, "root"); err == nil { return nil //bail on the first keystore we import to } } return err } // ExportAllKeys exports all keys to an io.Writer in zip format. // newPassphraseRetriever will be used to obtain passphrases to use to encrypt the existing keys. func (cs *CryptoService) ExportAllKeys(dest io.Writer, newPassphraseRetriever passphrase.Retriever) error { tempBaseDir, err := ioutil.TempDir("", "notary-key-export-") defer os.RemoveAll(tempBaseDir) // Create temporary keystore to use as a staging area tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever) if err != nil { return err } for _, ks := range cs.keyStores { if err := moveKeys(ks, tempKeyStore); err != nil { return err } } zipWriter := zip.NewWriter(dest) if err := addKeysToArchive(zipWriter, tempKeyStore); err != nil { return err } zipWriter.Close() return nil } // ImportKeysZip imports keys from a zip file provided as an zip.Reader. The // keys in the root_keys directory are left encrypted, but the other keys are // decrypted with the specified passphrase. func (cs *CryptoService) ImportKeysZip(zipReader zip.Reader) error { // Temporarily store the keys in maps, so we can bail early if there's // an error (for example, wrong passphrase), without leaving the key // store in an inconsistent state newKeys := make(map[string][]byte) // Iterate through the files in the archive. 
Don't add the keys for _, f := range zipReader.File { fNameTrimmed := strings.TrimSuffix(f.Name, filepath.Ext(f.Name)) rc, err := f.Open() if err != nil { return err } defer rc.Close() fileBytes, err := ioutil.ReadAll(rc) if err != nil { return nil } // Note that using / as a separator is okay here - the zip // package guarantees that the separator will be / if fNameTrimmed[len(fNameTrimmed)-5:] == "_root" { if err = checkRootKeyIsEncrypted(fileBytes); err != nil { return err } } newKeys[fNameTrimmed] = fileBytes } for keyName, pemBytes := range newKeys { if keyName[len(keyName)-5:] == "_root" { keyName = "root" } // try to import the key to all key stores. As long as one of them // succeeds, consider it a success var tmpErr error for _, ks := range cs.keyStores { if err := ks.ImportKey(pemBytes, keyName); err != nil { tmpErr = err } else { tmpErr = nil break } } if tmpErr != nil { return tmpErr } } return nil } // ExportKeysByGUN exports all keys associated with a specified GUN to an // io.Writer in zip format. passphraseRetriever is used to select new passphrases to use to // encrypt the keys. 
func (cs *CryptoService) ExportKeysByGUN(dest io.Writer, gun string, passphraseRetriever passphrase.Retriever) error { tempBaseDir, err := ioutil.TempDir("", "notary-key-export-") defer os.RemoveAll(tempBaseDir) // Create temporary keystore to use as a staging area tempKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, passphraseRetriever) if err != nil { return err } for _, ks := range cs.keyStores { if err := moveKeysByGUN(ks, tempKeyStore, gun); err != nil { return err } } zipWriter := zip.NewWriter(dest) if len(tempKeyStore.ListKeys()) == 0 { return ErrNoKeysFoundForGUN } if err := addKeysToArchive(zipWriter, tempKeyStore); err != nil { return err } zipWriter.Close() return nil } func moveKeysByGUN(oldKeyStore, newKeyStore trustmanager.KeyStore, gun string) error { for relKeyPath := range oldKeyStore.ListKeys() { // Skip keys that aren't associated with this GUN if !strings.HasPrefix(relKeyPath, filepath.FromSlash(gun)) { continue } privKey, alias, err := oldKeyStore.GetKey(relKeyPath) if err != nil { return err } err = newKeyStore.AddKey(relKeyPath, alias, privKey) if err != nil { return err } } return nil } func moveKeys(oldKeyStore, newKeyStore trustmanager.KeyStore) error { for f := range oldKeyStore.ListKeys() { privateKey, alias, err := oldKeyStore.GetKey(f) if err != nil { return err } err = newKeyStore.AddKey(f, alias, privateKey) if err != nil { return err } } return nil } func addKeysToArchive(zipWriter *zip.Writer, newKeyStore *trustmanager.KeyFileStore) error { for _, relKeyPath := range newKeyStore.ListFiles() { fullKeyPath := filepath.Join(newKeyStore.BaseDir(), relKeyPath) fi, err := os.Lstat(fullKeyPath) if err != nil { return err } infoHeader, err := zip.FileInfoHeader(fi) if err != nil { return err } infoHeader.Name = relKeyPath zipFileEntryWriter, err := zipWriter.CreateHeader(infoHeader) if err != nil { return err } fileContents, err := ioutil.ReadFile(fullKeyPath) if err != nil { return err } if _, err = 
zipFileEntryWriter.Write(fileContents); err != nil { return err } } return nil } // checkRootKeyIsEncrypted makes sure the root key is encrypted. We have // internal assumptions that depend on this. func checkRootKeyIsEncrypted(pemBytes []byte) error { block, _ := pem.Decode(pemBytes) if block == nil { return ErrNoValidPrivateKey } if !x509.IsEncryptedPEMBlock(block) { return ErrRootKeyNotEncrypted } return nil } notary-0.1/cryptoservice/import_export_test.go000066400000000000000000000330321262207326400220750ustar00rootroot00000000000000package cryptoservice import ( "archive/zip" "bytes" "fmt" "io/ioutil" "net/http" "net/http/httptest" "os" "path/filepath" "strings" "testing" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) const timestampECDSAKeyJSON = ` {"keytype":"ecdsa","keyval":{"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw==","private":"MHcCAQEEIDqtcdzU7H3AbIPSQaxHl9+xYECt7NpK7B1+6ep5cv9CoAoGCCqGSM49AwEHoUQDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw=="}}` func createTestServer(t *testing.T) (*httptest.Server, *http.ServeMux) { mux := http.NewServeMux() // TUF will request /v2/docker.com/notary/_trust/tuf/timestamp.key // Return a canned timestamp.key mux.HandleFunc("/v2/docker.com/notary/_trust/tuf/timestamp.key", func(w http.ResponseWriter, r *http.Request) { // Also contains the private key, but for the purpose of this // test, we don't care fmt.Fprint(w, timestampECDSAKeyJSON) }) ts := httptest.NewServer(mux) return ts, mux } var oldPassphrase = "oldPassphrase" var exportPassphrase = "exportPassphrase" var oldPassphraseRetriever = func(string, string, bool, int) (string, bool, error) { return oldPassphrase, false, nil } var newPassphraseRetriever = func(string, string, bool, int) (string, bool, error) { return exportPassphrase, false, nil } func 
TestImportExportZip(t *testing.T) { gun := "docker.com/notary" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever) cs := NewCryptoService(gun, fileStore) pubKey, err := cs.Create(data.CanonicalRootRole, data.ECDSAKey) assert.NoError(t, err) rootKeyID := pubKey.ID() tempZipFile, err := ioutil.TempFile("", "notary-test-export-") tempZipFilePath := tempZipFile.Name() defer os.Remove(tempZipFilePath) err = cs.ExportAllKeys(tempZipFile, newPassphraseRetriever) tempZipFile.Close() assert.NoError(t, err) // Reopen the zip file for importing zipReader, err := zip.OpenReader(tempZipFilePath) assert.NoError(t, err, "could not open zip file") // Map of files to expect in the zip file, with the passphrases passphraseByFile := make(map[string]string) // Add non-root keys to the map. These should use the new passphrase // because the passwords were chosen by the newPassphraseRetriever. privKeyMap := cs.ListAllKeys() for privKeyName := range privKeyMap { _, alias, err := cs.GetPrivateKey(privKeyName) assert.NoError(t, err, "privKey %s has no alias", privKeyName) if alias == "root" { continue } relKeyPath := filepath.Join("tuf_keys", privKeyName+"_"+alias+".key") passphraseByFile[relKeyPath] = exportPassphrase } // Add root key to the map. This will use the export passphrase because it // will be reencrypted. relRootKey := filepath.Join("root_keys", rootKeyID+"_root.key") passphraseByFile[relRootKey] = exportPassphrase // Iterate through the files in the archive, checking that the files // exist and are encrypted with the expected passphrase. 
for _, f := range zipReader.File { expectedPassphrase, present := passphraseByFile[f.Name] if !present { t.Fatalf("unexpected file %s in zip file", f.Name) } delete(passphraseByFile, f.Name) rc, err := f.Open() assert.NoError(t, err, "could not open file inside zip archive") pemBytes, err := ioutil.ReadAll(rc) assert.NoError(t, err, "could not read file from zip") _, err = trustmanager.ParsePEMPrivateKey(pemBytes, expectedPassphrase) assert.NoError(t, err, "PEM not encrypted with the expected passphrase") rc.Close() } zipReader.Close() // Are there any keys that didn't make it to the zip? for fileNotFound := range passphraseByFile { t.Fatalf("%s not found in zip", fileNotFound) } // Create new repo to test import tempBaseDir2, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir2) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore2, err := trustmanager.NewKeyFileStore(tempBaseDir2, newPassphraseRetriever) assert.NoError(t, err) cs2 := NewCryptoService(gun, fileStore2) // Reopen the zip file for importing zipReader, err = zip.OpenReader(tempZipFilePath) assert.NoError(t, err, "could not open zip file") // Now try with a valid passphrase. This time it should succeed. err = cs2.ImportKeysZip(zipReader.Reader) assert.NoError(t, err) zipReader.Close() // Look for keys in private. The filenames should match the key IDs // in the repo's private key store. for privKeyName := range privKeyMap { _, alias, err := cs2.GetPrivateKey(privKeyName) assert.NoError(t, err, "privKey %s has no alias", privKeyName) if alias == "root" { continue } relKeyPath := filepath.Join("tuf_keys", privKeyName+"_"+alias+".key") privKeyFileName := filepath.Join(tempBaseDir2, "private", relKeyPath) _, err = os.Stat(privKeyFileName) assert.NoError(t, err, "missing private key for role %s: %s", alias, privKeyName) } // Look for keys in root_keys // There should be a file named after the key ID of the root key we // passed in. 
rootKeyFilename := rootKeyID + "_root.key" _, err = os.Stat(filepath.Join(tempBaseDir2, "private", "root_keys", rootKeyFilename)) assert.NoError(t, err, "missing root key") } func TestImportExportGUN(t *testing.T) { gun := "docker.com/notary" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore, err := trustmanager.NewKeyFileStore(tempBaseDir, newPassphraseRetriever) cs := NewCryptoService(gun, fileStore) _, err = cs.Create(data.CanonicalRootRole, data.ECDSAKey) _, err = cs.Create(data.CanonicalTargetsRole, data.ECDSAKey) _, err = cs.Create(data.CanonicalSnapshotRole, data.ECDSAKey) assert.NoError(t, err) tempZipFile, err := ioutil.TempFile("", "notary-test-export-") tempZipFilePath := tempZipFile.Name() defer os.Remove(tempZipFilePath) err = cs.ExportKeysByGUN(tempZipFile, gun, newPassphraseRetriever) assert.NoError(t, err) // With an invalid GUN, this should return an error err = cs.ExportKeysByGUN(tempZipFile, "does.not.exist/in/repository", newPassphraseRetriever) assert.EqualError(t, err, ErrNoKeysFoundForGUN.Error()) tempZipFile.Close() // Reopen the zip file for importing zipReader, err := zip.OpenReader(tempZipFilePath) assert.NoError(t, err, "could not open zip file") // Map of files to expect in the zip file, with the passphrases passphraseByFile := make(map[string]string) // Add keys non-root keys to the map. These should use the new passphrase // because they were formerly unencrypted. 
privKeyMap := cs.ListAllKeys() for privKeyName := range privKeyMap { _, alias, err := cs.GetPrivateKey(privKeyName) if err != nil { t.Fatalf("privKey %s has no alias", privKeyName) } if alias == "root" { continue } relKeyPath := filepath.Join("tuf_keys", privKeyName+"_"+alias+".key") passphraseByFile[relKeyPath] = exportPassphrase } // Iterate through the files in the archive, checking that the files // exist and are encrypted with the expected passphrase. for _, f := range zipReader.File { expectedPassphrase, present := passphraseByFile[f.Name] if !present { t.Fatalf("unexpected file %s in zip file", f.Name) } delete(passphraseByFile, f.Name) rc, err := f.Open() assert.NoError(t, err, "could not open file inside zip archive") pemBytes, err := ioutil.ReadAll(rc) assert.NoError(t, err, "could not read file from zip") _, err = trustmanager.ParsePEMPrivateKey(pemBytes, expectedPassphrase) assert.NoError(t, err, "PEM not encrypted with the expected passphrase") rc.Close() } zipReader.Close() // Are there any keys that didn't make it to the zip? for fileNotFound := range passphraseByFile { t.Fatalf("%s not found in zip", fileNotFound) } // Create new repo to test import tempBaseDir2, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir2) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore2, err := trustmanager.NewKeyFileStore(tempBaseDir2, newPassphraseRetriever) cs2 := NewCryptoService(gun, fileStore2) // Reopen the zip file for importing zipReader, err = zip.OpenReader(tempZipFilePath) assert.NoError(t, err, "could not open zip file") // Now try with a valid passphrase. This time it should succeed. err = cs2.ImportKeysZip(zipReader.Reader) assert.NoError(t, err) zipReader.Close() // Look for keys in private. The filenames should match the key IDs // in the repo's private key store. 
for privKeyName, role := range privKeyMap { if role == "root" { continue } _, alias, err := cs2.GetPrivateKey(privKeyName) if err != nil { t.Fatalf("privKey %s has no alias", privKeyName) } if alias == "root" { continue } relKeyPath := filepath.Join("tuf_keys", privKeyName+"_"+alias+".key") privKeyFileName := filepath.Join(tempBaseDir2, "private", relKeyPath) _, err = os.Stat(privKeyFileName) assert.NoError(t, err) } } func TestImportExportRootKey(t *testing.T) { gun := "docker.com/notary" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore, err := trustmanager.NewKeyFileStore(tempBaseDir, oldPassphraseRetriever) cs := NewCryptoService(gun, fileStore) pubKey, err := cs.Create(data.CanonicalRootRole, data.ECDSAKey) assert.NoError(t, err) rootKeyID := pubKey.ID() tempKeyFile, err := ioutil.TempFile("", "notary-test-export-") tempKeyFilePath := tempKeyFile.Name() defer os.Remove(tempKeyFilePath) err = cs.ExportRootKey(tempKeyFile, rootKeyID) assert.NoError(t, err) tempKeyFile.Close() // Create new repo to test import tempBaseDir2, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir2) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore2, err := trustmanager.NewKeyFileStore(tempBaseDir2, oldPassphraseRetriever) cs2 := NewCryptoService(gun, fileStore2) keyReader, err := os.Open(tempKeyFilePath) assert.NoError(t, err, "could not open key file") err = cs2.ImportRootKey(keyReader) assert.NoError(t, err) keyReader.Close() // Look for repo's root key in repo2 // There should be a file named after the key ID of the root key we // imported. 
rootKeyFilename := rootKeyID + "_root.key" _, err = os.Stat(filepath.Join(tempBaseDir2, "private", "root_keys", rootKeyFilename)) assert.NoError(t, err, "missing root key") // Try to import a decrypted version of the root key and make sure it // doesn't succeed pemBytes, err := ioutil.ReadFile(tempKeyFilePath) assert.NoError(t, err, "could not read key file") privKey, err := trustmanager.ParsePEMPrivateKey(pemBytes, oldPassphrase) assert.NoError(t, err, "could not decrypt key file") decryptedPEMBytes, err := trustmanager.KeyToPEM(privKey) assert.NoError(t, err, "could not convert key to PEM") err = cs2.ImportRootKey(bytes.NewReader(decryptedPEMBytes)) assert.EqualError(t, err, ErrRootKeyNotEncrypted.Error()) // Try to import garbage and make sure it doesn't succeed err = cs2.ImportRootKey(strings.NewReader("this is not PEM")) assert.EqualError(t, err, ErrNoValidPrivateKey.Error()) // Should be able to unlock the root key with the old password key, alias, err := cs2.GetPrivateKey(rootKeyID) assert.NoError(t, err, "could not unlock root key") assert.Equal(t, "root", alias) assert.Equal(t, rootKeyID, key.ID()) } func TestImportExportRootKeyReencrypt(t *testing.T) { gun := "docker.com/notary" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore, err := trustmanager.NewKeyFileStore(tempBaseDir, oldPassphraseRetriever) cs := NewCryptoService(gun, fileStore) pubKey, err := cs.Create(data.CanonicalRootRole, data.ECDSAKey) assert.NoError(t, err) rootKeyID := pubKey.ID() tempKeyFile, err := ioutil.TempFile("", "notary-test-export-") tempKeyFilePath := tempKeyFile.Name() defer os.Remove(tempKeyFilePath) err = cs.ExportRootKeyReencrypt(tempKeyFile, rootKeyID, newPassphraseRetriever) assert.NoError(t, err) tempKeyFile.Close() // Create new repo to test import tempBaseDir2, err := ioutil.TempDir("", 
"notary-test-") defer os.RemoveAll(tempBaseDir2) assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileStore2, err := trustmanager.NewKeyFileStore(tempBaseDir2, newPassphraseRetriever) cs2 := NewCryptoService(gun, fileStore2) keyReader, err := os.Open(tempKeyFilePath) assert.NoError(t, err, "could not open key file") err = cs2.ImportRootKey(keyReader) assert.NoError(t, err) keyReader.Close() // Look for repo's root key in repo2 // There should be a file named after the key ID of the root key we // imported. rootKeyFilename := rootKeyID + "_root.key" _, err = os.Stat(filepath.Join(tempBaseDir2, "private", "root_keys", rootKeyFilename)) assert.NoError(t, err, "missing root key") // Should be able to unlock the root key with the new password key, alias, err := cs2.GetPrivateKey(rootKeyID) assert.NoError(t, err, "could not unlock root key") assert.Equal(t, "root", alias) assert.Equal(t, rootKeyID, key.ID()) } notary-0.1/docker-compose.yml000066400000000000000000000006711262207326400163330ustar00rootroot00000000000000notaryserver: build: . dockerfile: Dockerfile.server links: - notarymysql - notarysigner ports: - "8080" - "4443:4443" environment: SERVICE_NAME: notary notarysigner: volumes: - /dev/bus/usb/003/010:/dev/bus/usb/002/010 - /var/run/pcscd/pcscd.comm:/var/run/pcscd/pcscd.comm build: . dockerfile: Dockerfile.signer links: - notarymysql notarymysql: build: ./notarymysql/ ports: - "3306:3306" notary-0.1/docs/000077500000000000000000000000001262207326400136225ustar00rootroot00000000000000notary-0.1/docs/Dockerfile000066400000000000000000000014001262207326400156070ustar00rootroot00000000000000FROM docs/base:hugo-github-linking MAINTAINER Mary Anthony (@moxiegirl) # To get the git info for this repo COPY . /src COPY . 
/docs/content/notary/ RUN svn checkout https://github.com/docker/docker/trunk/docs /docs/content/engine RUN svn checkout https://github.com/docker/compose/trunk/docs /docs/content/compose RUN svn checkout https://github.com/docker/swarm/trunk/docs /docs/content/swarm RUN svn checkout https://github.com/docker/machine/trunk/docs /docs/content/machine RUN svn checkout https://github.com/docker/distribution/trunk/docs /docs/content/registry RUN svn checkout https://github.com/docker/tutorials/trunk/docs /docs/content/tutorials RUN svn checkout https://github.com/docker/opensource/trunk/docs /docs/content/opensource notary-0.1/docs/Makefile000066400000000000000000000045561262207326400152740ustar00rootroot00000000000000.PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate # env vars passed through directly to Docker's build scripts # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily # `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these DOCKER_ENVS := \ -e BUILDFLAGS \ -e DOCKER_CLIENTONLY \ -e DOCKER_EXECDRIVER \ -e DOCKER_GRAPHDRIVER \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds # to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) # to allow `make DOCSPORT=9000 docs` DOCSPORT := 8000 # Get the IP ADDRESS DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") HUGO_BIND_IP=0.0.0.0 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) DOCKER_IMAGE := docker$(if 
$(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE # for some docs workarounds (see below in "docs-build" target) GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) default: docs docs: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) docs-draft: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) docs-shell: docs-build $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash docs-build: # ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files # echo "$(GIT_BRANCH)" > GIT_BRANCH # echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET # echo "$(GITCOMMIT)" > GITCOMMIT docker build -t "$(DOCKER_DOCS_IMAGE)" . notary-0.1/docs/README.md000066400000000000000000000070041262207326400151020ustar00rootroot00000000000000# Contributing to the Docker Notary documentation The documentation in this directory is part of the [https://docs.docker.com](https://docs.docker.com) website. Docker uses [the Hugo static generator](http://gohugo.io/overview/introduction/) to convert project Markdown files to a static HTML site. You don't need to be a Hugo expert to contribute to the Notary documentation. If you are familiar with Markdown, you can modify the content in the `docs` files. If you want to add a new file or change the location of the document in the menu, you do need to know a little more. ## Documentation contributing workflow 1. Edit a Markdown file in the tree. 2. Save your changes. 3. Make sure you are in the `docs` subdirectory. 4. Build the documentation. 
$ make docs ---> ffcf3f6c4e97 Removing intermediate container a676414185e8 Successfully built ffcf3f6c4e97 docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 -e DOCKERHOST "docs-base:test-tooling" hugo server --port=8000 --baseUrl=192.168.59.103 --bind=0.0.0.0 ERROR: 2015/06/13 MenuEntry's .Url is deprecated and will be removed in Hugo 0.15. Use .URL instead. 0 of 4 drafts rendered 0 future content 12 pages created 0 paginator pages created 0 tags created 0 categories created in 55 ms Serving pages from /docs/public Web Server is available at http://0.0.0.0:8000/ Press Ctrl+C to stop 5. Open the available server in your browser. ## Tips on Hugo metadata and menu positioning The top of each Docker Notary documentation file contains TOML metadata. The metadata is commented out to prevent it from appearing in GitHub. The metadata alone has this structure: +++ title = "Extending services in Notary" description = "How to use Docker Notary's extends keyword to share configuration between files and projects" keywords = ["fig, composition, Notary, docker, orchestration, documentation, docs"] [menu.main] parent="smn_workw_Notary" weight=2 +++ The `[menu.main]` section refers to navigation defined [in the main Docker menu](https://github.com/docker/docs-base/blob/hugo/config.toml). This metadata says *add a menu item called* Extending services in Notary *to the menu with the* `smn_workdw_Notary` *identifier*. If you locate the menu in the configuration, you'll find *Create multi-container applications* is the menu title. You can move an article in the tree by specifying a new parent. You can shift the location of the item by changing its weight. Higher numbers are heavier and shift the item to the bottom of menu. Low or no numbers shift it up. ## Other key documentation repositories The `docker/docs-base` repository contains [the Hugo theme and menu configuration](https://github.com/docker/docs-base). 
If you open the `Dockerfile` you'll see the `make docs` relies on this as a base image for building the Notary documentation. The `docker/docs.docker.com` repository contains [build system for building the Docker documentation site](https://github.com/docker/docs.docker.com). Fork this repository to build the entire documentation site. notary-0.1/docs/cli.md000066400000000000000000000010021262207326400147040ustar00rootroot00000000000000 # Notary CLI ## Notary Server The default notary server URL is [https://notary-server:4443/]. This default value can overridden (by priority order): - by specifying the option `--server/-s` on commands requiring call to the notary server. - by setting the `NOTARY_SERVER_URL` environment variable. notary-0.1/docs/index.md000066400000000000000000000005221262207326400152520ustar00rootroot00000000000000 # List of Notary Documentation * [Overview of Docker Notary](overview) * [Notary CLI](cli) notary-0.1/docs/overview.md000066400000000000000000000004021262207326400160060ustar00rootroot00000000000000 # Overview of Docker Notary notary-0.1/errors/000077500000000000000000000000001262207326400142065ustar00rootroot00000000000000notary-0.1/errors/errors.go000066400000000000000000000071751262207326400160630ustar00rootroot00000000000000package errors import ( "net/http" "github.com/docker/distribution/registry/api/errcode" ) // The notary API is on version 1, but URLs start with /v2/ to be consistent // with the registry API const errGroup = "notary.api.v1" // These errors should be returned from contextHandlers only. 
They are // serialized and returned to a user as part of the generic error handling // done by the rootHandler var ( ErrNoStorage = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NO_STORAGE", Message: "The server is misconfigured and has no storage.", Description: "No storage backend has been configured for the server.", HTTPStatusCode: http.StatusInternalServerError, }) ErrNoFilename = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NO_FILENAME", Message: "No file/role name provided.", Description: "No file/role name is provided to associate an update with.", HTTPStatusCode: http.StatusBadRequest, }) ErrInvalidRole = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "INVALID_ROLE", Message: "The role you are attempting to operate on is invalid.", Description: "The user attempted to operate on a role that is not deemed valid.", HTTPStatusCode: http.StatusBadRequest, }) ErrMalformedJSON = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MALFORMED_JSON", Message: "JSON sent by the client could not be parsed by the server", Description: "The client sent malformed JSON.", HTTPStatusCode: http.StatusBadRequest, }) ErrUpdating = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "UPDATING", Message: "An error has occurred while updating the TUF repository.", Description: "An error occurred when attempting to apply an update at the storage layer.", HTTPStatusCode: http.StatusInternalServerError, }) ErrMetadataNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "METADATA_NOT_FOUND", Message: "You have requested metadata that does not exist.", Description: "The user requested metadata that is not known to the server.", HTTPStatusCode: http.StatusNotFound, }) ErrMalformedUpload = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "MALFORMED_UPLOAD", Message: "The body of your request is malformed.", Description: "The user uploaded new TUF data and the server was unable to parse it as 
multipart/form-data.", HTTPStatusCode: http.StatusBadRequest, }) ErrGenericNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "GENERIC_NOT_FOUND", Message: "You have requested a resource that does not exist.", Description: "The user requested a non-specific resource that is not known to the server.", HTTPStatusCode: http.StatusNotFound, }) ErrNoCryptoService = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NO_CRYPTOSERVICE", Message: "The server does not have a signing service configured.", Description: "No signing service has been configured for the server and it has been asked to perform an operation that requires either signing, or key generation.", HTTPStatusCode: http.StatusInternalServerError, }) ErrNoKeyAlgorithm = errcode.Register(errGroup, errcode.ErrorDescriptor{ Value: "NO_KEYALGORITHM", Message: "The server does not have a key algorithm configured.", Description: "No key algorihtm has been configured for the server and it has been asked to perform an operation that requires generation.", HTTPStatusCode: http.StatusInternalServerError, }) ErrUnknown = errcode.ErrorCodeUnknown ) notary-0.1/fixtures/000077500000000000000000000000001262207326400145435ustar00rootroot00000000000000notary-0.1/fixtures/intermediate-ca.crt000066400000000000000000000042451262207326400203150ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIGMzCCBBugAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJVUzEL MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv Y2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0EwHhcNMTUwNzE2MDQyNTAz WhcNMjUwNzEzMDQyNTAzWjBfMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTEL MAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv Y2tlcjELMAkGA1UECAwCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQCwVVD4pK7z7pXPpJbaZ1Hg5eRXIcaYtbFPCnN0iqy9HsVEGnEn5BPNSEsuP+m0 5N0qVV7DGb1SjiloLXD1qDDvhXWk+giS9ppqPHPLVPB4bvzsqwDYrtpbqkYvO0YK 0SL3kxPXUFdlkFfgu0xjlczm2PhWG3Jd8aAtspL/L+VfPA13JUaWxSLpui1In8rh 
gAyQTK6Q4Of6GbJYTnAHb59UoLXSzB5AfqiUq6L7nEYYKoPflPbRAIWL/UBm0c+H ocms706PYpmPS2RQv3iOGmnn9hEVp3P6jq7WAevbA4aYGx5EsbVtYABqJBbFWAuw wTGRYmzn0Mj0eTMge9ztYB2/2sxdTe6uhmFgpUXngDqJI5O9N3zPfvlEImCky3HM jJoL7g5smqX9o1P+ESLh0VZzhh7IDPzQTXpcPIS/6z0l22QGkK/1N1PaADaUHdLL vSav3y2BaEmPvf2fkZj8yP5eYgi7Cw5ONhHLDYHFcl9Zm/ywmdxHJETz9nfgXnsW HNxDqrkCVO46r/u6rSrUt6hr3oddJG8s8Jo06earw6XU3MzM+3giwkK0SSM3uRPq 4AscR1Tv+E31AuOAmjqYQoT29bMIxoSzeljj/YnedwjW45pWyc3JoHaibDwvW9Uo GSZBVy4hrM/Fa7XCWv1WfHNW1gDwaLYwDnl5jFmRBvcfuQIDAQABo4H5MIH2MIGR BgNVHSMEgYkwgYaAFHUM1U3E4WyL1nvFd+dPY8f4O2hZoWOkYTBfMQswCQYDVQQG EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNV BAoMBkRvY2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0GCCQDCeDLbemIT SzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEF BQcDATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFHe48hcBcAp0bUVlTxXeRA4o E16pMA0GCSqGSIb3DQEBCwUAA4ICAQAWUtAPdUFpwRq+N1SzGUejSikeMGyPZscZ JBUCmhZoFufgXGbLO5OpcRLaV3Xda0t/5PtdGMSEzczeoZHWknDtw+79OBittPPj Sh1oFDuPo35R7eP624lUCch/InZCphTaLx9oDLGcaK3ailQ9wjBdKdlBl8KNKIZp a13aP5rnSm2Jva+tXy/yi3BSds3dGD8ITKZyI/6AFHxGvObrDIBpo4FF/zcWXVDj paOmxplRtM4Hitm+sXGvfqJe4x5DuOXOnPrT3dHvRT6vSZUoKobxMqmRTOcrOIPa EeMpOobshORuRntMDYvvgO3D6p6iciDW2Vp9N6rdMdfOWEQN8JVWvB7IxRHk9qKJ vYOWVbczAt0qpMvXF3PXLjZbUM0knOdUKIEbqP4YUbgdzx6RtgiiY930Aj6tAtce 0fpgNlvjMRpSBuWTlAfNNjG/YhndMz9uI68TMfFpR3PcgVIv30krw/9VzoLi2Dpe ow6DrGO6oi+DhN78P4jY/O9UczZK2roZL1Oi5P0RIxf23UZC7x1DlcN3nBr4sYSv rBx4cFTMNpwU+nzsIi4djcFDKmJdEOyjMnkP2v0Lwe7yvK08pZdEu+0zbrq17kue XpXLc7K68QB15yxzGylU5rRwzmC/YsAVyE4eoGu8PxWxrERvHby4B8YP0vAfOraL lKmXlK4dTg== -----END CERTIFICATE----- notary-0.1/fixtures/notary-server.crt000066400000000000000000000100441262207326400200740ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFWzCCA0OgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBfMRowGAYDVQQDDBFOb3Rh cnkgVGVzdGluZyBDQTELMAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lz Y28xDzANBgNVBAoMBkRvY2tlcjELMAkGA1UECAwCQ0EwHhcNMTUwNzE2MDQyNTMy 
WhcNMTYwNzE1MDQyNTMyWjBbMRYwFAYDVQQDDA1ub3Rhcnktc2VydmVyMQswCQYD VQQGEwJVUzEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2Vy MQswCQYDVQQIDAJDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKjb eflOtVrOv0IOeJGKfi5LHH3Di0O2nlZu8AITSJbDZPSXoYc+cprpoEWYncbFFC3C 94z5xBW5vcAqMhLs50ml5ADl86umcLl2C/mX8NuZnlIevMCb0mBiavDtSPV3J5Dq Ok+trgKEXs9g4hyh5Onh5Y5InPO1lDJ+2cEtVGBMhhddfWRVlV9ZUWxPYVCTt6L0 bD9SeyXJVB0dnFhr3xICayhDlhlvcjXVOTUsewJLo/L2nq0ve93Jb2smKio27ZGE 79bCGqJK213/FNqfAlGUPkhYTfYJTcgjhS1plmtgN6KZF6RVXvOrCBMEDM2yZq1m EPWjoT0tn0MkWErDANcCAwEAAaOCASQwggEgMIGIBgNVHSMEgYAwfoAUd7jyFwFw CnRtRWVPFd5EDigTXqmhY6RhMF8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEW MBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2VyMRowGAYDVQQD DBFOb3RhcnkgVGVzdGluZyBDQYIBATAMBgNVHRMBAf8EAjAAMB0GA1UdJQQWMBQG CCsGAQUFBwMCBggrBgEFBQcDATAOBgNVHQ8BAf8EBAMCBaAwNwYDVR0RBDAwLoIN bm90YXJ5LXNlcnZlcoIMbm90YXJ5c2VydmVygglsb2NhbGhvc3SHBH8AAAEwHQYD VR0OBBYEFBQcColyhey0o0RTLiiGAtaRhUIuMA0GCSqGSIb3DQEBCwUAA4ICAQAW jf7f98t5y2C5mjd8om/vfgpJRmnyjFxJD8glCIacnwABAc2MgNoWRISWJnjwSf9W kZ/tWGeHKdQ4Q7T3+Vu2d8nVpL+cGLZY4iddzxlNqWeaA7Sa7jQSLvOoYYxkb+w5 jUpukvqxGzCToW3dlOaV0qvOhXaOxPD6T8IWivnQdU53oU3kopfYiRjkREA1dIBv Hwaa6fAjeK4KyBt7pzKScfHzU4X2gXajqc7Ox0NAb5YfIFOqySqcnYNflcZ+lDPd XVMBdB4eRl1BbVTlonxxATWkhiv8GZUc9YD/bikbFzVYm3N5XRT7LCgyBgrmbH5k PJUElTP2AsoSRLXUsPgCAhBM9QzHWsMiEh5wcpe61C3Afwv4MLtr7T0T99vp/BJt OOJ7kJzYhp6P4FTi4uXuT4xcIJ/yTDZcLUTlJSWCuCKCM76yZteWEmlvWBHd9QiF TDqKzjhrnt2FpPSBSm9Na+hAwsmZfRzQXelXai3aBx55HCIcGZ9o8oGsJaB7uDum 4+lFOhMiGaL2/pxhZcbCCjpLNv/9mCb67iPQV/E8xAY89wsXYpU+i/q1RGbraXLA K3faVJu6R5taGe0heQr6VGZwF4L+bG64rtxUPqKDCF+Y9FpN4qUDl3vzmYGBTd5u osKHqyciMmPCpgR7IQd1yYqH1cwlhQX/yTepX9gcrA== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIGMzCCBBugAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJVUzEL MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv Y2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0EwHhcNMTUwNzE2MDQyNTAz WhcNMjUwNzEzMDQyNTAzWjBfMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTEL 
MAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv Y2tlcjELMAkGA1UECAwCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQCwVVD4pK7z7pXPpJbaZ1Hg5eRXIcaYtbFPCnN0iqy9HsVEGnEn5BPNSEsuP+m0 5N0qVV7DGb1SjiloLXD1qDDvhXWk+giS9ppqPHPLVPB4bvzsqwDYrtpbqkYvO0YK 0SL3kxPXUFdlkFfgu0xjlczm2PhWG3Jd8aAtspL/L+VfPA13JUaWxSLpui1In8rh gAyQTK6Q4Of6GbJYTnAHb59UoLXSzB5AfqiUq6L7nEYYKoPflPbRAIWL/UBm0c+H ocms706PYpmPS2RQv3iOGmnn9hEVp3P6jq7WAevbA4aYGx5EsbVtYABqJBbFWAuw wTGRYmzn0Mj0eTMge9ztYB2/2sxdTe6uhmFgpUXngDqJI5O9N3zPfvlEImCky3HM jJoL7g5smqX9o1P+ESLh0VZzhh7IDPzQTXpcPIS/6z0l22QGkK/1N1PaADaUHdLL vSav3y2BaEmPvf2fkZj8yP5eYgi7Cw5ONhHLDYHFcl9Zm/ywmdxHJETz9nfgXnsW HNxDqrkCVO46r/u6rSrUt6hr3oddJG8s8Jo06earw6XU3MzM+3giwkK0SSM3uRPq 4AscR1Tv+E31AuOAmjqYQoT29bMIxoSzeljj/YnedwjW45pWyc3JoHaibDwvW9Uo GSZBVy4hrM/Fa7XCWv1WfHNW1gDwaLYwDnl5jFmRBvcfuQIDAQABo4H5MIH2MIGR BgNVHSMEgYkwgYaAFHUM1U3E4WyL1nvFd+dPY8f4O2hZoWOkYTBfMQswCQYDVQQG EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNV BAoMBkRvY2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0GCCQDCeDLbemIT SzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEF BQcDATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFHe48hcBcAp0bUVlTxXeRA4o E16pMA0GCSqGSIb3DQEBCwUAA4ICAQAWUtAPdUFpwRq+N1SzGUejSikeMGyPZscZ JBUCmhZoFufgXGbLO5OpcRLaV3Xda0t/5PtdGMSEzczeoZHWknDtw+79OBittPPj Sh1oFDuPo35R7eP624lUCch/InZCphTaLx9oDLGcaK3ailQ9wjBdKdlBl8KNKIZp a13aP5rnSm2Jva+tXy/yi3BSds3dGD8ITKZyI/6AFHxGvObrDIBpo4FF/zcWXVDj paOmxplRtM4Hitm+sXGvfqJe4x5DuOXOnPrT3dHvRT6vSZUoKobxMqmRTOcrOIPa EeMpOobshORuRntMDYvvgO3D6p6iciDW2Vp9N6rdMdfOWEQN8JVWvB7IxRHk9qKJ vYOWVbczAt0qpMvXF3PXLjZbUM0knOdUKIEbqP4YUbgdzx6RtgiiY930Aj6tAtce 0fpgNlvjMRpSBuWTlAfNNjG/YhndMz9uI68TMfFpR3PcgVIv30krw/9VzoLi2Dpe ow6DrGO6oi+DhN78P4jY/O9UczZK2roZL1Oi5P0RIxf23UZC7x1DlcN3nBr4sYSv rBx4cFTMNpwU+nzsIi4djcFDKmJdEOyjMnkP2v0Lwe7yvK08pZdEu+0zbrq17kue XpXLc7K68QB15yxzGylU5rRwzmC/YsAVyE4eoGu8PxWxrERvHby4B8YP0vAfOraL lKmXlK4dTg== -----END CERTIFICATE----- 
notary-0.1/fixtures/notary-server.key000066400000000000000000000032141262207326400200750ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAqNt5+U61Ws6/Qg54kYp+LkscfcOLQ7aeVm7wAhNIlsNk9Jeh hz5ymumgRZidxsUULcL3jPnEFbm9wCoyEuznSaXkAOXzq6ZwuXYL+Zfw25meUh68 wJvSYGJq8O1I9XcnkOo6T62uAoRez2DiHKHk6eHljkic87WUMn7ZwS1UYEyGF119 ZFWVX1lRbE9hUJO3ovRsP1J7JclUHR2cWGvfEgJrKEOWGW9yNdU5NSx7Akuj8vae rS973clvayYqKjbtkYTv1sIaokrbXf8U2p8CUZQ+SFhN9glNyCOFLWmWa2A3opkX pFVe86sIEwQMzbJmrWYQ9aOhPS2fQyRYSsMA1wIDAQABAoIBAG6mtD1dCJajGM3u sa+d86XebqMzOtV6nDPDqt+RR2YUUNm/a4g2sd817WLt6aZRizGZq6LkIUyjVObS P9ILEF1AqjK0fYMkJIZEBwDeQmWFOyxRHBuTgL7Mf4u10rOYC4N5GhEQnRDlMUPw FvvwUxO4hjdA+ijx+lVErulaDQq0yj5mL4LWu4cHm576OufzgHOIp6fQtfRVJIXD W2ginblgYFLd+PPiM1RMPR/Pj63VWXWBn1VwLAxWN889E4VG2medl0taQgkNQ3/W 0J04KiTXPrtcUBy2AGoHikvN7gG7Up2IwRRbsXkUdhQNZ/HnIQlkFfteiqqt9VNR Nsi31nECgYEA0qE+96TvYf8jeZsqrl8YQAvjXWrNA05eKZlT6cm6XpyXq22v9Cgn 2KXEhRwHZF2dQ2C+1PvboeTUbpdPX1nY2shY59L7+t68F/jxotcjx0yL+ZC742Fy bWsc8Us0Ir2DD5g/+0F+LRLFJKSfJPdLzEkvwuYnlm6RcFlbxIxW6h0CgYEAzTrE 6ulEhN0fKeJY/UaK/8GlLllXc2Z5t7mRicN1s782l5qi0n1R57VJw/Ezx4JN1mcQ 4axe9zzjAA5JfSDfyTyNedP1KOmCaKmBqGa9JppxGcVQpMDg8+QvYnJ8o5JXEXSE TOnpY4RTEA1RGnA5KbbJ7R1MiHUGXC9nizVHxIMCgYB8cu1DYN5XpmoNddK4CFPJ s7x4+5t6MpmMNp3P6nMFZ7xte3eU6QzyAq+kfjUX5f//SXA3Y0AX3Z5uYVRyYCGy 0uFEx/I9/dBg0aPjtP3cyauCnzOEW5VCdSE6qFZ7mEGRu0FCcSXd99MnnWSycLMG Vs+zdk05osan/QQtk0XfOQKBgDfkIWy4SmjEr5AAjKutYn10hz+wJRjQd6WJbBFQ oeVp1bxD6MPaTUwFGym5rphO7FPPjdFn2BUNB+Uj/u+M3GU5kG31Q3b44QMP5reu AyVYOiUCj4vO23SQWDc/ZqJFYGDokn8/1Me9acGdXtEMbwTlOujQad9fv3OrlU9c G0dxAoGAHcntflD6UvQ5/PYOirNJL1GhSspF7u72NrsYjaoZls83uIqucJiB5hMH Ovq1TJbl0DwDBOyMmt5gZraPQB0P5/5GvnxqGlIAKIwi2VuQ2XHpSBE8Pg5Pveb8 sgFLFnwL5+JyqOP65AV3Eh5b4BJc6kqKz4gVmKLBQeo6lE13sNs= -----END RSA PRIVATE KEY----- notary-0.1/fixtures/notary-signer.crt000066400000000000000000000100441262207326400200550ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- 
MIIFWzCCA0OgAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMRowGAYDVQQDDBFOb3Rh cnkgVGVzdGluZyBDQTELMAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lz Y28xDzANBgNVBAoMBkRvY2tlcjELMAkGA1UECAwCQ0EwHhcNMTUwNzE2MDQyNTIx WhcNMTYwNzE1MDQyNTIxWjBbMRYwFAYDVQQDDA1ub3Rhcnktc2lnbmVyMQswCQYD VQQGEwJVUzEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2Vy MQswCQYDVQQIDAJDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANhO 8+K9xT6M9dQC90Hxs6bmTXWQzE5oV2kLeVKqOjwAvGt6wBE2XJCAbTS3FORIOyoO VQDVCv2Pk2lZXGWqSrH8SY2umjRJIhPDiqN9V5M/gcmMm2EUgwmp2l4bsDk1MQ6G Sbud5kjYGZcp9uXxAVO8tfLVLQF7ohJYqiexJN+fZkQyxTgSqrI7MKK1pUvGX/fa 6EXzpKwxTQPJXiG/ZQW0Pn+gdrz+/Cf0PcVyV/Ghc2RR+WjKzqqAiDUJoEtKm/xQ VRcSPbagVLCe0KZr7VmtDWnHsUv9ZB9BRNlIlRVDOhVDCCcMu/zEtcxuH8ja7faf i5xNt6vCBmHuCXQtTUsCAwEAAaOCASQwggEgMIGIBgNVHSMEgYAwfoAUd7jyFwFw CnRtRWVPFd5EDigTXqmhY6RhMF8xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEW MBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2VyMRowGAYDVQQD DBFOb3RhcnkgVGVzdGluZyBDQYIBATAMBgNVHRMBAf8EAjAAMB0GA1UdJQQWMBQG CCsGAQUFBwMCBggrBgEFBQcDATAOBgNVHQ8BAf8EBAMCBaAwNwYDVR0RBDAwLoIN bm90YXJ5LXNpZ25lcoIMbm90YXJ5c2lnbmVygglsb2NhbGhvc3SHBH8AAAEwHQYD VR0OBBYEFLv4/22eN7pe8IzCbL+gKr2i/o6VMA0GCSqGSIb3DQEBCwUAA4ICAQCR uX9Wif8uRu5v3tgSWx+EBJleq0nWcWM7VTLPedtpL2Xq+GZldJ7A+BGHgLQ42YjO /nye92ZcAWllEv676SEInWQmR1wtZ0cnlltvLdsZSCbHpwPpn3CK/afNm8OwtLfC KmaRU+qlLLtAvnu2fTk8KMTfAc9UJbhtntsH0rjvQPxoMTgjj2gWiWfIQZurkeAT Bovv7GfvfBsM4jAtAx5ZFOAo6yx1kvCb2rwmnrzzMA7GQTSUzWlwyviNyi8WB+kb pcm/4e4khDHzIVgCoT+O+gS382CP6cCAUcFfLizxCYvY3uS6P5be+sp8JO4bV9Sc 0nMiDFZWyzEZj1dWMnoWNq1vMEr9NAXexata5B2DIfWZz6pWWMdw3uPo5hZBcNik 6okQacazFCdgmtbXl+TPld8dQEN0beqYhIHQ9aosYyONoBhqn4I/09XQQmxVY2/L BThsQBIJHh2jIRgFcSePoVDI/lDd6wnqtSwedu+7tShG6bN9tlQsyqf+8MquBC3Q aw78cRCJG3CZpw0cmMm2vxlraHbB3+XKkQfQGRgEV4C88MO1W7WTyrwCJg9akVYz l2sG3WANdBs46RHAKDbTBXOKiib5tfTUFRgDqtFJ9/wKJ9mNhhYHPuCkjIt2yPf4 iq/3GeSNdr5stqSN0Wa7w6baqxbuZgqURtOCayAcpA== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIGMzCCBBugAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJVUzEL 
MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv Y2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0EwHhcNMTUwNzE2MDQyNTAz WhcNMjUwNzEzMDQyNTAzWjBfMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTEL MAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv Y2tlcjELMAkGA1UECAwCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC AQCwVVD4pK7z7pXPpJbaZ1Hg5eRXIcaYtbFPCnN0iqy9HsVEGnEn5BPNSEsuP+m0 5N0qVV7DGb1SjiloLXD1qDDvhXWk+giS9ppqPHPLVPB4bvzsqwDYrtpbqkYvO0YK 0SL3kxPXUFdlkFfgu0xjlczm2PhWG3Jd8aAtspL/L+VfPA13JUaWxSLpui1In8rh gAyQTK6Q4Of6GbJYTnAHb59UoLXSzB5AfqiUq6L7nEYYKoPflPbRAIWL/UBm0c+H ocms706PYpmPS2RQv3iOGmnn9hEVp3P6jq7WAevbA4aYGx5EsbVtYABqJBbFWAuw wTGRYmzn0Mj0eTMge9ztYB2/2sxdTe6uhmFgpUXngDqJI5O9N3zPfvlEImCky3HM jJoL7g5smqX9o1P+ESLh0VZzhh7IDPzQTXpcPIS/6z0l22QGkK/1N1PaADaUHdLL vSav3y2BaEmPvf2fkZj8yP5eYgi7Cw5ONhHLDYHFcl9Zm/ywmdxHJETz9nfgXnsW HNxDqrkCVO46r/u6rSrUt6hr3oddJG8s8Jo06earw6XU3MzM+3giwkK0SSM3uRPq 4AscR1Tv+E31AuOAmjqYQoT29bMIxoSzeljj/YnedwjW45pWyc3JoHaibDwvW9Uo GSZBVy4hrM/Fa7XCWv1WfHNW1gDwaLYwDnl5jFmRBvcfuQIDAQABo4H5MIH2MIGR BgNVHSMEgYkwgYaAFHUM1U3E4WyL1nvFd+dPY8f4O2hZoWOkYTBfMQswCQYDVQQG EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNV BAoMBkRvY2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0GCCQDCeDLbemIT SzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEF BQcDATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFHe48hcBcAp0bUVlTxXeRA4o E16pMA0GCSqGSIb3DQEBCwUAA4ICAQAWUtAPdUFpwRq+N1SzGUejSikeMGyPZscZ JBUCmhZoFufgXGbLO5OpcRLaV3Xda0t/5PtdGMSEzczeoZHWknDtw+79OBittPPj Sh1oFDuPo35R7eP624lUCch/InZCphTaLx9oDLGcaK3ailQ9wjBdKdlBl8KNKIZp a13aP5rnSm2Jva+tXy/yi3BSds3dGD8ITKZyI/6AFHxGvObrDIBpo4FF/zcWXVDj paOmxplRtM4Hitm+sXGvfqJe4x5DuOXOnPrT3dHvRT6vSZUoKobxMqmRTOcrOIPa EeMpOobshORuRntMDYvvgO3D6p6iciDW2Vp9N6rdMdfOWEQN8JVWvB7IxRHk9qKJ vYOWVbczAt0qpMvXF3PXLjZbUM0knOdUKIEbqP4YUbgdzx6RtgiiY930Aj6tAtce 0fpgNlvjMRpSBuWTlAfNNjG/YhndMz9uI68TMfFpR3PcgVIv30krw/9VzoLi2Dpe ow6DrGO6oi+DhN78P4jY/O9UczZK2roZL1Oi5P0RIxf23UZC7x1DlcN3nBr4sYSv 
rBx4cFTMNpwU+nzsIi4djcFDKmJdEOyjMnkP2v0Lwe7yvK08pZdEu+0zbrq17kue XpXLc7K68QB15yxzGylU5rRwzmC/YsAVyE4eoGu8PxWxrERvHby4B8YP0vAfOraL lKmXlK4dTg== -----END CERTIFICATE----- notary-0.1/fixtures/notary-signer.key000066400000000000000000000032141262207326400200560ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA2E7z4r3FPoz11AL3QfGzpuZNdZDMTmhXaQt5Uqo6PAC8a3rA ETZckIBtNLcU5Eg7Kg5VANUK/Y+TaVlcZapKsfxJja6aNEkiE8OKo31Xkz+ByYyb YRSDCanaXhuwOTUxDoZJu53mSNgZlyn25fEBU7y18tUtAXuiEliqJ7Ek359mRDLF OBKqsjsworWlS8Zf99roRfOkrDFNA8leIb9lBbQ+f6B2vP78J/Q9xXJX8aFzZFH5 aMrOqoCINQmgS0qb/FBVFxI9tqBUsJ7QpmvtWa0NacexS/1kH0FE2UiVFUM6FUMI Jwy7/MS1zG4fyNrt9p+LnE23q8IGYe4JdC1NSwIDAQABAoIBAHykYhyRxYrZpv3Y B6pUIHVX1+Ka4V98+IFrPynHNW9F7UzxmqNQc95AYq0xojQ4+v6s64ZjPMYHaaYW /AsJKamN+sRNjEX8rko9LzIuE7yhp6QABbjXHPsAiPgZdF5CrFX2Q558yinHfFeC sualDWK3JxEajaiBGU8BEGt2xAymuWACGblrM1aAEZa8B84TW3CzzcdyzAkn8P3e piJCe+DWMc33441r0KlV5GruwF9ewXiWzZtXAOiP/0xEDICFdlFWbO39myMpxDdU Y0uZ+zmn2G3gz2tz25thH0Wl7mDQ3AA0VlHurgPBBEekeZPQmjiKW+F4slCzXvuy kW/urIECgYEA/LhY+OWlZVXzIEly7z1/cU9/WImqTs2uRKDeQHMwZrd7D9BXkJuQ jPN+jZlMYBBrxoaCywbMrgB80Z3MgGHaSx9OIDEZmaxyuQv0zQJCMogysYkbCcaD mHYnyAf7OXa708Z168WAisEhrwa/DXBn3/hPoBkrbMsuPF/J+tEP7lsCgYEA2x2g 86SitgPVeNV3iuZ6D/SV0QIbDWOYoST2GQn2LnfALIOrzpXRClOSQZ2pGtg9gYo1 owUyyOSv2Fke93p3ufHv3Gqvjl55lzBVV0siHkEXwHcol36DDGQcskVnXJqaL3IF tiOisuJS9A7PW7gEi0miyGzzB/kh/IEWHKqLL9ECgYEAoBOFB+MuqMmQftsHWlLx 7qwUVdidb90IjZ/4J4rPFcESyimFzas8HIv/lWGM5yx/l/iL0F42N+FHLt9tMcTJ qNvjeLChLp307RGNtm2/0JJEyf+2iLKdmGz/Nc0YbIWw46vJ9dXcXgeHdn4ndjPF GDEI/rfysa7hUoy6O41BMhECgYBPJsLPgHdufLAOeD44pM0PGnFMERCoo4OtImbr 4JdXbdazvdTASYo7yriYj1VY5yhAtSZu/x+7RjDnXDo9d7XsK6NT4g4Mxb/yh3ks kW1/tE/aLLEzGHZKcZeUJlISN57e6Ld7dh/9spf4pajuHuk1T6JH+GNKTAqk5hSQ wmKJIQKBgCGBWGvJrCeT5X9oHdrlHj2YoKvIIG1eibagcjcKemD7sWzi7Q4P7JIo xeX8K1WVxdBpo4/RiQcGFmwSmSUKwwr1dO00xtjxIl7ip4DU+WAM7CdmcOIOMbr4 rP9T/wy1ZBkERCIw2ElybTzB8yuOlNLuOMhUeU55xUMFNYYrWEp2 -----END RSA PRIVATE KEY----- 
notary-0.1/fixtures/root-ca.crt000066400000000000000000000036711262207326400166300ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFhjCCA26gAwIBAgIJAMJ4Mtt6YhNLMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0G A1UECgwGRG9ja2VyMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTAeFw0xNTA3 MTYwNDI1MDBaFw0yNTA3MTMwNDI1MDBaMF8xCzAJBgNVBAYTAlVTMQswCQYDVQQI DAJDQTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzEPMA0GA1UECgwGRG9ja2VyMRow GAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP ADCCAgoCggIBAMzUzq2O07tm3A/4emCN/294jUBnNeGlM4TgsB8W9ingw9CU7oBn CRTK94cGDHTb5ofcj9Kt4/dSL52uJpkZshmAga4fDDhtntnUHaKYzjoZSKZtq7qV hC1Dah7s3zftZn4NHiRe82loXH/W0//0MWdQCaLc8E0rd/amrd6EO+5SUwF4dXSk nWoo3oxtOEnb1uQcWWIiwLRmd1pw3PW/bt/SHssD5dJ+78/nR1qCHhJyLVpylMiy WijkMKW7mbQFefuCOsQ0QvGG3BrTLu+fVs9GYNzHC+L1bSQbfts4nOSodcB/klhd mbgVW8mrgeHww/jgb2WJW9Y3RFNp/VEuhVrHiz/NW2qE3nPLEnu0vd50jYIXbvBm fbhCoJntYAiCY0l8v+POgP3ACtsS41rcn8VyD3Ho4u4186ki71+QRQTsUk2MXRV6 AKQ9u4Cl4d0tV1oHjVyiKDv8PNakNrI48KmnF9R9wMgzDHIoBVQZraVTyPwW9HvS 8K3Lsm6QAE7pErideOyBViOiiqvW7rUaLERTkhGirX2RChwhYLtYIj0LitgzdaT4 JD1JxonqN30g2jk1+mJKMEeWBMTjFqtzuQPYH3HkHKxoNfvEuL5fsZSmhV/mR+yW lSe1f8r1qpAACj/K3mome/z8UhNxzEW8TCYkwamLkAPF485W64KIYI1tAgMBAAGj RTBDMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgFGMB0GA1UdDgQW BBR1DNVNxOFsi9Z7xXfnT2PH+DtoWTANBgkqhkiG9w0BAQsFAAOCAgEAUbbrI3OQ 5XO8HHpoTwVqFzSzKOuSSrcMGrv67rn+2HvVJYfxtusZBS6+Rw7QVG3daPS+pSNX NM1qyin3BjpNR2lI771yyK/yjjNH9pZPR+8ThJ8/77roLJudTCCPt49PoYgSQQsp IB75PlqnTWVwccW9pm2zSdqDxFeZpTpwEvgyX8MNCfYeynxp5+S81593z8iav16u t2I38NyFJKuxin9zNkxkpf/a9Pr/Gk56gw1OfHXp+sW/6KIzx8fjQuL6P8HEpwVG zXXA8fMX91cIFI4+DTc8mPjtYvT6/PzDWE/q6FZZnbHJ50Ngg5D8uFN5lLgZFNtf ITeoNjTk2koq8vvTW8FDpMkb50zqGdBoIdDtRFd3oot+MEg+6mba+Kttwg05aJ9a SIIxjvU4NH6qOXBSgzaI1hMr7DTBnaXxMEBiaNaPg2nqi6uhaUOcVw3F01yBfGfX aGsNLKpFiKFYQfOR1M2ho/7AL19GYQD3IFWDJqk0/eQLfFR74iKVMz6ndwt9F7A8 0xxGXGpw2NJQTWLQui4Wzt33q541ihzL7EDtybBScUdIOIEO20mHr2czFoTL9IKx 
rU0Ck5BMyMBB+DOppP+TeKjutAI1yRVsNoabOuK4oo/FmqysgQoHEE+gVUThrrpE wV1EBILkX6O4GiMqu1+x92/yCmlKEg0Q6MM= -----END CERTIFICATE----- notary-0.1/fixtures/secure.example.com.crt000066400000000000000000000035701262207326400207570ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIFVzCCAz+gAwIBAgIBAzANBgkqhkiG9w0BAQsFADBfMRowGAYDVQQDDBFOb3Rh cnkgVGVzdGluZyBDQTELMAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lz Y28xDzANBgNVBAoMBkRvY2tlcjELMAkGA1UECAwCQ0EwHhcNMTUwNzE2MDQyNTUw WhcNMTYwNzE1MDQyNTUwWjBgMRswGQYDVQQDDBJzZWN1cmUuZXhhbXBsZS5jb20x CzAJBgNVBAYTAlVTMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQKDAZE b2NrZXIxCzAJBgNVBAgMAkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC AQEAmLYiYCTAWJBWAuxZLqVmV4FiUdGgEqoQvCbN73zF/mQfhq0CITo6xSxs1QiG DOzUtkpzXzziSj4J5+et4JkFleeEKaMcHadeIsSlHGvVtXDv93oR3ydmfZO+ULRU 8xHloqcLr1KrOP1daLfdMRbactd75UQgvw9XTsdeMVX5AlicSENVKV+AQXvVpv8P T10MSvlBFam4reXuY/SkeMbIaW5pFu6AQv3Zmftt2ta0CB9kb1mYd+OKru8Hnnq5 aJw6R3GhP0TBd25P1PkiSxM2KGYZZk0W/NZqLK9/LTFKTNCv7VjCbysVo7HxCY0b Qe/bDP82v7SnLtb3aZogfva4HQIDAQABo4IBGzCCARcwgYgGA1UdIwSBgDB+gBR3 uPIXAXAKdG1FZU8V3kQOKBNeqaFjpGEwXzELMAkGA1UEBhMCVVMxCzAJBgNVBAgM AkNBMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMQ8wDQYDVQQKDAZEb2NrZXIxGjAY BgNVBAMMEU5vdGFyeSBUZXN0aW5nIENBggEBMAwGA1UdEwEB/wQCMAAwHQYDVR0l BBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMA4GA1UdDwEB/wQEAwIFoDAuBgNVHREE JzAlghJzZWN1cmUuZXhhbXBsZS5jb22CCWxvY2FsaG9zdIcEfwAAATAdBgNVHQ4E FgQUDPD4CaXRbu5QBb5e8y8odvTqW4IwDQYJKoZIhvcNAQELBQADggIBAJOylmc4 n7J64GKsP/xhUdKKV9/KD+ufzpKbrLIojWn7rTye70vY0OjQFuOXc54yjMSIL+/5 mlNQ7Y/fJS8xdH79ER+4nWMuD2eciLnsLgbYUk4hiyby8/5V+/YqPeCpPCn6TJRK a0E6lV/UjXJdrigJvJoNOR8ZgtEZ/QPgjJEVUsg47dtqzsDpgeS8dcjuMWpZxP02 qavFLDjSFzVH+2D6Oty1DQplm//3XaRXh23dOCP8wj/bxvnVToFWs+zO4uT1LF/S KXCNQoeiGxWHyzrXFVVtVnC9FSNz0Gg2/Em1tfRgvhUn4KLJcvZW9o1R7VVCX0L1 0x0fyK3VWeWc86a5a681amKZSEbjAmIVZF9zOX0PODC8oy+zqOPWa0WCl4K6zDC6 2IIFBBNy50ZS2iON6RY6mE7NmA78gckf415cqIVrloYJbbTDepfhTV218SLepph4 uGb2/sxklfHOYE+rpHciibWwXrwlODJaXuzXFhplUd/ovdujBNAIHkBfzy+Y6z2s 
bwZcfqD4NIb/AGhIyW2vbvu4zslDp1MEsLoaO+SzirMzkyMBlKRt120tws4EkUlm /QhjSUoZpCAsy5C/pV4+bx0SysNd/S+kKaRZc/U6Y3ZYBFhszLh7JaLXKmk7wHnE rggm6oz4L/GyPWc/FjfnsefWKM2yC3QDhjvj -----END CERTIFICATE----- notary-0.1/fixtures/secure.example.com.key000066400000000000000000000032141262207326400207520ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAmLYiYCTAWJBWAuxZLqVmV4FiUdGgEqoQvCbN73zF/mQfhq0C ITo6xSxs1QiGDOzUtkpzXzziSj4J5+et4JkFleeEKaMcHadeIsSlHGvVtXDv93oR 3ydmfZO+ULRU8xHloqcLr1KrOP1daLfdMRbactd75UQgvw9XTsdeMVX5AlicSENV KV+AQXvVpv8PT10MSvlBFam4reXuY/SkeMbIaW5pFu6AQv3Zmftt2ta0CB9kb1mY d+OKru8Hnnq5aJw6R3GhP0TBd25P1PkiSxM2KGYZZk0W/NZqLK9/LTFKTNCv7VjC bysVo7HxCY0bQe/bDP82v7SnLtb3aZogfva4HQIDAQABAoIBAQCLPj+X5MrRtkIH BlTHGJ95mIr6yaYofpMlzEgoX1/1dnvcg/IWNA8UbE6L7Oq17FiEItyR8WTwhyLn JrO/wCd8qQ40HPrs+wf1sdJPWPATMfhMcizLihSE2mtFETkILcByD9iyszFWlIdQ jZ4NPaZP4rWgtf8Z1zYnqdf0Kk0T2imFya0qyoRLo40kxeb4p5K53JD7rPLQNyvO YeFXTuKxBrFEMs6/wFjl+TO4nfHQXQlgQp4MNd9L5fEQBj+TvGVX+zcQEmzxljK8 zFNXyxvXgjBPD+0V7yRhTYjrUfZJ4RX1yKDpdsva6BXL7t9hNEg/aGnKRDYF3i5q WQz8csCBAoGBAMfdtAr3RCuCxe0TIVBon5wubau6HLOxorcXSvxO5PO2kzhy3+GY xcCMJ+Wo0dTFXjQD3oxRKuDrPRK7AX/grYn7qJo6W7SM9xYEq3HspJJFGkcRsvem MALt8bvG5NkGmLJD+pTOKVaTZRjW3BM6GcMzBgsLynQcLllRtNI8Hcw9AoGBAMOa CMsWQfoOUjUffrXN0UnXLEPEeazPobnCHVtE244FdX/BFu5WMA7qqaPRyvnfK0Vl vF5sGNiBCOnq1zjYee6FD2eyAzVmWJXM1DB4Ewp4ZaABS0ZCZgNfyd1badY4IZpw pjYEQprguw+J8yZItNJRo+WBmnSgZy6o1bpDaflhAoGAYf61GS9VkFPlQbFAg1FY +NXW1f1Bt2VgV48nKAByx3/8PRAt70ndo+PUaAlXIJDI+I3xHzFo6bDNWBKy0IVT 8TSf3UbB0gvP1k7h1NDnfAQ/txrZeg1Uuwr5nE0Pxc0zLyyffzh6EkXgqsYmT5MM MKYiz2WvlTCAFTE3jGEHZy0CgYBti/cgxnZs9VhVKC5u47YzBK9lxMPgZOjOgEiw tP/Bqo0D38BX+y0vLX2UogprpvE1DKVSvHetyZaUa1HeJF8llp/qE2h4n7k9LFoq SxVe588CrbbawpUfjqYfsvKzZvxq4mw0FG65DuO08C2dY1rh75c7EjrO1obzOtt4 VgkkAQKBgDnRyLnzlMfvjCyW9+cHbURQNe2iupfnlrXWEntg56USBVrFtfRQxDRp fBtlq+0BNfDVdoVNasTCBW16UKoRBH1/k5idz5QPEbKY2055sNxHMVg0uzdb4HXr 73uaYzNrT8P7wyHFF3UL5bd0aO5DT1VYvGlHHgOhCyqcM+RBgPBS -----END RSA PRIVATE KEY----- 
notary-0.1/fixtures/self-signed_docker.com-notary.crt000066400000000000000000000011701262207326400230720ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIBpDCCAUqgAwIBAgIRAIquZ7lRJj1Um030Kd7GFXgwCgYIKoZIzj0EAwIwODEa MBgGA1UEChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20v bm90YXJ5MB4XDTE1MDcxNzAwMzE1NFoXDTE3MDcxNjAwMzE1NFowODEaMBgGA1UE ChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20vbm90YXJ5 MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEjnnozttLzYgIN5fL8ZwYbsMig0pj HSNupVTPjDIrLUYUnoQfG6IQ0E2BMixEGnI/A9WreeXP2oz06LZ4SROMQqM1MDMw DgYDVR0PAQH/BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMDMAwGA1UdEwEB/wQC MAAwCgYIKoZIzj0EAwIDSAAwRQIgT9cxottjza9BBQcMsoB/Uf2JYXWgSkp9QMXT 8mG4mMICIQDMYWFdgn5u8nDeThJ+bG8Lu5nIGb/NWEOFtU0xQv913Q== -----END CERTIFICATE----- notary-0.1/fixtures/self-signed_secure.example.com.crt000077500000000000000000000011731262207326400232370ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIBqDCCAU6gAwIBAgIRAM1vKVhmZuWcrogc3ASBaZUwCgYIKoZIzj0EAwIwOjEb MBkGA1UEChMSc2VjdXJlLmV4YW1wbGUuY29tMRswGQYDVQQDExJzZWN1cmUuZXhh bXBsZS5jb20wHhcNMTUwNzE3MDU1NTIzWhcNMTcwNzE2MDU1NTIzWjA6MRswGQYD VQQKExJzZWN1cmUuZXhhbXBsZS5jb20xGzAZBgNVBAMTEnNlY3VyZS5leGFtcGxl LmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABI556M7bS82ICDeXy/GcGG7D IoNKYx0jbqVUz4wyKy1GFJ6EHxuiENBNgTIsRBpyPwPVq3nlz9qM9Oi2eEkTjEKj NTAzMA4GA1UdDwEB/wQEAwIAoDATBgNVHSUEDDAKBggrBgEFBQcDAzAMBgNVHRMB Af8EAjAAMAoGCCqGSM49BAMCA0gAMEUCIER2XCkQ8dUWBZEUeT5kABg7neiHPtSL VVE6bJxu2sxlAiEAkRG6u1ieXKGl38gUkCn75Yvo9nOSLdh0gtxUUcOXvUc= -----END CERTIFICATE----- notary-0.1/keystoremanager/000077500000000000000000000000001262207326400160725ustar00rootroot00000000000000notary-0.1/keystoremanager/keystoremanager.go000066400000000000000000000314651262207326400216320ustar00rootroot00000000000000package keystoremanager import ( "crypto/x509" "errors" "fmt" "path/filepath" "time" "github.com/Sirupsen/logrus" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" ) // 
KeyStoreManager is an abstraction around the root and non-root key stores, // and related CA stores type KeyStoreManager struct { trustedCAStore trustmanager.X509Store trustedCertificateStore trustmanager.X509Store } const ( trustDir = "trusted_certificates" rsaRootKeySize = 4096 // Used for new root keys ) // ErrValidationFail is returned when there is no valid trusted certificates // being served inside of the roots.json type ErrValidationFail struct { Reason string } // ErrValidationFail is returned when there is no valid trusted certificates // being served inside of the roots.json func (err ErrValidationFail) Error() string { return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason) } // ErrRootRotationFail is returned when we fail to do a full root key rotation // by either failing to add the new root certificate, or delete the old ones type ErrRootRotationFail struct { Reason string } // ErrRootRotationFail is returned when we fail to do a full root key rotation // by either failing to add the new root certificate, or delete the old ones func (err ErrRootRotationFail) Error() string { return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason) } // NewKeyStoreManager returns an initialized KeyStoreManager, or an error // if it fails to create the KeyFileStores or load certificates func NewKeyStoreManager(baseDir string) (*KeyStoreManager, error) { trustPath := filepath.Join(baseDir, trustDir) // Load all CAs that aren't expired and don't use SHA1 trustedCAStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool { return cert.IsCA && cert.BasicConstraintsValid && cert.SubjectKeyId != nil && time.Now().Before(cert.NotAfter) && cert.SignatureAlgorithm != x509.SHA1WithRSA && cert.SignatureAlgorithm != x509.DSAWithSHA1 && cert.SignatureAlgorithm != x509.ECDSAWithSHA1 }) if err != nil { return nil, err } // Load all individual (non-CA) certificates that aren't expired and 
don't use SHA1 trustedCertificateStore, err := trustmanager.NewX509FilteredFileStore(trustPath, func(cert *x509.Certificate) bool { return !cert.IsCA && time.Now().Before(cert.NotAfter) && cert.SignatureAlgorithm != x509.SHA1WithRSA && cert.SignatureAlgorithm != x509.DSAWithSHA1 && cert.SignatureAlgorithm != x509.ECDSAWithSHA1 }) if err != nil { return nil, err } return &KeyStoreManager{ trustedCAStore: trustedCAStore, trustedCertificateStore: trustedCertificateStore, }, nil } // TrustedCertificateStore returns the trusted certificate store being managed // by this KeyStoreManager func (km *KeyStoreManager) TrustedCertificateStore() trustmanager.X509Store { return km.trustedCertificateStore } // TrustedCAStore returns the CA store being managed by this KeyStoreManager func (km *KeyStoreManager) TrustedCAStore() trustmanager.X509Store { return km.trustedCAStore } // AddTrustedCert adds a cert to the trusted certificate store (not the CA // store) func (km *KeyStoreManager) AddTrustedCert(cert *x509.Certificate) { km.trustedCertificateStore.AddCert(cert) } // AddTrustedCACert adds a cert to the trusted CA certificate store func (km *KeyStoreManager) AddTrustedCACert(cert *x509.Certificate) { km.trustedCAStore.AddCert(cert) } /* ValidateRoot receives a new root, validates its correctness and attempts to do root key rotation if needed. First we list the current trusted certificates we have for a particular GUN. If that list is non-empty means that we've already seen this repository before, and have a list of trusted certificates for it. In this case, we use this list of certificates to attempt to validate this root file. If the previous validation suceeds, or in the case where we found no trusted certificates for this particular GUN, we check the integrity of the root by making sure that it is validated by itself. This means that we will attempt to validate the root data with the certificates that are included in the root keys themselves. 
If this last steps succeeds, we attempt to do root rotation, by ensuring that we only trust the certificates that are present in the new root. This mechanism of operation is essentially Trust On First Use (TOFU): if we have never seen a certificate for a particular CN, we trust it. If later we see a different certificate for that certificate, we return an ErrValidationFailed error. Note that since we only allow trust data to be downloaded over an HTTPS channel we are using the current public PKI to validate the first download of the certificate adding an extra layer of security over the normal (SSH style) trust model. We shall call this: TOFUS. */ func (km *KeyStoreManager) ValidateRoot(root *data.Signed, gun string) error { logrus.Debugf("entered ValidateRoot with dns: %s", gun) signedRoot, err := data.RootFromSigned(root) if err != nil { return err } // Retrieve all the leaf certificates in root for which the CN matches the GUN allValidCerts, err := validRootLeafCerts(signedRoot, gun) if err != nil { logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err) return &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"} } // Retrieve all the trusted certificates that match this gun certsForCN, err := km.trustedCertificateStore.GetCertificatesByCN(gun) if err != nil { // If the error that we get back is different than ErrNoCertificatesFound // we couldn't check if there are any certificates with this CN already // trusted. Let's take the conservative approach and return a failed validation if _, ok := err.(*trustmanager.ErrNoCertificatesFound); !ok { logrus.Debugf("error retrieving trusted certificates for: %s, %v", gun, err) return &ErrValidationFail{Reason: "unable to retrieve trusted certificates"} } } // If we have certificates that match this specific GUN, let's make sure to // use them first to validate that this new root is valid. 
if len(certsForCN) != 0 { logrus.Debugf("found %d valid root certificates for %s", len(certsForCN), gun) err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(certsForCN)) if err != nil { logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err) return &ErrValidationFail{Reason: "failed to validate data with current trusted certificates"} } } else { logrus.Debugf("found no currently valid root certificates for %s", gun) } // Validate the integrity of the new root (does it have valid signatures) err = signed.VerifyRoot(root, 0, trustmanager.CertsToKeys(allValidCerts)) if err != nil { logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err) return &ErrValidationFail{Reason: "failed to validate integrity of roots"} } // Getting here means A) we had trusted certificates and both the // old and new validated this root; or B) we had no trusted certificates but // the new set of certificates has integrity (self-signed) logrus.Debugf("entering root certificate rotation for: %s", gun) // Do root certificate rotation: we trust only the certs present in the new root // First we add all the new certificates (even if they already exist) for _, cert := range allValidCerts { err := km.trustedCertificateStore.AddCert(cert) if err != nil { // If the error is already exists we don't fail the rotation if _, ok := err.(*trustmanager.ErrCertExists); ok { logrus.Debugf("ignoring certificate addition to: %s", gun) continue } logrus.Debugf("error adding new trusted certificate for: %s, %v", gun, err) } } // Now we delete old certificates that aren't present in the new root for certID, cert := range certsToRemove(certsForCN, allValidCerts) { logrus.Debugf("removing certificate with certID: %s", certID) err = km.trustedCertificateStore.RemoveCert(cert) if err != nil { logrus.Debugf("failed to remove trusted certificate with keyID: %s, %v", certID, err) return &ErrRootRotationFail{Reason: "failed to rotate root keys"} } } logrus.Debugf("Root validation succeeded for %s", 
gun) return nil } // validRootLeafCerts returns a list of non-exipired, non-sha1 certificates whoose // Common-Names match the provided GUN func validRootLeafCerts(root *data.SignedRoot, gun string) ([]*x509.Certificate, error) { // Get a list of all of the leaf certificates present in root allLeafCerts, _ := parseAllCerts(root) var validLeafCerts []*x509.Certificate // Go through every leaf certificate and check that the CN matches the gun for _, cert := range allLeafCerts { // Validate that this leaf certificate has a CN that matches the exact gun if cert.Subject.CommonName != gun { logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s", cert.Subject.CommonName) continue } // Make sure the certificate is not expired if time.Now().After(cert.NotAfter) { logrus.Debugf("error leaf certificate is expired") continue } // We don't allow root certificates that use SHA1 if cert.SignatureAlgorithm == x509.SHA1WithRSA || cert.SignatureAlgorithm == x509.DSAWithSHA1 || cert.SignatureAlgorithm == x509.ECDSAWithSHA1 { logrus.Debugf("error certificate uses deprecated hashing algorithm (SHA1)") continue } validLeafCerts = append(validLeafCerts, cert) } if len(validLeafCerts) < 1 { logrus.Debugf("didn't find any valid leaf certificates for %s", gun) return nil, errors.New("no valid leaf certificates found in any of the root keys") } logrus.Debugf("found %d valid leaf certificates for %s", len(validLeafCerts), gun) return validLeafCerts, nil } // parseAllCerts returns two maps, one with all of the leafCertificates and one // with all the intermediate certificates found in signedRoot func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) { leafCerts := make(map[string]*x509.Certificate) intCerts := make(map[string][]*x509.Certificate) // Before we loop through all root keys available, make sure any exist rootRoles, ok := signedRoot.Signed.Roles["root"] if !ok { logrus.Debugf("tried to parse certificates 
from invalid root signed data") return nil, nil } logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs) // Iterate over every keyID for the root role inside of roots.json for _, keyID := range rootRoles.KeyIDs { // check that the key exists in the signed root keys map key, ok := signedRoot.Signed.Keys[keyID] if !ok { logrus.Debugf("error while getting data for keyID: %s", keyID) continue } // Decode all the x509 certificates that were bundled with this // Specific root key decodedCerts, err := trustmanager.LoadCertBundleFromPEM(key.Public()) if err != nil { logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err) continue } // Get all non-CA certificates in the decoded certificates leafCertList := trustmanager.GetLeafCerts(decodedCerts) // If we got no leaf certificates or we got more than one, fail if len(leafCertList) != 1 { logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID) continue } // Get the ID of the leaf certificate leafCert := leafCertList[0] leafID, err := trustmanager.FingerprintCert(leafCert) if err != nil { logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", keyID, err) continue } // Store the leaf cert in the map leafCerts[leafID] = leafCert // Get all the remainder certificates marked as a CA to be used as intermediates intermediateCerts := trustmanager.GetIntermediateCerts(decodedCerts) intCerts[leafID] = intermediateCerts } return leafCerts, intCerts } // certsToRemove returns all the certifificates from oldCerts that aren't present // in newCerts func certsToRemove(oldCerts, newCerts []*x509.Certificate) map[string]*x509.Certificate { certsToRemove := make(map[string]*x509.Certificate) // If no newCerts were provided if len(newCerts) == 0 { return certsToRemove } // Populate a map with all the IDs from newCert var newCertMap = make(map[string]struct{}) for _, cert := range newCerts { certID, err := 
trustmanager.FingerprintCert(cert) if err != nil { logrus.Debugf("error while fingerprinting root certificate with keyID: %s, %v", certID, err) continue } newCertMap[certID] = struct{}{} } // Iterate over all the old certificates and check to see if we should remove them for _, cert := range oldCerts { certID, err := trustmanager.FingerprintCert(cert) if err != nil { logrus.Debugf("error while fingerprinting root certificate with certID: %s, %v", certID, err) continue } if _, ok := newCertMap[certID]; !ok { certsToRemove[certID] = cert } } return certsToRemove } notary-0.1/keystoremanager/keystoremanager_test.go000066400000000000000000000670671262207326400227000ustar00rootroot00000000000000package keystoremanager import ( "bytes" "crypto/x509" "encoding/json" "io/ioutil" "os" "testing" "text/template" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/stretchr/testify/assert" ) type SignedRSARootTemplate struct { RootPem string } var passphraseRetriever = func(string, string, bool, int) (string, bool, error) { return "passphrase", false, nil } const validPEMEncodedRSARoot = 
`LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZLekNDQXhXZ0F3SUJBZ0lRUnlwOVFxY0pmZDNheXFkaml6OHhJREFMQmdrcWhraUc5dzBCQVFzd09ERWEKTUJnR0ExVUVDaE1SWkc5amEyVnlMbU52YlM5dWIzUmhjbmt4R2pBWUJnTlZCQU1URVdSdlkydGxjaTVqYjIwdgpibTkwWVhKNU1CNFhEVEUxTURjeE56QTJNelF5TTFvWERURTNNRGN4TmpBMk16UXlNMW93T0RFYU1CZ0dBMVVFCkNoTVJaRzlqYTJWeUxtTnZiUzl1YjNSaGNua3hHakFZQmdOVkJBTVRFV1J2WTJ0bGNpNWpiMjB2Ym05MFlYSjUKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUFvUWZmcnpzWW5zSDh2R2Y0Smg1NQpDajV3cmpVR3pEL3NIa2FGSHB0ako2VG9KR0p2NXlNQVB4enlJbnU1c0lvR0xKYXBuWVZCb0FVMFlnSTlxbEFjCllBNlN4YVN3Z202cnB2bW5sOFFuMHFjNmdlcjNpbnBHYVVKeWxXSHVQd1drdmNpbVFBcUhaeDJkUXRMN2c2a3AKcm1LZVRXcFdvV0x3M0pvQVVaVVZoWk1kNmEyMlpML0R2QXcrSHJvZ2J6NFhleWFoRmI5SUg0MDJ6UHhONnZnYQpKRUZURjBKaTFqdE5nME1vNHBiOVNIc01zaXcrTFpLN1NmZkhWS1B4dmQyMW0vYmlObXdzZ0V4QTNVOE9PRzhwCnV5Z2ZhY3lzNWM4K1pyWCtaRkcvY3Z3S3owazYvUWZKVTQwczZNaFh3NUMyV3R0ZFZtc0c5LzdyR0ZZakhvSUoKd2VEeXhnV2s3dnhLelJKSS91bjdjYWdESWFRc0tySlFjQ0hJR0ZSbHBJUjVUd1g3dmwzUjdjUm5jckRSTVZ2YwpWU0VHMmVzeGJ3N2p0eklwL3lwblZSeGNPbnk3SXlweWpLcVZlcVo2SGd4WnRUQlZyRjFPL2FIbzJrdmx3eVJTCkF1czRrdmg2ejMranpUbTlFemZYaVBRelk5QkVrNWdPTHhoVzlyYzZVaGxTK3BlNWxrYU4vSHlxeS9sUHVxODkKZk1yMnJyN2xmNVdGZEZuemU2V05ZTUFhVzdkTkE0TkUwZHlENTM0MjhaTFh4TlZQTDRXVTY2R2FjNmx5blE4bApyNXRQc1lJRlh6aDZGVmFSS0dRVXRXMWh6OWVjTzZZMjdSaDJKc3lpSXhnVXFrMm9veEU2OXVONDJ0K2R0cUtDCjFzOEcvN1Z0WThHREFMRkxZVG56THZzQ0F3RUFBYU0xTURNd0RnWURWUjBQQVFIL0JBUURBZ0NnTUJNR0ExVWQKSlFRTU1Bb0dDQ3NHQVFVRkJ3TURNQXdHQTFVZEV3RUIvd1FDTUFBd0N3WUpLb1pJaHZjTkFRRUxBNElDQVFCTQpPbGwzRy9YQno4aWRpTmROSkRXVWgrNXczb2ptd2FuclRCZENkcUVrMVdlbmFSNkR0Y2ZsSng2WjNmL213VjRvCmIxc2tPQVgxeVg1UkNhaEpIVU14TWljei9RMzhwT1ZlbEdQclduYzNUSkIrVktqR3lIWGxRRFZrWkZiKzQrZWYKd3RqN0huZ1hoSEZGRFNnam0zRWRNbmR2Z0RRN1NRYjRza09uQ05TOWl5WDdlWHhoRkJDWm1aTCtIQUxLQmoyQgp5aFY0SWNCRHFtcDUwNHQxNHJ4OS9KdnR5MGRHN2ZZN0k1MWdFUXBtNFMwMkpNTDV4dlRtMXhmYm9XSWhaT0RJCnN3RUFPK2VrQm9GSGJTMVE5S01QaklBdzNUckNISDh4OFhacTV6c1l0QUMxeVpIZENLYTI2YVdkeTU2QTllSGoKTzFWeHp3bWJOeVhSZW5WdUJZUCswd3IzSFZLRkc0Sko0WlpwTlp6UVc
vcHFFUGdoQ1RKSXZJdWVLNjUyQnlVYwovL3N2K25YZDVmMTlMZUVTOXBmMGwyNTNORGFGWlBiNmFlZ0tmcXVXaDhxbFFCbVVRMkd6YVRMYnRtTmQyOE02Clc3aUw3dGtLWmUxWm5CejlSS2d0UHJEampXR1pJbmpqY09VOEV0VDRTTHE3a0NWRG1QczVNRDh2YUFtOTZKc0UKam1MQzNVdS80azdIaURZWDBpMG1PV2tGalpRTWRWYXRjSUY1RlBTcHB3c1NiVzhRaWRuWHQ1NFV0d3RGREVQegpscGpzN3liZVFFNzFKWGNNWm5WSUs0YmpSWHNFRlBJOThScElsRWRlZGJTVWRZQW5jTE5KUlQ3SFpCTVBHU3daCjBQTkp1Z2xubHIzc3JWemRXMWR6MnhRamR2THd4eTZtTlVGNnJiUUJXQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K` const validCAPEMEncodeRSARoot = `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlHTXpDQ0JCdWdBd0lCQWdJQkFUQU5CZ2txaGtpRzl3MEJBUXNGQURCZk1Rc3dDUVlEVlFRR0V3SlZVekVMDQpNQWtHQTFVRUNBd0NRMEV4RmpBVUJnTlZCQWNNRFZOaGJpQkdjbUZ1WTJselkyOHhEekFOQmdOVkJBb01Ca1J2DQpZMnRsY2pFYU1CZ0dBMVVFQXd3UlRtOTBZWEo1SUZSbGMzUnBibWNnUTBFd0hoY05NVFV3TnpFMk1EUXlOVEF6DQpXaGNOTWpVd056RXpNRFF5TlRBeldqQmZNUm93R0FZRFZRUUREQkZPYjNSaGNua2dWR1Z6ZEdsdVp5QkRRVEVMDQpNQWtHQTFVRUJoTUNWVk14RmpBVUJnTlZCQWNNRFZOaGJpQkdjbUZ1WTJselkyOHhEekFOQmdOVkJBb01Ca1J2DQpZMnRsY2pFTE1Ba0dBMVVFQ0F3Q1EwRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDDQpBUUN3VlZENHBLN3o3cFhQcEpiYVoxSGc1ZVJYSWNhWXRiRlBDbk4waXF5OUhzVkVHbkVuNUJQTlNFc3VQK20wDQo1TjBxVlY3REdiMVNqaWxvTFhEMXFERHZoWFdrK2dpUzlwcHFQSFBMVlBCNGJ2enNxd0RZcnRwYnFrWXZPMFlLDQowU0wza3hQWFVGZGxrRmZndTB4amxjem0yUGhXRzNKZDhhQXRzcEwvTCtWZlBBMTNKVWFXeFNMcHVpMUluOHJoDQpnQXlRVEs2UTRPZjZHYkpZVG5BSGI1OVVvTFhTekI1QWZxaVVxNkw3bkVZWUtvUGZsUGJSQUlXTC9VQm0wYytIDQpvY21zNzA2UFlwbVBTMlJRdjNpT0dtbm45aEVWcDNQNmpxN1dBZXZiQTRhWUd4NUVzYlZ0WUFCcUpCYkZXQXV3DQp3VEdSWW16bjBNajBlVE1nZTl6dFlCMi8yc3hkVGU2dWhtRmdwVVhuZ0RxSkk1TzlOM3pQZnZsRUltQ2t5M0hNDQpqSm9MN2c1c21xWDlvMVArRVNMaDBWWnpoaDdJRFB6UVRYcGNQSVMvNnowbDIyUUdrSy8xTjFQYUFEYVVIZExMDQp2U2F2M3kyQmFFbVB2ZjJma1pqOHlQNWVZZ2k3Q3c1T05oSExEWUhGY2w5Wm0veXdtZHhISkVUejluZmdYbnNXDQpITnhEcXJrQ1ZPNDZyL3U2clNyVXQ2aHIzb2RkSkc4czhKbzA2ZWFydzZYVTNNek0rM2dpd2tLMFNTTTN1UlBxDQo0QXNjUjFUditFMzFBdU9BbWpxWVFvVDI5Yk1JeG9TemVsamovWW5lZHdqVzQ1cFd5YzNKb0hhaWJEd3ZXOVVvDQpHU1pCVnk0aHJNL0ZhN1hDV3YxV2ZITlcxZ0R3YUxZd0Ru
bDVqRm1SQnZjZnVRSURBUUFCbzRINU1JSDJNSUdSDQpCZ05WSFNNRWdZa3dnWWFBRkhVTTFVM0U0V3lMMW52RmQrZFBZOGY0TzJoWm9XT2tZVEJmTVFzd0NRWURWUVFHDQpFd0pWVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhGakFVQmdOVkJBY01EVk5oYmlCR2NtRnVZMmx6WTI4eER6QU5CZ05WDQpCQW9NQmtSdlkydGxjakVhTUJnR0ExVUVBd3dSVG05MFlYSjVJRlJsYzNScGJtY2dRMEdDQ1FEQ2VETGJlbUlUDQpTekFTQmdOVkhSTUJBZjhFQ0RBR0FRSC9BZ0VBTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGDQpCUWNEQVRBT0JnTlZIUThCQWY4RUJBTUNBVVl3SFFZRFZSME9CQllFRkhlNDhoY0JjQXAwYlVWbFR4WGVSQTRvDQpFMTZwTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElDQVFBV1V0QVBkVUZwd1JxK04xU3pHVWVqU2lrZU1HeVBac2NaDQpKQlVDbWhab0Z1ZmdYR2JMTzVPcGNSTGFWM1hkYTB0LzVQdGRHTVNFemN6ZW9aSFdrbkR0dys3OU9CaXR0UFBqDQpTaDFvRkR1UG8zNVI3ZVA2MjRsVUNjaC9JblpDcGhUYUx4OW9ETEdjYUszYWlsUTl3akJkS2RsQmw4S05LSVpwDQphMTNhUDVyblNtMkp2YSt0WHkveWkzQlNkczNkR0Q4SVRLWnlJLzZBRkh4R3ZPYnJESUJwbzRGRi96Y1dYVkRqDQpwYU9teHBsUnRNNEhpdG0rc1hHdmZxSmU0eDVEdU9YT25QclQzZEh2UlQ2dlNaVW9Lb2J4TXFtUlRPY3JPSVBhDQpFZU1wT29ic2hPUnVSbnRNRFl2dmdPM0Q2cDZpY2lEVzJWcDlONnJkTWRmT1dFUU44SlZXdkI3SXhSSGs5cUtKDQp2WU9XVmJjekF0MHFwTXZYRjNQWExqWmJVTTBrbk9kVUtJRWJxUDRZVWJnZHp4NlJ0Z2lpWTkzMEFqNnRBdGNlDQowZnBnTmx2ak1ScFNCdVdUbEFmTk5qRy9ZaG5kTXo5dUk2OFRNZkZwUjNQY2dWSXYzMGtydy85VnpvTGkyRHBlDQpvdzZEckdPNm9pK0RoTjc4UDRqWS9POVVjelpLMnJvWkwxT2k1UDBSSXhmMjNVWkM3eDFEbGNOM25CcjRzWVN2DQpyQng0Y0ZUTU5wd1UrbnpzSWk0ZGpjRkRLbUpkRU95ak1ua1AydjBMd2U3eXZLMDhwWmRFdSswemJycTE3a3VlDQpYcFhMYzdLNjhRQjE1eXh6R3lsVTVyUnd6bUMvWXNBVnlFNGVvR3U4UHhXeHJFUnZIYnk0QjhZUDB2QWZPcmFMDQpsS21YbEs0ZFRnPT0NCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0NCg==` const validIntermediateAndCertRSA = 
`LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpNSUlHTXpDQ0JCdWdBd0lCQWdJQkFUQU5CZ2txaGtpRzl3MEJBUXNGQURCZk1Rc3dDUVlEVlFRR0V3SlZVekVMDQpNQWtHQTFVRUNBd0NRMEV4RmpBVUJnTlZCQWNNRFZOaGJpQkdjbUZ1WTJselkyOHhEekFOQmdOVkJBb01Ca1J2DQpZMnRsY2pFYU1CZ0dBMVVFQXd3UlRtOTBZWEo1SUZSbGMzUnBibWNnUTBFd0hoY05NVFV3TnpFMk1EUXlOVEF6DQpXaGNOTWpVd056RXpNRFF5TlRBeldqQmZNUm93R0FZRFZRUUREQkZPYjNSaGNua2dWR1Z6ZEdsdVp5QkRRVEVMDQpNQWtHQTFVRUJoTUNWVk14RmpBVUJnTlZCQWNNRFZOaGJpQkdjbUZ1WTJselkyOHhEekFOQmdOVkJBb01Ca1J2DQpZMnRsY2pFTE1Ba0dBMVVFQ0F3Q1EwRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDDQpBUUN3VlZENHBLN3o3cFhQcEpiYVoxSGc1ZVJYSWNhWXRiRlBDbk4waXF5OUhzVkVHbkVuNUJQTlNFc3VQK20wDQo1TjBxVlY3REdiMVNqaWxvTFhEMXFERHZoWFdrK2dpUzlwcHFQSFBMVlBCNGJ2enNxd0RZcnRwYnFrWXZPMFlLDQowU0wza3hQWFVGZGxrRmZndTB4amxjem0yUGhXRzNKZDhhQXRzcEwvTCtWZlBBMTNKVWFXeFNMcHVpMUluOHJoDQpnQXlRVEs2UTRPZjZHYkpZVG5BSGI1OVVvTFhTekI1QWZxaVVxNkw3bkVZWUtvUGZsUGJSQUlXTC9VQm0wYytIDQpvY21zNzA2UFlwbVBTMlJRdjNpT0dtbm45aEVWcDNQNmpxN1dBZXZiQTRhWUd4NUVzYlZ0WUFCcUpCYkZXQXV3DQp3VEdSWW16bjBNajBlVE1nZTl6dFlCMi8yc3hkVGU2dWhtRmdwVVhuZ0RxSkk1TzlOM3pQZnZsRUltQ2t5M0hNDQpqSm9MN2c1c21xWDlvMVArRVNMaDBWWnpoaDdJRFB6UVRYcGNQSVMvNnowbDIyUUdrSy8xTjFQYUFEYVVIZExMDQp2U2F2M3kyQmFFbVB2ZjJma1pqOHlQNWVZZ2k3Q3c1T05oSExEWUhGY2w5Wm0veXdtZHhISkVUejluZmdYbnNXDQpITnhEcXJrQ1ZPNDZyL3U2clNyVXQ2aHIzb2RkSkc4czhKbzA2ZWFydzZYVTNNek0rM2dpd2tLMFNTTTN1UlBxDQo0QXNjUjFUditFMzFBdU9BbWpxWVFvVDI5Yk1JeG9TemVsamovWW5lZHdqVzQ1cFd5YzNKb0hhaWJEd3ZXOVVvDQpHU1pCVnk0aHJNL0ZhN1hDV3YxV2ZITlcxZ0R3YUxZd0RubDVqRm1SQnZjZnVRSURBUUFCbzRINU1JSDJNSUdSDQpCZ05WSFNNRWdZa3dnWWFBRkhVTTFVM0U0V3lMMW52RmQrZFBZOGY0TzJoWm9XT2tZVEJmTVFzd0NRWURWUVFHDQpFd0pWVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhGakFVQmdOVkJBY01EVk5oYmlCR2NtRnVZMmx6WTI4eER6QU5CZ05WDQpCQW9NQmtSdlkydGxjakVhTUJnR0ExVUVBd3dSVG05MFlYSjVJRlJsYzNScGJtY2dRMEdDQ1FEQ2VETGJlbUlUDQpTekFTQmdOVkhSTUJBZjhFQ0RBR0FRSC9BZ0VBTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQ0JnZ3JCZ0VGDQpCUWNEQVRBT0JnTlZIUThCQWY4RUJBTUNBVVl3SFFZRFZSME9CQllFRkhlNDhoY0JjQXAwYlVWbFR4WGVSQTRvDQpFMTZwTUEwR0NTcUdTSWIzRFF
FQkN3VUFBNElDQVFBV1V0QVBkVUZwd1JxK04xU3pHVWVqU2lrZU1HeVBac2NaDQpKQlVDbWhab0Z1ZmdYR2JMTzVPcGNSTGFWM1hkYTB0LzVQdGRHTVNFemN6ZW9aSFdrbkR0dys3OU9CaXR0UFBqDQpTaDFvRkR1UG8zNVI3ZVA2MjRsVUNjaC9JblpDcGhUYUx4OW9ETEdjYUszYWlsUTl3akJkS2RsQmw4S05LSVpwDQphMTNhUDVyblNtMkp2YSt0WHkveWkzQlNkczNkR0Q4SVRLWnlJLzZBRkh4R3ZPYnJESUJwbzRGRi96Y1dYVkRqDQpwYU9teHBsUnRNNEhpdG0rc1hHdmZxSmU0eDVEdU9YT25QclQzZEh2UlQ2dlNaVW9Lb2J4TXFtUlRPY3JPSVBhDQpFZU1wT29ic2hPUnVSbnRNRFl2dmdPM0Q2cDZpY2lEVzJWcDlONnJkTWRmT1dFUU44SlZXdkI3SXhSSGs5cUtKDQp2WU9XVmJjekF0MHFwTXZYRjNQWExqWmJVTTBrbk9kVUtJRWJxUDRZVWJnZHp4NlJ0Z2lpWTkzMEFqNnRBdGNlDQowZnBnTmx2ak1ScFNCdVdUbEFmTk5qRy9ZaG5kTXo5dUk2OFRNZkZwUjNQY2dWSXYzMGtydy85VnpvTGkyRHBlDQpvdzZEckdPNm9pK0RoTjc4UDRqWS9POVVjelpLMnJvWkwxT2k1UDBSSXhmMjNVWkM3eDFEbGNOM25CcjRzWVN2DQpyQng0Y0ZUTU5wd1UrbnpzSWk0ZGpjRkRLbUpkRU95ak1ua1AydjBMd2U3eXZLMDhwWmRFdSswemJycTE3a3VlDQpYcFhMYzdLNjhRQjE1eXh6R3lsVTVyUnd6bUMvWXNBVnlFNGVvR3U4UHhXeHJFUnZIYnk0QjhZUDB2QWZPcmFMDQpsS21YbEs0ZFRnPT0NCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0NCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQ0KTUlJRlZ6Q0NBeitnQXdJQkFnSUJBekFOQmdrcWhraUc5dzBCQVFzRkFEQmZNUm93R0FZRFZRUUREQkZPYjNSaA0KY25rZ1ZHVnpkR2x1WnlCRFFURUxNQWtHQTFVRUJoTUNWVk14RmpBVUJnTlZCQWNNRFZOaGJpQkdjbUZ1WTJseg0KWTI4eER6QU5CZ05WQkFvTUJrUnZZMnRsY2pFTE1Ba0dBMVVFQ0F3Q1EwRXdIaGNOTVRVd056RTJNRFF5TlRVdw0KV2hjTk1UWXdOekUxTURReU5UVXdXakJnTVJzd0dRWURWUVFEREJKelpXTjFjbVV1WlhoaGJYQnNaUzVqYjIweA0KQ3pBSkJnTlZCQVlUQWxWVE1SWXdGQVlEVlFRSERBMVRZVzRnUm5KaGJtTnBjMk52TVE4d0RRWURWUVFLREFaRQ0KYjJOclpYSXhDekFKQmdOVkJBZ01Ba05CTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQw0KQVFFQW1MWWlZQ1RBV0pCV0F1eFpMcVZtVjRGaVVkR2dFcW9RdkNiTjczekYvbVFmaHEwQ0lUbzZ4U3hzMVFpRw0KRE96VXRrcHpYenppU2o0SjUrZXQ0SmtGbGVlRUthTWNIYWRlSXNTbEhHdlZ0WER2OTNvUjN5ZG1mWk8rVUxSVQ0KOHhIbG9xY0xyMUtyT1AxZGFMZmRNUmJhY3RkNzVVUWd2dzlYVHNkZU1WWDVBbGljU0VOVktWK0FRWHZWcHY4UA0KVDEwTVN2bEJGYW00cmVYdVkvU2tlTWJJYVc1cEZ1NkFRdjNabWZ0dDJ0YTBDQjlrYjFtWWQrT0tydThIbm5xNQ0KYUp3NlIzR2hQMFRCZDI1UDFQa2lTeE0yS0dZWlprMFcvTlpxTEs5L0xURktUTkN2N1ZqQ2J5c1ZvN0h4Q1k
wYg0KUWUvYkRQODJ2N1NuTHRiM2Fab2dmdmE0SFFJREFRQUJvNElCR3pDQ0FSY3dnWWdHQTFVZEl3U0JnREIrZ0JSMw0KdVBJWEFYQUtkRzFGWlU4VjNrUU9LQk5lcWFGanBHRXdYekVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTQ0KQWtOQk1SWXdGQVlEVlFRSERBMVRZVzRnUm5KaGJtTnBjMk52TVE4d0RRWURWUVFLREFaRWIyTnJaWEl4R2pBWQ0KQmdOVkJBTU1FVTV2ZEdGeWVTQlVaWE4wYVc1bklFTkJnZ0VCTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEVlIwbA0KQkJZd0ZBWUlLd1lCQlFVSEF3SUdDQ3NHQVFVRkJ3TUJNQTRHQTFVZER3RUIvd1FFQXdJRm9EQXVCZ05WSFJFRQ0KSnpBbGdoSnpaV04xY21VdVpYaGhiWEJzWlM1amIyMkNDV3h2WTJGc2FHOXpkSWNFZndBQUFUQWRCZ05WSFE0RQ0KRmdRVURQRDRDYVhSYnU1UUJiNWU4eThvZHZUcVc0SXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBSk95bG1jNA0KbjdKNjRHS3NQL3hoVWRLS1Y5L0tEK3VmenBLYnJMSW9qV243clR5ZTcwdlkwT2pRRnVPWGM1NHlqTVNJTCsvNQ0KbWxOUTdZL2ZKUzh4ZEg3OUVSKzRuV011RDJlY2lMbnNMZ2JZVWs0aGl5Ynk4LzVWKy9ZcVBlQ3BQQ242VEpSSw0KYTBFNmxWL1VqWEpkcmlnSnZKb05PUjhaZ3RFWi9RUGdqSkVWVXNnNDdkdHF6c0RwZ2VTOGRjanVNV3BaeFAwMg0KcWF2RkxEalNGelZIKzJENk90eTFEUXBsbS8vM1hhUlhoMjNkT0NQOHdqL2J4dm5WVG9GV3Mrek80dVQxTEYvUw0KS1hDTlFvZWlHeFdIeXpyWEZWVnRWbkM5RlNOejBHZzIvRW0xdGZSZ3ZoVW40S0xKY3ZaVzlvMVI3VlZDWDBMMQ0KMHgwZnlLM1ZXZVdjODZhNWE2ODFhbUtaU0ViakFtSVZaRjl6T1gwUE9EQzhveSt6cU9QV2EwV0NsNEs2ekRDNg0KMklJRkJCTnk1MFpTMmlPTjZSWTZtRTdObUE3OGdja2Y0MTVjcUlWcmxvWUpiYlREZXBmaFRWMjE4U0xlcHBoNA0KdUdiMi9zeGtsZkhPWUUrcnBIY2lpYld3WHJ3bE9ESmFYdXpYRmhwbFVkL292ZHVqQk5BSUhrQmZ6eStZNnoycw0KYndaY2ZxRDROSWIvQUdoSXlXMnZidnU0enNsRHAxTUVzTG9hTytTemlyTXpreU1CbEtSdDEyMHR3czRFa1VsbQ0KL1FoalNVb1pwQ0FzeTVDL3BWNCtieDBTeXNOZC9TK2tLYVJaYy9VNlkzWllCRmhzekxoN0phTFhLbWs3d0huRQ0KcmdnbTZvejRML0d5UFdjL0ZqZm5zZWZXS00yeUMzUURoanZqDQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tDQo=` const signedRSARootTemplate = 
`{"signed":{"_type":"Root","consistent_snapshot":false,"expires":"2016-07-16T23:34:13.389129622-07:00","keys":{"1fc4fdc38f66558658c5c59b67f1716bdc6a74ef138b023ae5931db69f51d670":{"keytype":"ecdsa","keyval":{"private":null,"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nIgzLigo5D47dWQe1IUjzHXxvyx0j/OL16VQymuloWsgVDxxT6+mH3CeviMAs+/McnEPE9exnm6SQGR5x3XMw=="}},"23c29cc372109c819e081bc953b7657d05e3f968f03c21d0d75ea457590f3d14":{"keytype":"ecdsa","keyval":{"private":null,"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEClUFVWkc85OQScfTQRS02VaLIEaeCmxdwYS/hcTLVoTxlFfRfs7HyalTwXGAGO79XZZS+koE6s8D0xGcCJQkLQ=="}},"49cf5c6404a35fa41d5a5aa2ce539dfee0d7a2176d0da488914a38603b1f4292":{"keytype":"rsa-x509","keyval":{"private":null,"public":"{{.RootPem}}"}},"e3a5a4fdaf11ea1ec58f5efed6f3639b39cd4cfa1418c8b55c9a8c2447ace5d9":{"keytype":"ecdsa","keyval":{"private":null,"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw=="}}},"roles":{"root":{"keyids":["49cf5c6404a35fa41d5a5aa2ce539dfee0d7a2176d0da488914a38603b1f4292"],"threshold":1},"snapshot":{"keyids":["23c29cc372109c819e081bc953b7657d05e3f968f03c21d0d75ea457590f3d14"],"threshold":1},"targets":{"keyids":["1fc4fdc38f66558658c5c59b67f1716bdc6a74ef138b023ae5931db69f51d670"],"threshold":1},"timestamp":{"keyids":["e3a5a4fdaf11ea1ec58f5efed6f3639b39cd4cfa1418c8b55c9a8c2447ace5d9"],"threshold":1}},"version":2},"signatures":[{"keyid":"49cf5c6404a35fa41d5a5aa2ce539dfee0d7a2176d0da488914a38603b1f4292","method":"rsapss","sig":"YlZwtCj028Xc23+KHfj6govFEY6hMbBXO5HT20F0I5ZeIPb1l7OmkjEiwp9ZHusClY+QeqiP1CFh\n/AfCbv4tLanqMkXPtm8UJJ1hMZVq86coieB32PQDj9k6x1hErHzvPUbOzTRW2BQkFFMZFkLDAd06\npH8lmxyPLOhdkVE8qIT7sBCy/4bQIGfvEX6yCDz84MZdcLNX5B9mzGi9A7gDloh9IEZxA8UgoI18\nSYpv/fYeSZSqM/ws2G+kiELGgTWhcZ+gOlF7ArM/DOlcC/NYqcvY1ugE6Gn7G8opre6NOofdRp3w\n603A2rMMvYTwqKLY6oX/d+07A2+WGHXPUy5otCAybWOw2hIZ35Jjmh12g6Dc6Qk4K2zXwAgvWwBU\nWlT8MlP1Tf7f80jnGjh0aARlHI4LCxlYU5L/pCaYuHgynujvLuzoOuiiP
fJv7sYvKoQ8UieE1w//\nHc8E6tWtV5G2FguKLurMoKZ9FBWcanDO0fg5AWuG3qcgUJdvh9acQ33EKer1fqBxs6LSAUWo8rDt\nQkg+b55AW0YBukAW9IAfMySQGAS2e3mHZ8nK/ijaygCRu7/P+NgKY9/zpmfL2xgcNslLcANcSOOt\nhiJS6yqYM9i9G0af0yw/TxAT4ntwjVm8u52UyR/hXIiUc/mjZcYRbSmJOHws902+i+Z/qv72knk="}]}` func TestCertsToRemove(t *testing.T) { // Get a few certificates to test with cert1, err := trustmanager.LoadCertFromFile("../fixtures/secure.example.com.crt") assert.NoError(t, err) cert1KeyID, err := trustmanager.FingerprintCert(cert1) assert.NoError(t, err) // Get intermediate certificate cert2, err := trustmanager.LoadCertFromFile("../fixtures/self-signed_secure.example.com.crt") assert.NoError(t, err) cert2KeyID, err := trustmanager.FingerprintCert(cert2) assert.NoError(t, err) // Get leaf certificate cert3, err := trustmanager.LoadCertFromFile("../fixtures/self-signed_docker.com-notary.crt") assert.NoError(t, err) cert3KeyID, err := trustmanager.FingerprintCert(cert3) assert.NoError(t, err) // Call CertsToRemove with only one old and one new oldCerts := []*x509.Certificate{cert1} newCerts := []*x509.Certificate{cert2} certs := certsToRemove(oldCerts, newCerts) assert.Len(t, certs, 1) _, ok := certs[cert1KeyID] assert.True(t, ok) // Call CertsToRemove with two old and one new oldCerts = []*x509.Certificate{cert1, cert2} newCerts = []*x509.Certificate{cert3} certs = certsToRemove(oldCerts, newCerts) assert.Len(t, certs, 2) _, ok = certs[cert1KeyID] assert.True(t, ok) _, ok = certs[cert2KeyID] assert.True(t, ok) _, ok = certs[cert3KeyID] assert.False(t, ok) // Call CertsToRemove with two new and one old oldCerts = []*x509.Certificate{cert3} newCerts = []*x509.Certificate{cert2, cert1} certs = certsToRemove(oldCerts, newCerts) assert.Len(t, certs, 1) _, ok = certs[cert3KeyID] assert.True(t, ok) _, ok = certs[cert1KeyID] assert.False(t, ok) _, ok = certs[cert2KeyID] assert.False(t, ok) // Call CertsToRemove with three old certs and no new oldCerts = []*x509.Certificate{cert1, cert2, cert3} newCerts = 
[]*x509.Certificate{} certs = certsToRemove(oldCerts, newCerts) assert.Len(t, certs, 0) _, ok = certs[cert1KeyID] assert.False(t, ok) _, ok = certs[cert2KeyID] assert.False(t, ok) _, ok = certs[cert3KeyID] assert.False(t, ok) // Call CertsToRemove with three new certs and no old oldCerts = []*x509.Certificate{} newCerts = []*x509.Certificate{cert1, cert2, cert3} certs = certsToRemove(oldCerts, newCerts) assert.Len(t, certs, 0) _, ok = certs[cert1KeyID] assert.False(t, ok) _, ok = certs[cert2KeyID] assert.False(t, ok) _, ok = certs[cert3KeyID] assert.False(t, ok) } func TestValidateRoot(t *testing.T) { var testSignedRoot data.Signed var signedRootBytes bytes.Buffer // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) assert.NoError(t, err, "failed to create a temporary directory: %s", err) // Create a FileStoreManager keyStoreManager, err := NewKeyStoreManager(tempBaseDir) assert.NoError(t, err) // Execute our template templ, _ := template.New("SignedRSARootTemplate").Parse(signedRSARootTemplate) templ.Execute(&signedRootBytes, SignedRSARootTemplate{RootPem: validPEMEncodedRSARoot}) // Unmarshal our signedroot json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot) // This call to ValidateRoot will succeed since we are using a valid PEM // encoded certificate, and have no other certificates for this CN err = keyStoreManager.ValidateRoot(&testSignedRoot, "docker.com/notary") assert.NoError(t, err) // This call to ValidateRoot will fail since we are passing in a dnsName that // doesn't match the CN of the certificate. 
err = keyStoreManager.ValidateRoot(&testSignedRoot, "diogomonica.com/notary") if assert.Error(t, err, "An error was expected") { assert.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}) } // // This call to ValidateRoot will fail since we are passing an unparsable RootSigned // // Execute our template deleting the old buffer first signedRootBytes.Reset() templ, _ = template.New("SignedRSARootTemplate").Parse(signedRSARootTemplate) templ.Execute(&signedRootBytes, SignedRSARootTemplate{RootPem: "------ ABSOLUTELY NOT A PEM -------"}) // Unmarshal our signedroot json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot) err = keyStoreManager.ValidateRoot(&testSignedRoot, "docker.com/notary") assert.Error(t, err, "illegal base64 data at input byte") // // This call to ValidateRoot will fail since we are passing an invalid PEM cert // // Execute our template deleting the old buffer first signedRootBytes.Reset() templ, _ = template.New("SignedRSARootTemplate").Parse(signedRSARootTemplate) templ.Execute(&signedRootBytes, SignedRSARootTemplate{RootPem: "LS0tLS1CRUdJTiBDRVJU"}) // Unmarshal our signedroot json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot) err = keyStoreManager.ValidateRoot(&testSignedRoot, "docker.com/notary") if assert.Error(t, err, "An error was expected") { assert.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}) } // // This call to ValidateRoot will fail since we are passing only CA certificate // This will fail due to the lack of a leaf certificate // // Execute our template deleting the old buffer first signedRootBytes.Reset() templ, _ = template.New("SignedRSARootTemplate").Parse(signedRSARootTemplate) templ.Execute(&signedRootBytes, SignedRSARootTemplate{RootPem: validCAPEMEncodeRSARoot}) // Unmarshal our signedroot json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot) err = keyStoreManager.ValidateRoot(&testSignedRoot, "docker.com/notary") if assert.Error(t, err, 
"An error was expected") { assert.Equal(t, err, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}) } // // This call to ValidateRoot will suceed in getting to the TUF validation, since // we are using a valid PEM encoded certificate chain of intermediate + leaf cert // that are signed by a trusted root authority and the leaf cert has a correct CN. // It will, however, fail to validate, because it has an invalid TUF signature // // Execute our template deleting the old buffer first signedRootBytes.Reset() templ, _ = template.New("SignedRSARootTemplate").Parse(signedRSARootTemplate) templ.Execute(&signedRootBytes, SignedRSARootTemplate{RootPem: validIntermediateAndCertRSA}) // Unmarshal our signedroot json.Unmarshal(signedRootBytes.Bytes(), &testSignedRoot) err = keyStoreManager.ValidateRoot(&testSignedRoot, "secure.example.com") if assert.Error(t, err, "An error was expected") { assert.Equal(t, err, &ErrValidationFail{Reason: "failed to validate integrity of roots"}) } } // TestValidateSuccessfulRootRotation runs through a full root certificate rotation // We test this with both an RSA and ECDSA root certificate func TestValidateSuccessfulRootRotation(t *testing.T) { testValidateSuccessfulRootRotation(t, data.ECDSAKey, data.ECDSAx509Key) if !testing.Short() { testValidateSuccessfulRootRotation(t, data.RSAKey, data.RSAx509Key) } } // Generates a KeyStoreManager in a temporary directory and returns the // manager and certificates for two keys which have been added to the keystore. // Also returns the temporary directory so it can be cleaned up. 
func filestoreWithTwoCerts(t *testing.T, gun, keyAlg string) ( string, *KeyStoreManager, *cryptoservice.CryptoService, []*x509.Certificate) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory: %s", err) fileKeyStore, err := trustmanager.NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err) cryptoService := cryptoservice.NewCryptoService(gun, fileKeyStore) // Create a FileStoreManager keyStoreManager, err := NewKeyStoreManager(tempBaseDir) assert.NoError(t, err) certs := make([]*x509.Certificate, 2) for i := 0; i < 2; i++ { pubKey, err := cryptoService.Create("root", keyAlg) assert.NoError(t, err) key, _, err := fileKeyStore.GetKey(pubKey.ID()) assert.NoError(t, err) cert, err := cryptoservice.GenerateCertificate(key, gun) assert.NoError(t, err) certs[i] = cert } return tempBaseDir, keyStoreManager, cryptoService, certs } func testValidateSuccessfulRootRotation(t *testing.T, keyAlg, rootKeyType string) { // The gun to test gun := "docker.com/notary" tempBaseDir, keyStoreManager, cs, certs := filestoreWithTwoCerts(t, gun, keyAlg) defer os.RemoveAll(tempBaseDir) origRootCert := certs[0] replRootCert := certs[1] // Add the old root cert part of trustedCertificates keyStoreManager.AddTrustedCert(origRootCert) // We need the PEM representation of the replacement key to put it into the TUF data origRootPEMCert := trustmanager.CertToPEM(origRootCert) replRootPEMCert := trustmanager.CertToPEM(replRootCert) // Tuf key with PEM-encoded x509 certificate origRootKey := data.NewPublicKey(rootKeyType, origRootPEMCert) replRootKey := data.NewPublicKey(rootKeyType, replRootPEMCert) rootRole, err := data.NewRole("root", 1, []string{replRootKey.ID()}, nil, nil) assert.NoError(t, err) testRoot, err := data.NewRoot( map[string]data.PublicKey{replRootKey.ID(): replRootKey}, map[string]*data.RootRole{"root": &rootRole.RootRole}, false, ) assert.NoError(t, err, "Failed to create new root") 
signedTestRoot, err := testRoot.ToSigned() assert.NoError(t, err) err = signed.Sign(cs, signedTestRoot, replRootKey) assert.NoError(t, err) err = signed.Sign(cs, signedTestRoot, origRootKey) assert.NoError(t, err) // // This call to ValidateRoot will succeed since we are using a valid PEM // encoded certificate, and have no other certificates for this CN // err = keyStoreManager.ValidateRoot(signedTestRoot, gun) assert.NoError(t, err) // Finally, validate the only trusted certificate that exists is the new one certs = keyStoreManager.trustedCertificateStore.GetCertificates() assert.Len(t, certs, 1) assert.Equal(t, certs[0], replRootCert) } // TestValidateRootRotationMissingOrigSig runs through a full root certificate rotation // where we are missing the original root key signature. Verification should fail. // We test this with both an RSA and ECDSA root certificate func TestValidateRootRotationMissingOrigSig(t *testing.T) { testValidateRootRotationMissingOrigSig(t, data.ECDSAKey, data.ECDSAx509Key) if !testing.Short() { testValidateRootRotationMissingOrigSig(t, data.RSAKey, data.RSAx509Key) } } func testValidateRootRotationMissingOrigSig(t *testing.T, keyAlg, rootKeyType string) { gun := "docker.com/notary" tempBaseDir, keyStoreManager, cryptoService, certs := filestoreWithTwoCerts( t, gun, keyAlg) defer os.RemoveAll(tempBaseDir) origRootCert := certs[0] replRootCert := certs[1] // Add the old root cert part of trustedCertificates keyStoreManager.AddTrustedCert(origRootCert) // We need the PEM representation of the replacement key to put it into the TUF data replRootPEMCert := trustmanager.CertToPEM(replRootCert) // Tuf key with PEM-encoded x509 certificate replRootKey := data.NewPublicKey(rootKeyType, replRootPEMCert) rootRole, err := data.NewRole("root", 1, []string{replRootKey.ID()}, nil, nil) assert.NoError(t, err) testRoot, err := data.NewRoot( map[string]data.PublicKey{replRootKey.ID(): replRootKey}, map[string]*data.RootRole{"root": &rootRole.RootRole}, 
false, ) assert.NoError(t, err, "Failed to create new root") signedTestRoot, err := testRoot.ToSigned() assert.NoError(t, err) // We only sign with the new key, and not with the original one. err = signed.Sign(cryptoService, signedTestRoot, replRootKey) assert.NoError(t, err) // This call to ValidateRoot will succeed since we are using a valid PEM // encoded certificate, and have no other certificates for this CN err = keyStoreManager.ValidateRoot(signedTestRoot, gun) assert.Error(t, err, "insuficient signatures on root") // Finally, validate the only trusted certificate that exists is still // the old one certs = keyStoreManager.trustedCertificateStore.GetCertificates() assert.Len(t, certs, 1) assert.Equal(t, certs[0], origRootCert) } // TestValidateRootRotationMissingNewSig runs through a full root certificate rotation // where we are missing the new root key signature. Verification should fail. // We test this with both an RSA and ECDSA root certificate func TestValidateRootRotationMissingNewSig(t *testing.T) { testValidateRootRotationMissingNewSig(t, data.ECDSAKey, data.ECDSAx509Key) if !testing.Short() { testValidateRootRotationMissingNewSig(t, data.RSAKey, data.RSAx509Key) } } func testValidateRootRotationMissingNewSig(t *testing.T, keyAlg, rootKeyType string) { gun := "docker.com/notary" tempBaseDir, keyStoreManager, cryptoService, certs := filestoreWithTwoCerts( t, gun, keyAlg) defer os.RemoveAll(tempBaseDir) origRootCert := certs[0] replRootCert := certs[1] // Add the old root cert part of trustedCertificates keyStoreManager.AddTrustedCert(origRootCert) // We need the PEM representation of the replacement key to put it into the TUF data origRootPEMCert := trustmanager.CertToPEM(origRootCert) replRootPEMCert := trustmanager.CertToPEM(replRootCert) // Tuf key with PEM-encoded x509 certificate origRootKey := data.NewPublicKey(rootKeyType, origRootPEMCert) replRootKey := data.NewPublicKey(rootKeyType, replRootPEMCert) rootRole, err := data.NewRole("root", 1, 
[]string{replRootKey.ID()}, nil, nil) assert.NoError(t, err) testRoot, err := data.NewRoot( map[string]data.PublicKey{replRootKey.ID(): replRootKey}, map[string]*data.RootRole{"root": &rootRole.RootRole}, false, ) assert.NoError(t, err, "Failed to create new root") signedTestRoot, err := testRoot.ToSigned() assert.NoError(t, err) // We only sign with the old key, and not with the new one err = signed.Sign(cryptoService, signedTestRoot, origRootKey) assert.NoError(t, err) // This call to ValidateRoot will succeed since we are using a valid PEM // encoded certificate, and have no other certificates for this CN err = keyStoreManager.ValidateRoot(signedTestRoot, gun) assert.Error(t, err, "insuficient signatures on root") // Finally, validate the only trusted certificate that exists is still // the old one certs = keyStoreManager.trustedCertificateStore.GetCertificates() assert.Len(t, certs, 1) assert.Equal(t, certs[0], origRootCert) } notary-0.1/notarymysql/000077500000000000000000000000001262207326400152745ustar00rootroot00000000000000notary-0.1/notarymysql/Dockerfile000066400000000000000000000004521262207326400172670ustar00rootroot00000000000000FROM ubuntu:14.04 MAINTAINER diogo@docker.com RUN apt-get update \ && apt-get install -y mysql-server \ && rm -rf /var/lib/mysql/mysql \ && rm -rf /var/lib/apt/lists/* ADD start /start ADD initial.sql /initial.sql ADD migrate.sql /migrate.sql RUN chmod 755 /start EXPOSE 3306 CMD ["/start"] notary-0.1/notarymysql/LICENSE000066400000000000000000000020661262207326400163050ustar00rootroot00000000000000The MIT License (MIT) Copyright (c) 2014 Sameer Naik Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is 
furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. notary-0.1/notarymysql/initial.sql000066400000000000000000000021521262207326400174460ustar00rootroot00000000000000DROP TABLE IF EXISTS `tuf_files`; CREATE TABLE `tuf_files` ( `id` int(11) NOT NULL AUTO_INCREMENT, `gun` varchar(255) NOT NULL, `role` varchar(255) NOT NULL, `version` int(11) NOT NULL, `data` longblob NOT NULL, PRIMARY KEY (`id`), UNIQUE KEY `gun` (`gun`,`role`,`version`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; DROP TABLE IF EXISTS `timestamp_keys`; CREATE TABLE `timestamp_keys` ( `gun` varchar(255) NOT NULL, `cipher` varchar(50) NOT NULL, `public` blob NOT NULL, PRIMARY KEY (`gun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; DROP TABLE IF EXISTS `private_keys`; CREATE TABLE `private_keys` ( `id` int(11) NOT NULL AUTO_INCREMENT, `created_at` timestamp NULL DEFAULT NULL, `updated_at` timestamp NULL DEFAULT NULL, `deleted_at` timestamp NULL DEFAULT NULL, `key_id` varchar(255) NOT NULL, `encryption_alg` varchar(255) NOT NULL, `keywrap_alg` varchar(255) NOT NULL, `algorithm` varchar(50) NOT NULL, `passphrase_alias` varchar(50) NOT NULL, `public` blob NOT NULL, `private` blob NOT NULL, PRIMARY KEY (`id`), UNIQUE (`key_id`), UNIQUE (`key_id`,`algorithm`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; notary-0.1/notarymysql/migrate.sql000066400000000000000000000012711262207326400174460ustar00rootroot00000000000000-- This migrates initial.sql 
to tables that are needed for GORM ALTER TABLE `tuf_files` ADD COLUMN `created_at` timestamp NULL DEFAULT NULL AFTER `id`, ADD COLUMN `updated_at` timestamp NULL DEFAULT NULL AFTER `created_at`, ADD COLUMN `deleted_at` timestamp NULL DEFAULT NULL AFTER `updated_at`, MODIFY `id` int(10) unsigned AUTO_INCREMENT; ALTER TABLE `timestamp_keys` ADD COLUMN `id` int(10) unsigned AUTO_INCREMENT FIRST, ADD COLUMN `created_at` timestamp NULL DEFAULT NULL AFTER `id`, ADD COLUMN `updated_at` timestamp NULL DEFAULT NULL AFTER `created_at`, ADD COLUMN `deleted_at` timestamp NULL DEFAULT NULL AFTER `updated_at`, DROP PRIMARY KEY, ADD PRIMARY KEY (`id`), ADD UNIQUE (`gun`); notary-0.1/notarymysql/start000077500000000000000000000074371262207326400163720ustar00rootroot00000000000000#!/bin/bash set -e DB_NAME='dockercondemo' DB_TABLE_FILES='tuf_files' DB_TABLE_KEYS='timestamp_keys' DB_USER='dockercondemo' DB_PASS='dockercondemo' DB_REMOTE_ROOT_NAME='dockercondemo' DB_REMOTE_ROOT_PASS='dockercondemo' DB_REMOTE_ROOT_HOST='' # disable error log sed 's/^log_error/# log_error/' -i /etc/mysql/my.cnf # Fixing StartUp Porblems with some DNS Situations and Speeds up the stuff # http://www.percona.com/blog/2008/05/31/dns-achilles-heel-mysql-installation/ cat > /etc/mysql/conf.d/mysql-skip-name-resolv.cnf </dev/null 2>&1 # start mysql server echo "Starting MySQL server..." /usr/bin/mysqld_safe >/dev/null 2>&1 & # wait for mysql server to start (max 30 seconds) timeout=30 echo -n "Waiting for database server to accept connections" while ! /usr/bin/mysqladmin -u root status >/dev/null 2>&1 do timeout=$(($timeout - 1)) if [ $timeout -eq 0 ]; then echo -e "\nCould not connect to database server. Aborting..." exit 1 fi echo -n "." sleep 1 done echo ## create a localhost only, debian-sys-maint user ## the debian-sys-maint is used while creating users and database ## as well as to shut down or starting up the mysql server via mysqladmin echo "Creating debian-sys-maint user..." 
mysql -uroot -e "GRANT ALL PRIVILEGES on *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '' WITH GRANT OPTION;" if [ -n "${DB_REMOTE_ROOT_NAME}" -a -n "${DB_REMOTE_ROOT_HOST}" ]; then echo "Creating remote user \"${DB_REMOTE_ROOT_NAME}\" with root privileges..." mysql -uroot \ -e "GRANT ALL PRIVILEGES ON *.* TO '${DB_REMOTE_ROOT_NAME}'@'${DB_REMOTE_ROOT_HOST}' IDENTIFIED BY '${DB_REMOTE_ROOT_PASS}' WITH GRANT OPTION; FLUSH PRIVILEGES;" fi /usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf shutdown fi # create new user / database if [ -n "${DB_USER}" -o -n "${DB_NAME}" ]; then /usr/bin/mysqld_safe >/dev/null 2>&1 & # wait for mysql server to start (max 30 seconds) timeout=30 while ! /usr/bin/mysqladmin -u root status >/dev/null 2>&1 do timeout=$(($timeout - 1)) if [ $timeout -eq 0 ]; then echo "Could not connect to mysql server. Aborting..." exit 1 fi sleep 1 done if [ -n "${DB_NAME}" ]; then for db in $(awk -F',' '{for (i = 1 ; i <= NF ; i++) print $i}' <<< "${DB_NAME}"); do echo "Creating database \"$db\"..." mysql --defaults-file=/etc/mysql/debian.cnf \ -e "CREATE DATABASE IF NOT EXISTS \`$db\` DEFAULT CHARACTER SET \`utf8\` COLLATE \`utf8_unicode_ci\`;" if [ -n "${DB_USER}" ]; then echo "Granting access to database \"$db\" for user \"${DB_USER}\"..." mysql --defaults-file=/etc/mysql/debian.cnf \ -e "GRANT ALL PRIVILEGES ON \`$db\`.* TO '${DB_USER}' IDENTIFIED BY '${DB_PASS}';" fi # Create our Database: mysql -uroot $db < ./initial.sql mysql -uroot $db < ./migrate.sql done fi /usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf shutdown fi # listen on all interfaces cat > /etc/mysql/conf.d/mysql-listen.cnf < 0 { if !createNew { fmt.Fprintln(out, "Passphrase incorrect. Please retry.") } } // Figure out if we should display a different string for this alias displayAlias := alias if aliasMap != nil { if val, ok := aliasMap[alias]; ok { displayAlias = val } } // First, check if we have a password cached for this alias. 
if numAttempts == 0 { if userEnteredTargetsSnapshotsPass && (alias == tufSnapshotAlias || alias == tufTargetsAlias) { return targetsSnapshotsPass, false, nil } if userEnteredRootsPass && (alias == "root") { return rootsPass, false, nil } } if numAttempts > 3 && !createNew { return "", true, ErrTooManyAttempts } state, err := term.SaveState(0) if err != nil { return "", false, err } term.DisableEcho(0, state) defer term.RestoreTerminal(0, state) stdin := bufio.NewReader(in) indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator)) if indexOfLastSeparator == -1 { indexOfLastSeparator = 0 } var shortName string if len(keyName) > indexOfLastSeparator+idBytesToDisplay { if indexOfLastSeparator > 0 { keyNamePrefix := keyName[:indexOfLastSeparator] keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1] shortName = keyNameID + " (" + keyNamePrefix + ")" } else { shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay] } } withID := fmt.Sprintf(" with ID %s", shortName) if shortName == "" { withID = "" } if createNew { fmt.Fprintf(out, "Enter passphrase for new %s key%s: ", displayAlias, withID) } else if displayAlias == "yubikey" { fmt.Fprintf(out, "Enter the %s for the attached Yubikey: ", keyName) } else { fmt.Fprintf(out, "Enter passphrase for %s key%s: ", displayAlias, withID) } passphrase, err := stdin.ReadBytes('\n') fmt.Fprintln(out) if err != nil { return "", false, err } retPass := strings.TrimSpace(string(passphrase)) if !createNew { if alias == tufSnapshotAlias || alias == tufTargetsAlias { userEnteredTargetsSnapshotsPass = true targetsSnapshotsPass = retPass } if alias == tufRootAlias { userEnteredRootsPass = true rootsPass = retPass } return retPass, false, nil } if len(retPass) < 8 { fmt.Fprintln(out, "Passphrase is too short. 
Please use a password manager to generate and store a good random passphrase.") return "", false, ErrTooShort } fmt.Fprintf(out, "Repeat passphrase for new %s key%s: ", displayAlias, withID) confirmation, err := stdin.ReadBytes('\n') fmt.Fprintln(out) if err != nil { return "", false, err } confirmationStr := strings.TrimSpace(string(confirmation)) if retPass != confirmationStr { fmt.Fprintln(out, "Passphrases do not match. Please retry.") return "", false, ErrDontMatch } if alias == tufSnapshotAlias || alias == tufTargetsAlias { userEnteredTargetsSnapshotsPass = true targetsSnapshotsPass = retPass } if alias == tufRootAlias { userEnteredRootsPass = true rootsPass = retPass } return retPass, false, nil } } // ConstantRetriever returns a new Retriever which will return a constant string // as a passphrase. func ConstantRetriever(constantPassphrase string) Retriever { return func(k, a string, c bool, n int) (string, bool, error) { return constantPassphrase, false, nil } } notary-0.1/proto/000077500000000000000000000000001262207326400140355ustar00rootroot00000000000000notary-0.1/proto/signer.pb.go000066400000000000000000000265521262207326400162650ustar00rootroot00000000000000// Code generated by protoc-gen-go. // source: proto/signer.proto // DO NOT EDIT! /* Package proto is a generated protocol buffer package. It is generated from these files: proto/signer.proto It has these top-level messages: KeyInfo KeyID Algorithm PublicKey Signature SignatureRequest Void HealthStatus */ package proto import proto1 "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto1.Marshal var _ = fmt.Errorf var _ = math.Inf // KeyInfo holds a KeyID that is used to reference the key and it's algorithm type KeyInfo struct { KeyID *KeyID `protobuf:"bytes,1,opt,name=keyID" json:"keyID,omitempty"` Algorithm *Algorithm `protobuf:"bytes,2,opt,name=algorithm" json:"algorithm,omitempty"` } func (m *KeyInfo) Reset() { *m = KeyInfo{} } func (m *KeyInfo) String() string { return proto1.CompactTextString(m) } func (*KeyInfo) ProtoMessage() {} func (m *KeyInfo) GetKeyID() *KeyID { if m != nil { return m.KeyID } return nil } func (m *KeyInfo) GetAlgorithm() *Algorithm { if m != nil { return m.Algorithm } return nil } // KeyID holds an ID that is used to reference the key type KeyID struct { ID string `protobuf:"bytes,1,opt,name=ID" json:"ID,omitempty"` } func (m *KeyID) Reset() { *m = KeyID{} } func (m *KeyID) String() string { return proto1.CompactTextString(m) } func (*KeyID) ProtoMessage() {} // Type holds the type of crypto algorithm used type Algorithm struct { Algorithm string `protobuf:"bytes,1,opt,name=algorithm" json:"algorithm,omitempty"` } func (m *Algorithm) Reset() { *m = Algorithm{} } func (m *Algorithm) String() string { return proto1.CompactTextString(m) } func (*Algorithm) ProtoMessage() {} // PublicKey has a KeyInfo that is used to reference the key, and opaque bytes of a publicKey type PublicKey struct { KeyInfo *KeyInfo `protobuf:"bytes,1,opt,name=keyInfo" json:"keyInfo,omitempty"` PublicKey []byte `protobuf:"bytes,2,opt,name=publicKey,proto3" json:"publicKey,omitempty"` } func (m *PublicKey) Reset() { *m = PublicKey{} } func (m *PublicKey) String() string { return proto1.CompactTextString(m) } func (*PublicKey) ProtoMessage() {} func (m *PublicKey) GetKeyInfo() *KeyInfo { if m != nil { return m.KeyInfo } return nil } // Signature specifies a KeyInfo that was used for signing and signed content type Signature struct { KeyInfo *KeyInfo `protobuf:"bytes,1,opt,name=keyInfo" json:"keyInfo,omitempty"` Algorithm *Algorithm 
`protobuf:"bytes,2,opt,name=algorithm" json:"algorithm,omitempty"` Content []byte `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` } func (m *Signature) Reset() { *m = Signature{} } func (m *Signature) String() string { return proto1.CompactTextString(m) } func (*Signature) ProtoMessage() {} func (m *Signature) GetKeyInfo() *KeyInfo { if m != nil { return m.KeyInfo } return nil } func (m *Signature) GetAlgorithm() *Algorithm { if m != nil { return m.Algorithm } return nil } // SignatureRequests specifies a KeyInfo, and content to be signed type SignatureRequest struct { KeyID *KeyID `protobuf:"bytes,1,opt,name=keyID" json:"keyID,omitempty"` Content []byte `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` } func (m *SignatureRequest) Reset() { *m = SignatureRequest{} } func (m *SignatureRequest) String() string { return proto1.CompactTextString(m) } func (*SignatureRequest) ProtoMessage() {} func (m *SignatureRequest) GetKeyID() *KeyID { if m != nil { return m.KeyID } return nil } // Void represents an empty message type type Void struct { } func (m *Void) Reset() { *m = Void{} } func (m *Void) String() string { return proto1.CompactTextString(m) } func (*Void) ProtoMessage() {} // A mapping of health check name to the check result message type HealthStatus struct { Status map[string]string `protobuf:"bytes,1,rep,name=status" json:"status,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *HealthStatus) Reset() { *m = HealthStatus{} } func (m *HealthStatus) String() string { return proto1.CompactTextString(m) } func (*HealthStatus) ProtoMessage() {} func (m *HealthStatus) GetStatus() map[string]string { if m != nil { return m.Status } return nil } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn // Client API for KeyManagement service type KeyManagementClient interface { // CreateKey creates as asymmetric key pair and returns the PublicKey CreateKey(ctx context.Context, in *Algorithm, opts ...grpc.CallOption) (*PublicKey, error) // DeleteKey deletes the key associated with a KeyID DeleteKey(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*Void, error) // GetKeyInfo returns the PublicKey associated with a KeyID GetKeyInfo(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*PublicKey, error) // CheckHealth returns the HealthStatus with the service CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) } type keyManagementClient struct { cc *grpc.ClientConn } func NewKeyManagementClient(cc *grpc.ClientConn) KeyManagementClient { return &keyManagementClient{cc} } func (c *keyManagementClient) CreateKey(ctx context.Context, in *Algorithm, opts ...grpc.CallOption) (*PublicKey, error) { out := new(PublicKey) err := grpc.Invoke(ctx, "/proto.KeyManagement/CreateKey", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *keyManagementClient) DeleteKey(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*Void, error) { out := new(Void) err := grpc.Invoke(ctx, "/proto.KeyManagement/DeleteKey", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *keyManagementClient) GetKeyInfo(ctx context.Context, in *KeyID, opts ...grpc.CallOption) (*PublicKey, error) { out := new(PublicKey) err := grpc.Invoke(ctx, "/proto.KeyManagement/GetKeyInfo", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *keyManagementClient) CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) { out := new(HealthStatus) err := grpc.Invoke(ctx, "/proto.KeyManagement/CheckHealth", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } // Server API for KeyManagement service type KeyManagementServer interface { // CreateKey creates as asymmetric key pair and returns the PublicKey CreateKey(context.Context, *Algorithm) (*PublicKey, error) // DeleteKey deletes the key associated with a KeyID DeleteKey(context.Context, *KeyID) (*Void, error) // GetKeyInfo returns the PublicKey associated with a KeyID GetKeyInfo(context.Context, *KeyID) (*PublicKey, error) // CheckHealth returns the HealthStatus with the service CheckHealth(context.Context, *Void) (*HealthStatus, error) } func RegisterKeyManagementServer(s *grpc.Server, srv KeyManagementServer) { s.RegisterService(&_KeyManagement_serviceDesc, srv) } func _KeyManagement_CreateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Algorithm) if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).CreateKey(ctx, in) if err != nil { return nil, err } return out, nil } func _KeyManagement_DeleteKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(KeyID) if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).DeleteKey(ctx, in) if err != nil { return nil, err } return out, nil } func _KeyManagement_GetKeyInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(KeyID) if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).GetKeyInfo(ctx, in) if err != nil { return nil, err } return out, nil } func _KeyManagement_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Void) if err := dec(in); err != nil { return nil, err } out, err := srv.(KeyManagementServer).CheckHealth(ctx, in) if err != nil { return nil, err } return out, nil } var _KeyManagement_serviceDesc = 
grpc.ServiceDesc{ ServiceName: "proto.KeyManagement", HandlerType: (*KeyManagementServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateKey", Handler: _KeyManagement_CreateKey_Handler, }, { MethodName: "DeleteKey", Handler: _KeyManagement_DeleteKey_Handler, }, { MethodName: "GetKeyInfo", Handler: _KeyManagement_GetKeyInfo_Handler, }, { MethodName: "CheckHealth", Handler: _KeyManagement_CheckHealth_Handler, }, }, Streams: []grpc.StreamDesc{}, } // Client API for Signer service type SignerClient interface { // Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature Sign(ctx context.Context, in *SignatureRequest, opts ...grpc.CallOption) (*Signature, error) // CheckHealth returns the HealthStatus with the service CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) } type signerClient struct { cc *grpc.ClientConn } func NewSignerClient(cc *grpc.ClientConn) SignerClient { return &signerClient{cc} } func (c *signerClient) Sign(ctx context.Context, in *SignatureRequest, opts ...grpc.CallOption) (*Signature, error) { out := new(Signature) err := grpc.Invoke(ctx, "/proto.Signer/Sign", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *signerClient) CheckHealth(ctx context.Context, in *Void, opts ...grpc.CallOption) (*HealthStatus, error) { out := new(HealthStatus) err := grpc.Invoke(ctx, "/proto.Signer/CheckHealth", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } // Server API for Signer service type SignerServer interface { // Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature Sign(context.Context, *SignatureRequest) (*Signature, error) // CheckHealth returns the HealthStatus with the service CheckHealth(context.Context, *Void) (*HealthStatus, error) } func RegisterSignerServer(s *grpc.Server, srv SignerServer) { s.RegisterService(&_Signer_serviceDesc, srv) } func _Signer_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(SignatureRequest) if err := dec(in); err != nil { return nil, err } out, err := srv.(SignerServer).Sign(ctx, in) if err != nil { return nil, err } return out, nil } func _Signer_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { in := new(Void) if err := dec(in); err != nil { return nil, err } out, err := srv.(SignerServer).CheckHealth(ctx, in) if err != nil { return nil, err } return out, nil } var _Signer_serviceDesc = grpc.ServiceDesc{ ServiceName: "proto.Signer", HandlerType: (*SignerServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Sign", Handler: _Signer_Sign_Handler, }, { MethodName: "CheckHealth", Handler: _Signer_CheckHealth_Handler, }, }, Streams: []grpc.StreamDesc{}, } notary-0.1/proto/signer.proto000066400000000000000000000034421262207326400164140ustar00rootroot00000000000000syntax = "proto3"; package proto; // KeyManagement Interface service KeyManagement { // CreateKey creates as asymmetric key pair and returns the PublicKey rpc CreateKey(Algorithm) returns (PublicKey) {} // DeleteKey deletes the key associated with a KeyID rpc DeleteKey(KeyID) returns (Void) {} // GetKeyInfo returns the PublicKey associated with a KeyID rpc GetKeyInfo(KeyID) returns (PublicKey) {} // CheckHealth returns the HealthStatus with the service rpc CheckHealth(Void) returns 
(HealthStatus) {} } // Signer Interface service Signer { // Sign calculates a cryptographic signature using the Key associated with a KeyID and returns the signature rpc Sign(SignatureRequest) returns (Signature) {} // CheckHealth returns the HealthStatus with the service rpc CheckHealth(Void) returns (HealthStatus) {} } // KeyInfo holds a KeyID that is used to reference the key and it's algorithm message KeyInfo { KeyID keyID = 1; Algorithm algorithm = 2; } // KeyID holds an ID that is used to reference the key message KeyID { string ID = 1; } // Type holds the type of crypto algorithm used message Algorithm { string algorithm = 1; } // PublicKey has a KeyInfo that is used to reference the key, and opaque bytes of a publicKey message PublicKey { KeyInfo keyInfo = 1; bytes publicKey = 2; } // Signature specifies a KeyInfo that was used for signing and signed content message Signature { KeyInfo keyInfo = 1; Algorithm algorithm = 2; bytes content = 3; } // SignatureRequests specifies a KeyInfo, and content to be signed message SignatureRequest { KeyID keyID = 1; bytes content = 2; } // Void represents an empty message type message Void { } // A mapping of health check name to the check result message message HealthStatus { map status = 1; } notary-0.1/server/000077500000000000000000000000001262207326400142005ustar00rootroot00000000000000notary-0.1/server/handlers/000077500000000000000000000000001262207326400160005ustar00rootroot00000000000000notary-0.1/server/handlers/default.go000066400000000000000000000132601262207326400177550ustar00rootroot00000000000000package handlers import ( "bytes" "encoding/json" "io" "net/http" "strings" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/gorilla/mux" "golang.org/x/net/context" ctxu "github.com/docker/distribution/context" "github.com/docker/notary/errors" "github.com/docker/notary/server/storage" "github.com/docker/notary/server/timestamp" ) // MainHandler is the default handler for the 
server func MainHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { if r.Method == "GET" { _, err := w.Write([]byte("{}")) if err != nil { return errors.ErrUnknown.WithDetail(err) } } else { return errors.ErrGenericNotFound.WithDetail(nil) } return nil } // AtomicUpdateHandler will accept multiple TUF files and ensure that the storage // backend is atomically updated with all the new records. func AtomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { defer r.Body.Close() s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { return errors.ErrNoStorage.WithDetail(nil) } vars := mux.Vars(r) gun := vars["imageName"] reader, err := r.MultipartReader() if err != nil { return errors.ErrMalformedUpload.WithDetail(nil) } var updates []storage.MetaUpdate for { part, err := reader.NextPart() if err == io.EOF { break } role := strings.TrimSuffix(part.FileName(), ".json") if role == "" { return errors.ErrNoFilename.WithDetail(nil) } else if !data.ValidRole(role) { return errors.ErrInvalidRole.WithDetail(role) } meta := &data.SignedMeta{} var input []byte inBuf := bytes.NewBuffer(input) dec := json.NewDecoder(io.TeeReader(part, inBuf)) err = dec.Decode(meta) if err != nil { return errors.ErrMalformedJSON.WithDetail(nil) } version := meta.Signed.Version updates = append(updates, storage.MetaUpdate{ Role: role, Version: version, Data: inBuf.Bytes(), }) } if err = validateUpdate(gun, updates, store); err != nil { return errors.ErrMalformedUpload.WithDetail(err) } err = store.UpdateMany(gun, updates) if err != nil { return errors.ErrUpdating.WithDetail(err) } return nil } // GetHandler returns the json for a specified role and GUN. 
func GetHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { return errors.ErrNoStorage.WithDetail(nil) } vars := mux.Vars(r) gun := vars["imageName"] tufRole := vars["tufRole"] logger := ctxu.GetLoggerWithFields(ctx, map[string]interface{}{"gun": gun, "tufRole": tufRole}) out, err := store.GetCurrent(gun, tufRole) if err != nil { if _, ok := err.(*storage.ErrNotFound); ok { return errors.ErrMetadataNotFound.WithDetail(nil) } logger.Error("500 GET") return errors.ErrUnknown.WithDetail(err) } if out == nil { logger.Error("404 GET") return errors.ErrMetadataNotFound.WithDetail(nil) } w.Write(out) logger.Debug("200 GET") return nil } // DeleteHandler deletes all data for a GUN. A 200 responses indicates success. func DeleteHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { return errors.ErrNoStorage.WithDetail(nil) } vars := mux.Vars(r) gun := vars["imageName"] logger := ctxu.GetLoggerWithField(ctx, gun, "gun") err := store.Delete(gun) if err != nil { logger.Error("500 DELETE repository") return errors.ErrUnknown.WithDetail(err) } return nil } // GetTimestampHandler returns a timestamp.json given a GUN func GetTimestampHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { return errors.ErrNoStorage.WithDetail(nil) } cryptoServiceVal := ctx.Value("cryptoService") cryptoService, ok := cryptoServiceVal.(signed.CryptoService) if !ok { return errors.ErrNoCryptoService.WithDetail(nil) } vars := mux.Vars(r) gun := vars["imageName"] logger := ctxu.GetLoggerWithField(ctx, gun, "gun") out, err := timestamp.GetOrCreateTimestamp(gun, store, cryptoService) if err != nil { switch err.(type) { case *storage.ErrNoKey, *storage.ErrNotFound: logger.Error("404 GET timestamp") return 
errors.ErrMetadataNotFound.WithDetail(nil) default: logger.Error("500 GET timestamp") return errors.ErrUnknown.WithDetail(err) } } logger.Debug("200 GET timestamp") w.Write(out) return nil } // GetTimestampKeyHandler returns a timestamp public key, creating a new key-pair // it if it doesn't yet exist func GetTimestampKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error { vars := mux.Vars(r) gun := vars["imageName"] logger := ctxu.GetLoggerWithField(ctx, gun, "gun") s := ctx.Value("metaStore") store, ok := s.(storage.MetaStore) if !ok { logger.Error("500 GET storage not configured") return errors.ErrNoStorage.WithDetail(nil) } c := ctx.Value("cryptoService") crypto, ok := c.(signed.CryptoService) if !ok { logger.Error("500 GET crypto service not configured") return errors.ErrNoCryptoService.WithDetail(nil) } algo := ctx.Value("keyAlgorithm") keyAlgo, ok := algo.(string) if !ok { logger.Error("500 GET key algorithm not configured") return errors.ErrNoKeyAlgorithm.WithDetail(nil) } keyAlgorithm := keyAlgo key, err := timestamp.GetOrCreateTimestampKey(gun, store, crypto, keyAlgorithm) if err != nil { logger.Errorf("500 GET timestamp key: %v", err) return errors.ErrUnknown.WithDetail(err) } out, err := json.Marshal(key) if err != nil { logger.Error("500 GET timestamp key") return errors.ErrUnknown.WithDetail(err) } logger.Debug("200 GET timestamp key") w.Write(out) return nil } notary-0.1/server/handlers/default_test.go000066400000000000000000000016121262207326400210120ustar00rootroot00000000000000package handlers import ( "net/http" "net/http/httptest" "testing" "golang.org/x/net/context" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/utils" ) func TestMainHandlerGet(t *testing.T) { hand := utils.RootHandlerFactory(nil, context.Background(), &signed.Ed25519{}) handler := hand(MainHandler) ts := httptest.NewServer(handler) defer ts.Close() _, err := http.Get(ts.URL) if err != nil { t.Fatalf("Received error on GET /: %s", 
err.Error()) } } func TestMainHandlerNotGet(t *testing.T) { hand := utils.RootHandlerFactory(nil, context.Background(), &signed.Ed25519{}) handler := hand(MainHandler) ts := httptest.NewServer(handler) defer ts.Close() res, err := http.Head(ts.URL) if err != nil { t.Fatalf("Received error on GET /: %s", err.Error()) } if res.StatusCode != http.StatusNotFound { t.Fatalf("Expected 404, received %d", res.StatusCode) } } notary-0.1/server/handlers/validation.go000066400000000000000000000273071262207326400204720ustar00rootroot00000000000000package handlers import ( "bytes" "encoding/json" "errors" "fmt" "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/Sirupsen/logrus" "github.com/docker/notary/server/storage" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/utils" ) // ErrValidation represents a general validation error type ErrValidation struct { msg string } func (err ErrValidation) Error() string { return fmt.Sprintf("An error occurred during validation: %s", err.msg) } // ErrBadHierarchy represents a missing snapshot at this current time. 
// When delegations are implemented it will also represent a missing
// delegation parent
type ErrBadHierarchy struct {
	msg string
}

// NOTE(review): message reads "in incorrect" — likely meant "is incorrect";
// left untouched here since it is a runtime string.
func (err ErrBadHierarchy) Error() string {
	return fmt.Sprintf("Hierarchy of updates in incorrect: %s", err.msg)
}

// ErrBadRoot represents a failure validating the root
type ErrBadRoot struct {
	msg string
}

func (err ErrBadRoot) Error() string {
	return fmt.Sprintf("The root being updated is invalid: %s", err.msg)
}

// ErrBadTargets represents a failure to validate a targets (incl delegations)
type ErrBadTargets struct {
	msg string
}

func (err ErrBadTargets) Error() string {
	return fmt.Sprintf("The targets being updated is invalid: %s", err.msg)
}

// ErrBadSnapshot represents a failure to validate the snapshot
type ErrBadSnapshot struct {
	msg string
}

func (err ErrBadSnapshot) Error() string {
	return fmt.Sprintf("The snapshot being updated is invalid: %s", err.msg)
}

// validateUpdate checks that the updates being pushed
// are semantically correct and the signatures are correct
func validateUpdate(gun string, updates []storage.MetaUpdate, store storage.MetaStore) error {
	kdb := keys.NewDB()
	repo := tuf.NewRepo(kdb, nil)
	rootRole := data.RoleName(data.CanonicalRootRole)
	targetsRole := data.RoleName(data.CanonicalTargetsRole)
	snapshotRole := data.RoleName(data.CanonicalSnapshotRole)

	// check that the necessary roles are present:
	roles := make(map[string]storage.MetaUpdate)
	for _, v := range updates {
		roles[v.Role] = v
	}
	if err := hierarchyOK(roles); err != nil {
		logrus.Error("ErrBadHierarchy: ", err.Error())
		return ErrBadHierarchy{msg: err.Error()}
	}
	logrus.Debug("Successfully validated hierarchy")

	var root *data.SignedRoot
	// Load any previously stored root so a root-less update can still be
	// validated against prior state. Only a not-found error is tolerated.
	oldRootJSON, err := store.GetCurrent(gun, rootRole)
	if _, ok := err.(*storage.ErrNotFound); err != nil && !ok {
		// problem with storage. No expectation we can
		// write if we can't read so bail.
		logrus.Error("error reading previous root: ", err.Error())
		return err
	}
	if rootUpdate, ok := roles[rootRole]; ok {
		// if root is present, validate its integrity, possibly
		// against a previous root
		if root, err = validateRoot(gun, oldRootJSON, rootUpdate.Data); err != nil {
			logrus.Error("ErrBadRoot: ", err.Error())
			return ErrBadRoot{msg: err.Error()}
		}
		// setting root will update keys db
		if err = repo.SetRoot(root); err != nil {
			logrus.Error("ErrValidation: ", err.Error())
			return ErrValidation{msg: err.Error()}
		}
		logrus.Debug("Successfully validated root")
	} else {
		// no new root in this update: the previously stored root must
		// exist and parse so the rest of the update can be verified
		// against its keys.
		if oldRootJSON == nil {
			return ErrValidation{msg: "no pre-existing root and no root provided in update."}
		}
		parsedOldRoot := &data.SignedRoot{}
		if err := json.Unmarshal(oldRootJSON, parsedOldRoot); err != nil {
			return ErrValidation{msg: "pre-existing root is corrupted and no root provided in update."}
		}
		if err = repo.SetRoot(parsedOldRoot); err != nil {
			logrus.Error("ErrValidation: ", err.Error())
			return ErrValidation{msg: err.Error()}
		}
	}

	// TODO: validate delegated targets roles.
	var t *data.SignedTargets
	if _, ok := roles[targetsRole]; ok {
		if t, err = validateTargets(targetsRole, roles, kdb); err != nil {
			logrus.Error("ErrBadTargets: ", err.Error())
			return ErrBadTargets{msg: err.Error()}
		}
		repo.SetTargets(targetsRole, t)
	}
	logrus.Debug("Successfully validated targets")

	var oldSnap *data.SignedSnapshot
	// Load the previous snapshot (if any) for cross-checking; again only
	// a not-found error is tolerated.
	oldSnapJSON, err := store.GetCurrent(gun, snapshotRole)
	if _, ok := err.(*storage.ErrNotFound); err != nil && !ok {
		// problem with storage. No expectation we can
		// write if we can't read so bail.
logrus.Error("error reading previous snapshot: ", err.Error()) return err } else if err == nil { oldSnap = &data.SignedSnapshot{} if err := json.Unmarshal(oldSnapJSON, oldSnap); err != nil { oldSnap = nil } } if err := validateSnapshot(snapshotRole, oldSnap, roles[snapshotRole], roles, kdb); err != nil { logrus.Error("ErrBadSnapshot: ", err.Error()) return ErrBadSnapshot{msg: err.Error()} } logrus.Debug("Successfully validated snapshot") return nil } func validateSnapshot(role string, oldSnap *data.SignedSnapshot, snapUpdate storage.MetaUpdate, roles map[string]storage.MetaUpdate, kdb *keys.KeyDB) error { s := &data.Signed{} err := json.Unmarshal(snapUpdate.Data, s) if err != nil { return errors.New("could not parse snapshot") } // version specifically gets validated when writing to store to // better handle race conditions there. if err := signed.Verify(s, role, 0, kdb); err != nil { return err } snap, err := data.SnapshotFromSigned(s) if err != nil { return errors.New("could not parse snapshot") } if !data.ValidTUFType(snap.Signed.Type, data.CanonicalSnapshotRole) { return errors.New("snapshot has wrong type") } err = checkSnapshotEntries(role, oldSnap, snap, roles) if err != nil { return err } return nil } func checkSnapshotEntries(role string, oldSnap, snap *data.SignedSnapshot, roles map[string]storage.MetaUpdate) error { snapshotRole := data.RoleName(data.CanonicalSnapshotRole) timestampRole := data.RoleName(data.CanonicalTimestampRole) // just in case for r, update := range roles { if r == snapshotRole || r == timestampRole { continue } m, ok := snap.Signed.Meta[r] if !ok { return fmt.Errorf("snapshot missing metadata for %s", r) } if int64(len(update.Data)) != m.Length { return fmt.Errorf("snapshot has incorrect length for %s", r) } if !checkHashes(m, update.Data) { return fmt.Errorf("snapshot has incorrect hashes for %s", r) } } return nil } func checkHashes(meta data.FileMeta, update []byte) bool { for alg, digest := range meta.Hashes { d := 
utils.DoHash(alg, update) if !bytes.Equal(digest, d) { return false } } return true } func validateTargets(role string, roles map[string]storage.MetaUpdate, kdb *keys.KeyDB) (*data.SignedTargets, error) { // TODO: when delegations are being validated, validate parent // role exists for any delegation s := &data.Signed{} err := json.Unmarshal(roles[role].Data, s) if err != nil { return nil, fmt.Errorf("could not parse %s", role) } // version specifically gets validated when writing to store to // better handle race conditions there. if err := signed.Verify(s, role, 0, kdb); err != nil { return nil, err } t, err := data.TargetsFromSigned(s) if err != nil { return nil, err } if !data.ValidTUFType(t.Signed.Type, data.CanonicalTargetsRole) { return nil, fmt.Errorf("%s has wrong type", role) } return t, nil } // check the snapshot is present. If it is, the hierarchy // of the update is OK. This seems like a simplistic check // but is completely sufficient for all possible use cases: // 1. the user is updating only the snapshot. // 2. the user is updating a targets (incl. delegations) or // root metadata. This requires they also provide a new // snapshot. // N.B. users should never be updating timestamps. The server // always handles timestamping. If the user does send a // timestamp, the server will replace it on next // GET timestamp.jsonshould it detect the current // snapshot has a different hash to the one in the timestamp. 
func hierarchyOK(roles map[string]storage.MetaUpdate) error { snapshotRole := data.RoleName(data.CanonicalSnapshotRole) if _, ok := roles[snapshotRole]; !ok { return errors.New("snapshot missing from update") } return nil } func validateRoot(gun string, oldRoot, newRoot []byte) (*data.SignedRoot, error) { var parsedOldRoot *data.SignedRoot parsedNewRoot := &data.SignedRoot{} if oldRoot != nil { parsedOldRoot = &data.SignedRoot{} err := json.Unmarshal(oldRoot, parsedOldRoot) if err != nil { // TODO(david): if we can't read the old root should we continue // here to check new root self referential integrity? // This would permit recovery of a repo with a corrupted // root. logrus.Warn("Old root could not be parsed.") } } err := json.Unmarshal(newRoot, parsedNewRoot) if err != nil { return nil, err } if err := checkRoot(parsedOldRoot, parsedNewRoot); err != nil { // TODO(david): how strict do we want to be here about old signatures // for rotations? Should the user have to provide a flag // which gets transmitted to force a root update without // correct old key signatures. return nil, err } if !data.ValidTUFType(parsedNewRoot.Signed.Type, data.CanonicalRootRole) { return nil, fmt.Errorf("root has wrong type") } return parsedNewRoot, nil } // checkRoot returns true if no rotation, or a valid // rotation has taken place, and the threshold number of signatures // are valid. 
func checkRoot(oldRoot, newRoot *data.SignedRoot) error {
	rootRole := data.RoleName(data.CanonicalRootRole)
	targetsRole := data.RoleName(data.CanonicalTargetsRole)
	snapshotRole := data.RoleName(data.CanonicalSnapshotRole)
	timestampRole := data.RoleName(data.CanonicalTimestampRole)

	var oldRootRole *data.RootRole
	newRootRole, ok := newRoot.Signed.Roles[rootRole]
	if !ok {
		return errors.New("new root is missing role entry for root role")
	}

	// Defaults used when there is no previous root to compare against.
	oldThreshold := 1
	rotation := false
	oldKeys := map[string]data.PublicKey{}
	newKeys := map[string]data.PublicKey{}
	if oldRoot != nil {
		// check for matching root key IDs
		// NOTE(review): no nil check on the old root's "root" role entry; an
		// old root missing that entry would panic on the .Threshold access
		// below — confirm stored roots are always validated first.
		oldRootRole = oldRoot.Signed.Roles[rootRole]
		oldThreshold = oldRootRole.Threshold
		for _, kid := range oldRootRole.KeyIDs {
			k, ok := oldRoot.Signed.Keys[kid]
			if !ok {
				// if the key itself wasn't contained in the root
				// we're skipping it because it could never have
				// been used to validate this root.
				continue
			}
			oldKeys[kid] = data.NewPublicKey(k.Algorithm(), k.Public())
		}
		// super simple check for possible rotation
		rotation = len(oldKeys) != len(newRootRole.KeyIDs)
	}
	// if old and new had the same number of keys, iterate
	// to see if there's a difference.
	for _, kid := range newRootRole.KeyIDs {
		k, ok := newRoot.Signed.Keys[kid]
		if !ok {
			// if the key itself wasn't contained in the root
			// we're skipping it because it could never have
			// been used to validate this root.
			continue
		}
		newKeys[kid] = data.NewPublicKey(k.Algorithm(), k.Public())
		if oldRoot != nil {
			if _, ok := oldKeys[kid]; !ok {
				// if there is any difference in keys, a key rotation may have
				// occurred.
				rotation = true
			}
		}
	}
	newSigned, err := newRoot.ToSigned()
	if err != nil {
		return err
	}
	// When a rotation is detected, the new root must additionally carry
	// enough signatures from the OLD root keys to meet the old threshold.
	if rotation {
		err = signed.VerifyRoot(newSigned, oldThreshold, oldKeys)
		if err != nil {
			return fmt.Errorf("rotation detected and new root was not signed with at least %d old keys", oldThreshold)
		}
	}
	// The new root must always be self-consistent: signed by a threshold
	// of the keys it itself declares for the root role.
	err = signed.VerifyRoot(newSigned, newRootRole.Threshold, newKeys)
	if err != nil {
		return err
	}
	root, err := data.RootFromSigned(newSigned)
	if err != nil {
		return err
	}
	// at a minimum, check the 4 required roles are present
	for _, r := range []string{rootRole, targetsRole, snapshotRole, timestampRole} {
		role, ok := root.Signed.Roles[r]
		if !ok {
			return fmt.Errorf("missing required %s role from root", r)
		}
		if role.Threshold < 1 {
			return fmt.Errorf("%s role has invalid threshold", r)
		}
		if len(role.KeyIDs) < role.Threshold {
			return fmt.Errorf("%s role has insufficient number of keys", r)
		}
	}
	return nil
}
notary-0.1/server/handlers/validation_test.go000066400000000000000000000427141262207326400215300ustar00rootroot00000000000000package handlers

import (
	"testing"

	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/signed"
	"github.com/docker/notary/tuf/testutils"
	"github.com/stretchr/testify/assert"

	"github.com/docker/notary/server/storage"
)

// TestValidateEmptyNew verifies that a complete first push (root, targets,
// snapshot and timestamp) into an empty store validates successfully.
func TestValidateEmptyNew(t *testing.T) {
	_, repo, _ := testutils.EmptyRepo()
	store := storage.NewMemStorage()
	r, tg, sn, ts, err := testutils.Sign(repo)
	assert.NoError(t, err)
	root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts)
	assert.NoError(t, err)

	updates := []storage.MetaUpdate{
		{
			Role:    "root",
			Version: 1,
			Data:    root,
		},
		{
			Role:    "targets",
			Version: 1,
			Data:    targets,
		},
		{
			Role:    "snapshot",
			Version: 1,
			Data:    snapshot,
		},
		{
			Role:    "timestamp",
			Version: 1,
			Data:    timestamp,
		},
	}

	err = validateUpdate("testGUN", updates, store)
	assert.NoError(t, err)
}

// TestValidateNoNewRoot verifies that an update omitting root still
// validates when a root is already present in the store.
func TestValidateNoNewRoot(t *testing.T) {
	_, repo, _ := testutils.EmptyRepo()
	store := storage.NewMemStorage()
	r, tg, sn, ts, err := testutils.Sign(repo)
assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) store.UpdateCurrent( "testGUN", storage.MetaUpdate{ Role: "root", Version: 1, Data: root, }, ) updates := []storage.MetaUpdate{ { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.NoError(t, err) } func TestValidateNoNewTargets(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) store.UpdateCurrent( "testGUN", storage.MetaUpdate{ Role: "targets", Version: 1, Data: targets, }, ) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.NoError(t, err) } func TestValidateOnlySnapshot(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, _, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) store.UpdateCurrent( "testGUN", storage.MetaUpdate{ Role: "root", Version: 1, Data: root, }, ) store.UpdateCurrent( "testGUN", storage.MetaUpdate{ Role: "targets", Version: 1, Data: targets, }, ) updates := []storage.MetaUpdate{ { Role: "snapshot", Version: 1, Data: snapshot, }, } err = validateUpdate("testGUN", updates, store) assert.NoError(t, err) } func TestValidateOldRoot(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, 
ts) assert.NoError(t, err) store.UpdateCurrent( "testGUN", storage.MetaUpdate{ Role: "root", Version: 1, Data: root, }, ) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.NoError(t, err) } func TestValidateRootRotation(t *testing.T) { _, repo, crypto := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) store.UpdateCurrent( "testGUN", storage.MetaUpdate{ Role: "root", Version: 1, Data: root, }, ) oldRootRole := repo.Root.Signed.Roles["root"] oldRootKey := repo.Root.Signed.Keys[oldRootRole.KeyIDs[0]] rootKey, err := crypto.Create("root", data.ED25519Key) assert.NoError(t, err) rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil) assert.NoError(t, err) delete(repo.Root.Signed.Keys, oldRootRole.KeyIDs[0]) repo.Root.Signed.Roles["root"] = &rootRole.RootRole repo.Root.Signed.Keys[rootKey.ID()] = rootKey r, err = repo.SignRoot(data.DefaultExpires(data.CanonicalRootRole)) assert.NoError(t, err) err = signed.Sign(crypto, r, rootKey, oldRootKey) assert.NoError(t, err) rt, err := data.RootFromSigned(r) assert.NoError(t, err) repo.SetRoot(rt) sn, err = repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole)) assert.NoError(t, err) root, targets, snapshot, timestamp, err = testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.NoError(t, err) } func 
TestValidateNoRoot(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) _, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrValidation{}, err) } func TestValidateSnapshotMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, _, _, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadHierarchy{}, err) } // ### Role missing negative tests ### // These tests remove a role from the Root file and // check for a ErrBadRoot func TestValidateRootRoleMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() delete(repo.Root.Signed.Roles, "root") r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadRoot{}, err) } func TestValidateTargetsRoleMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() 
delete(repo.Root.Signed.Roles, "targets") r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadRoot{}, err) } func TestValidateSnapshotRoleMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() delete(repo.Root.Signed.Roles, "snapshot") r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadRoot{}, err) } // ### End role missing negative tests ### // ### Signature missing negative tests ### func TestValidateRootSigMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() delete(repo.Root.Signed.Roles, "snapshot") r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) r.Signatures = nil root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadRoot{}, err) } func 
TestValidateTargetsSigMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) tg.Signatures = nil root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadTargets{}, err) } func TestValidateSnapshotSigMissing(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) sn.Signatures = nil root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadSnapshot{}, err) } // ### End signature missing negative tests ### // ### Corrupted metadata negative tests ### func TestValidateRootCorrupt(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) // flip all the bits in the first byte root[0] = root[0] ^ 0xff updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", 
updates, store) assert.Error(t, err) assert.IsType(t, ErrBadRoot{}, err) } func TestValidateTargetsCorrupt(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) // flip all the bits in the first byte targets[0] = targets[0] ^ 0xff updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadTargets{}, err) } func TestValidateSnapshotCorrupt(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) // flip all the bits in the first byte snapshot[0] = snapshot[0] ^ 0xff updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadSnapshot{}, err) } // ### End corrupted metadata negative tests ### // ### Snapshot size mismatch negative tests ### func TestValidateRootModifiedSize(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) // add another copy of the signature so the hash is different r.Signatures = append(r.Signatures, r.Signatures[0]) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) // flip all the bits in the 
first byte root[0] = root[0] ^ 0xff updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadRoot{}, err) } func TestValidateTargetsModifiedSize(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) // add another copy of the signature so the hash is different tg.Signatures = append(tg.Signatures, tg.Signatures[0]) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadSnapshot{}, err) } // ### End snapshot size mismatch negative tests ### // ### Snapshot hash mismatch negative tests ### func TestValidateRootModifiedHash(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) snap, err := data.SnapshotFromSigned(sn) assert.NoError(t, err) snap.Signed.Meta["root"].Hashes["sha256"][0] = snap.Signed.Meta["root"].Hashes["sha256"][0] ^ 0xff sn, err = snap.ToSigned() assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, 
store) assert.Error(t, err) assert.IsType(t, ErrBadSnapshot{}, err) } func TestValidateTargetsModifiedHash(t *testing.T) { _, repo, _ := testutils.EmptyRepo() store := storage.NewMemStorage() r, tg, sn, ts, err := testutils.Sign(repo) assert.NoError(t, err) snap, err := data.SnapshotFromSigned(sn) assert.NoError(t, err) snap.Signed.Meta["targets"].Hashes["sha256"][0] = snap.Signed.Meta["targets"].Hashes["sha256"][0] ^ 0xff sn, err = snap.ToSigned() assert.NoError(t, err) root, targets, snapshot, timestamp, err := testutils.Serialize(r, tg, sn, ts) assert.NoError(t, err) updates := []storage.MetaUpdate{ { Role: "root", Version: 1, Data: root, }, { Role: "targets", Version: 1, Data: targets, }, { Role: "snapshot", Version: 1, Data: snapshot, }, { Role: "timestamp", Version: 1, Data: timestamp, }, } err = validateUpdate("testGUN", updates, store) assert.Error(t, err) assert.IsType(t, ErrBadSnapshot{}, err) } // ### End snapshot hash mismatch negative tests ### notary-0.1/server/server.go000066400000000000000000000066431262207326400160460ustar00rootroot00000000000000package server import ( "crypto/tls" "fmt" "net" "net/http" "github.com/Sirupsen/logrus" "github.com/docker/distribution/health" "github.com/docker/distribution/registry/auth" "github.com/docker/notary/server/handlers" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/utils" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" ) func init() { data.SetDefaultExpiryTimes( map[string]int{ "timestamp": 14, }, ) } func prometheusOpts(operation string) prometheus.SummaryOpts { return prometheus.SummaryOpts{ Namespace: "notary_server", Subsystem: "http", ConstLabels: prometheus.Labels{"operation": operation}, } } // Run sets up and starts a TLS server that can be cancelled using the // given configuration. 
The context it is passed is the context it should // use directly for the TLS server, and generate children off for requests func Run(ctx context.Context, addr string, tlsConfig *tls.Config, trust signed.CryptoService, authMethod string, authOpts interface{}) error { tcpAddr, err := net.ResolveTCPAddr("tcp", addr) if err != nil { return err } var lsnr net.Listener lsnr, err = net.ListenTCP("tcp", tcpAddr) if err != nil { return err } if tlsConfig != nil { logrus.Info("Enabling TLS") lsnr = tls.NewListener(lsnr, tlsConfig) } var ac auth.AccessController if authMethod == "token" { authOptions, ok := authOpts.(map[string]interface{}) if !ok { return fmt.Errorf("auth.options must be a map[string]interface{}") } ac, err = auth.GetAccessController(authMethod, authOptions) if err != nil { return err } } svr := http.Server{ Addr: addr, Handler: RootHandler(ac, ctx, trust), } logrus.Info("Starting on ", addr) err = svr.Serve(lsnr) return err } // RootHandler returns the handler that routes all the paths from / for the // server. 
func RootHandler(ac auth.AccessController, ctx context.Context, trust signed.CryptoService) http.Handler { hand := utils.RootHandlerFactory(ac, ctx, trust) r := mux.NewRouter() r.Methods("GET").Path("/v2/").Handler(hand(handlers.MainHandler)) r.Methods("POST").Path("/v2/{imageName:.*}/_trust/tuf/").Handler( prometheus.InstrumentHandlerWithOpts( prometheusOpts("UpdateTuf"), hand(handlers.AtomicUpdateHandler, "push", "pull"))) r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/{tufRole:(root|targets|snapshot)}.json").Handler( prometheus.InstrumentHandlerWithOpts( prometheusOpts("GetRole"), hand(handlers.GetHandler, "pull"))) r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/timestamp.json").Handler( prometheus.InstrumentHandlerWithOpts( prometheusOpts("GetTimestamp"), hand(handlers.GetTimestampHandler, "pull"))) r.Methods("GET").Path("/v2/{imageName:.*}/_trust/tuf/timestamp.key").Handler( prometheus.InstrumentHandlerWithOpts( prometheusOpts("GetTimestampKey"), hand(handlers.GetTimestampKeyHandler, "push", "pull"))) r.Methods("DELETE").Path("/v2/{imageName:.*}/_trust/tuf/").Handler( prometheus.InstrumentHandlerWithOpts( prometheusOpts("DeleteTuf"), hand(handlers.DeleteHandler, "push", "pull"))) r.Methods("GET").Path("/_notary_server/health").HandlerFunc(health.StatusHandler) r.Methods("GET").Path("/_notary_server/metrics").Handler(prometheus.Handler()) r.Methods("GET", "POST", "PUT", "HEAD", "DELETE").Path("/{other:.*}").Handler(hand(utils.NotFoundHandler)) return r } notary-0.1/server/server_test.go000066400000000000000000000022111262207326400170700ustar00rootroot00000000000000package server import ( "net" "net/http" "net/http/httptest" "strings" "testing" _ "github.com/docker/distribution/registry/auth/silly" "github.com/docker/notary/tuf/signed" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) func TestRunBadAddr(t *testing.T) { err := Run( context.Background(), "testAddr", nil, signed.NewEd25519(), "", nil, ) if err == nil { t.Fatal("Passed 
bad addr, Run should have failed") } } func TestRunReservedPort(t *testing.T) { ctx, _ := context.WithCancel(context.Background()) err := Run( ctx, "localhost:80", nil, signed.NewEd25519(), "", nil, ) if _, ok := err.(*net.OpError); !ok { t.Fatalf("Received unexpected err: %s", err.Error()) } if !strings.Contains(err.Error(), "bind: permission denied") { t.Fatalf("Received unexpected err: %s", err.Error()) } } func TestMetricsEndpoint(t *testing.T) { handler := RootHandler(nil, context.Background(), signed.NewEd25519()) ts := httptest.NewServer(handler) defer ts.Close() res, err := http.Get(ts.URL + "/_notary_server/metrics") assert.NoError(t, err) assert.Equal(t, http.StatusOK, res.StatusCode) } notary-0.1/server/storage/000077500000000000000000000000001262207326400156445ustar00rootroot00000000000000notary-0.1/server/storage/database.go000066400000000000000000000122011262207326400177330ustar00rootroot00000000000000package storage import ( "fmt" "github.com/Sirupsen/logrus" "github.com/go-sql-driver/mysql" "github.com/jinzhu/gorm" "github.com/mattn/go-sqlite3" ) // SQLStorage implements a versioned store using a relational database. // See server/storage/models.go type SQLStorage struct { gorm.DB } // NewSQLStorage is a convenience method to create a SQLStorage func NewSQLStorage(dialect string, args ...interface{}) (*SQLStorage, error) { gormDB, err := gorm.Open(dialect, args...) 
if err != nil {
		return nil, err
	}
	return &SQLStorage{
		DB: gormDB,
	}, nil
}

// translateOldVersionError captures DB errors, and attempts to translate
// duplicate entry - currently only supports MySQL and Sqlite3
func translateOldVersionError(err error) error {
	switch err := err.(type) {
	case *mysql.MySQLError:
		// https://dev.mysql.com/doc/refman/5.5/en/error-messages-server.html
		// 1022 = Can't write; duplicate key in table '%s'
		// 1062 = Duplicate entry '%s' for key %d
		if err.Number == 1022 || err.Number == 1062 {
			return &ErrOldVersion{}
		}
	case *sqlite3.Error:
		// https://godoc.org/github.com/mattn/go-sqlite3#pkg-variables
		// a UNIQUE-constraint violation means the (gun, role, version)
		// row already exists, i.e. the caller tried to re-insert an old
		// version
		if err.Code == sqlite3.ErrConstraint && err.ExtendedCode == sqlite3.ErrConstraintUnique {
			return &ErrOldVersion{}
		}
	}
	// any other error is passed through untranslated
	return err
}

// UpdateCurrent updates a single TUF.
// It refuses the write (ErrOldVersion) if any row with the same gun/role
// already has a version >= update.Version.
func (db *SQLStorage) UpdateCurrent(gun string, update MetaUpdate) error {
	// ensure we're not inserting an immediately old version - can't use the
	// struct, because that only works with non-zero values, and Version
	// can be 0.
	exists := db.Where("gun = ? and role = ? and version >= ?",
		gun, update.Role, update.Version).First(&TUFFile{})
	if !exists.RecordNotFound() {
		return &ErrOldVersion{}
	}
	// the unique index on (gun, role, version) backstops the check above;
	// translateOldVersionError maps a constraint violation to ErrOldVersion
	return translateOldVersionError(db.Create(&TUFFile{
		Gun:     gun,
		Role:    update.Role,
		Version: update.Version,
		Data:    update.Data,
	}).Error)
}

// UpdateMany atomically updates many TUF records in a single transaction
func (db *SQLStorage) UpdateMany(gun string, updates []MetaUpdate) error {
	tx := db.Begin()
	if tx.Error != nil {
		return tx.Error
	}
	// rollback aborts the transaction and returns the original error,
	// unless the rollback itself fails, in which case that error wins
	rollback := func(err error) error {
		if rxErr := tx.Rollback().Error; rxErr != nil {
			logrus.Error("Failed on Tx rollback with error: ", rxErr.Error())
			return rxErr
		}
		return err
	}
	var (
		query *gorm.DB
		// added tracks row IDs created in this batch so a duplicate
		// (gun, role, version) within the same transaction is detected
		added = make(map[uint]bool)
	)
	for _, update := range updates {
		// This looks like the same logic as UpdateCurrent, but if we just
		// called, version ordering in the updates list must be enforced
		// (you cannot insert the version 2 before version 1).
And we do // not care about monotonic ordering in the updates. query = db.Where("gun = ? and role = ? and version >= ?", gun, update.Role, update.Version).First(&TUFFile{}) if !query.RecordNotFound() { return rollback(&ErrOldVersion{}) } var row TUFFile query = tx.Where(map[string]interface{}{ "gun": gun, "role": update.Role, "version": update.Version, }).Attrs("data", update.Data).FirstOrCreate(&row) if query.Error != nil { return rollback(translateOldVersionError(query.Error)) } // it's previously been added, which means it's a duplicate entry // in the same transaction if _, ok := added[row.ID]; ok { return rollback(&ErrOldVersion{}) } added[row.ID] = true } return tx.Commit().Error } // GetCurrent gets a specific TUF record func (db *SQLStorage) GetCurrent(gun, tufRole string) ([]byte, error) { var row TUFFile q := db.Select("data").Where(&TUFFile{Gun: gun, Role: tufRole}).Order("version desc").Limit(1).First(&row) if q.RecordNotFound() { return nil, &ErrNotFound{} } else if q.Error != nil { return nil, q.Error } return row.Data, nil } // Delete deletes all the records for a specific GUN func (db *SQLStorage) Delete(gun string) error { return db.Where(&TUFFile{Gun: gun}).Delete(TUFFile{}).Error } // GetTimestampKey returns the timestamps Public Key data func (db *SQLStorage) GetTimestampKey(gun string) (algorithm string, public []byte, err error) { logrus.Debug("retrieving timestamp key for ", gun) var row TimestampKey query := db.Select("cipher, public").Where(&TimestampKey{Gun: gun}).Find(&row) if query.RecordNotFound() { return "", nil, &ErrNoKey{gun: gun} } else if query.Error != nil { return "", nil, query.Error } return row.Cipher, row.Public, nil } // SetTimestampKey attempts to write a TimeStamp key and returns an error if it already exists func (db *SQLStorage) SetTimestampKey(gun string, algorithm string, public []byte) error { entry := TimestampKey{ Gun: gun, Cipher: string(algorithm), Public: public, } if 
!db.Where(&entry).First(&TimestampKey{}).RecordNotFound() { return &ErrTimestampKeyExists{gun: gun} } return translateOldVersionError( db.FirstOrCreate(&TimestampKey{}, &entry).Error) } // CheckHealth asserts that both required tables are present func (db *SQLStorage) CheckHealth() error { interfaces := []interface { TableName() string }{&TUFFile{}, &TimestampKey{}} for _, model := range interfaces { tableOk := db.HasTable(model) if db.Error != nil { return db.Error } if !tableOk { return fmt.Errorf( "Cannot access table: %s", model.TableName()) } } return nil } notary-0.1/server/storage/database_test.go000066400000000000000000000250731262207326400210050ustar00rootroot00000000000000package storage import ( "io/ioutil" "os" "testing" "github.com/jinzhu/gorm" _ "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/assert" ) // SampleTUF returns a sample TUFFile with the given Version (ID will have // to be set independently) func SampleTUF(version int) TUFFile { return TUFFile{ Gun: "testGUN", Role: "root", Version: version, Data: []byte("1"), } } func SampleUpdate(version int) MetaUpdate { return MetaUpdate{ Role: "root", Version: version, Data: []byte("1"), } } // SetUpSQLite creates a sqlite database for testing func SetUpSQLite(t *testing.T, dbDir string) (*gorm.DB, *SQLStorage) { dbStore, err := NewSQLStorage("sqlite3", dbDir+"test_db") assert.NoError(t, err) // Create the DB tables err = CreateTUFTable(dbStore.DB) assert.NoError(t, err) err = CreateTimestampTable(dbStore.DB) assert.NoError(t, err) // verify that the tables are empty var count int for _, model := range [2]interface{}{&TUFFile{}, &TimestampKey{}} { query := dbStore.DB.Model(model).Count(&count) assert.NoError(t, query.Error) assert.Equal(t, 0, count) } return &dbStore.DB, dbStore } // TestSQLUpdateCurrent asserts that UpdateCurrent will add a new TUF file // if no previous version existed. 
func TestSQLUpdateCurrentNew(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) // Adding a new TUF file should succeed err = dbStore.UpdateCurrent("testGUN", SampleUpdate(0)) assert.NoError(t, err, "Creating a row in an empty DB failed.") // There should just be one row var rows []TUFFile query := gormDB.Select("ID, Gun, Role, Version, Data").Find(&rows) assert.NoError(t, query.Error) expected := SampleTUF(0) expected.ID = 1 assert.Equal(t, []TUFFile{expected}, rows) } // TestSQLUpdateCurrentNewVersion asserts that UpdateCurrent will add a // new (higher) version of an existing TUF file func TestSQLUpdateCurrentNewVersion(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) // insert row oldVersion := SampleTUF(0) query := gormDB.Create(&oldVersion) assert.NoError(t, query.Error, "Creating a row in an empty DB failed.") // UpdateCurrent with a newer version should succeed update := SampleUpdate(2) err = dbStore.UpdateCurrent("testGUN", update) assert.NoError(t, err, "Creating a row in an empty DB failed.") // There should just be one row var rows []TUFFile query = gormDB.Select("ID, Gun, Role, Version, Data").Find(&rows) assert.NoError(t, query.Error) oldVersion.Model = gorm.Model{ID: 1} expected := SampleTUF(2) expected.Model = gorm.Model{ID: 2} assert.Equal(t, []TUFFile{oldVersion, expected}, rows) } // TestSQLUpdateCurrentOldVersionError asserts that an error is raised if // trying to update to an older version of a TUF file. 
func TestSQLUpdateCurrentOldVersionError(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) // insert row newVersion := SampleTUF(3) query := gormDB.Create(&newVersion) assert.NoError(t, query.Error, "Creating a row in an empty DB failed.") // UpdateCurrent should fail due to the version being lower than the // previous row err = dbStore.UpdateCurrent("testGUN", SampleUpdate(0)) assert.Error(t, err, "Error should not be nil") assert.IsType(t, &ErrOldVersion{}, err, "Expected ErrOldVersion error type, got: %v", err) // There should just be one row var rows []TUFFile query = gormDB.Select("ID, Gun, Role, Version, Data").Find(&rows) assert.NoError(t, query.Error) newVersion.Model = gorm.Model{ID: 1} assert.Equal(t, []TUFFile{newVersion}, rows) dbStore.DB.Close() } // TestSQLUpdateMany asserts that inserting multiple updates succeeds if the // updates do not conflict with each. func TestSQLUpdateMany(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) err = dbStore.UpdateMany("testGUN", []MetaUpdate{ SampleUpdate(0), { Role: "targets", Version: 1, Data: []byte("2"), }, SampleUpdate(2), }) assert.NoError(t, err, "UpdateMany errored unexpectedly: %v", err) gorm1 := SampleTUF(0) gorm1.ID = 1 gorm2 := TUFFile{ Model: gorm.Model{ID: 2}, Gun: "testGUN", Role: "targets", Version: 1, Data: []byte("2")} gorm3 := SampleTUF(2) gorm3.ID = 3 expected := []TUFFile{gorm1, gorm2, gorm3} var rows []TUFFile query := gormDB.Select("ID, Gun, Role, Version, Data").Find(&rows) assert.NoError(t, query.Error) assert.Equal(t, expected, rows) dbStore.DB.Close() } // TestSQLUpdateManyVersionOrder asserts that inserting updates with // non-monotonic versions still succeeds. 
func TestSQLUpdateManyVersionOrder(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) err = dbStore.UpdateMany( "testGUN", []MetaUpdate{SampleUpdate(2), SampleUpdate(0)}) assert.NoError(t, err) // the whole transaction should have rolled back, so there should be // no entries. gorm1 := SampleTUF(2) gorm1.ID = 1 gorm2 := SampleTUF(0) gorm2.ID = 2 var rows []TUFFile query := gormDB.Select("ID, Gun, Role, Version, Data").Find(&rows) assert.NoError(t, query.Error) assert.Equal(t, []TUFFile{gorm1, gorm2}, rows) dbStore.DB.Close() } // TestSQLUpdateManyDuplicateRollback asserts that inserting duplicate // updates fails. func TestSQLUpdateManyDuplicateRollback(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) update := SampleUpdate(0) err = dbStore.UpdateMany("testGUN", []MetaUpdate{update, update}) assert.Error( t, err, "There should be an error updating the same data twice.") assert.IsType(t, &ErrOldVersion{}, err, "UpdateMany returned wrong error type") // the whole transaction should have rolled back, so there should be // no entries. 
var count int query := gormDB.Model(&TUFFile{}).Count(&count) assert.NoError(t, query.Error) assert.Equal(t, 0, count) dbStore.DB.Close() } func TestSQLGetCurrent(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) byt, err := dbStore.GetCurrent("testGUN", "root") assert.Nil(t, byt) assert.Error(t, err, "There should be an error Getting an empty table") assert.IsType(t, &ErrNotFound{}, err, "Should get a not found error") tuf := SampleTUF(0) query := gormDB.Create(&tuf) assert.NoError(t, query.Error, "Creating a row in an empty DB failed.") byt, err = dbStore.GetCurrent("testGUN", "root") assert.NoError(t, err, "There should not be any errors getting.") assert.Equal(t, []byte("1"), byt, "Returned data was incorrect") dbStore.DB.Close() } func TestSQLDelete(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) tuf := SampleTUF(0) query := gormDB.Create(&tuf) assert.NoError(t, query.Error, "Creating a row in an empty DB failed.") err = dbStore.Delete("testGUN") assert.NoError(t, err, "There should not be any errors deleting.") // verify deletion var count int query = gormDB.Model(&TUFFile{}).Count(&count) assert.NoError(t, query.Error) assert.Equal(t, 0, count) dbStore.DB.Close() } func TestSQLGetTimestampKeyNoKey(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) cipher, public, err := dbStore.GetTimestampKey("testGUN") assert.Equal(t, "", cipher) assert.Nil(t, public) assert.IsType(t, &ErrNoKey{}, err, "Expected ErrNoKey from GetTimestampKey") query := gormDB.Create(&TimestampKey{ Gun: "testGUN", Cipher: "testCipher", Public: []byte("1"), }) assert.NoError( t, query.Error, "Inserting timestamp into empty DB should succeed") cipher, public, err = 
dbStore.GetTimestampKey("testGUN") assert.Equal(t, "testCipher", cipher, "Returned cipher was incorrect") assert.Equal(t, []byte("1"), public, "Returned pubkey was incorrect") } func TestSQLSetTimestampKeyExists(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") gormDB, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) err = dbStore.SetTimestampKey("testGUN", "testCipher", []byte("1")) assert.NoError(t, err, "Inserting timestamp into empty DB should succeed") err = dbStore.SetTimestampKey("testGUN", "testCipher", []byte("1")) assert.Error(t, err) assert.IsType(t, &ErrTimestampKeyExists{}, err, "Expected ErrTimestampKeyExists from SetTimestampKey") var rows []TimestampKey query := gormDB.Select("ID, Gun, Cipher, Public").Find(&rows) assert.NoError(t, query.Error) expected := TimestampKey{Gun: "testGUN", Cipher: "testCipher", Public: []byte("1")} expected.Model = gorm.Model{ID: 1} assert.Equal(t, []TimestampKey{expected}, rows) dbStore.DB.Close() } // TestDBCheckHealthTableMissing asserts that the health check fails if one or // both the tables are missing. func TestDBCheckHealthTableMissing(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") _, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) dbStore.DropTable(&TUFFile{}) dbStore.DropTable(&TimestampKey{}) // No tables, health check fails err = dbStore.CheckHealth() assert.Error(t, err, "Cannot access table:") // only one table existing causes health check to fail CreateTUFTable(dbStore.DB) err = dbStore.CheckHealth() assert.Error(t, err, "Cannot access table:") dbStore.DropTable(&TUFFile{}) CreateTimestampTable(dbStore.DB) err = dbStore.CheckHealth() assert.Error(t, err, "Cannot access table:") } // TestDBCheckHealthDBCOnnection asserts that if the DB is not connectable, the // health check fails. 
func TestDBCheckHealthDBConnectionFail(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") _, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) err = dbStore.Close() assert.NoError(t, err) err = dbStore.CheckHealth() assert.Error(t, err, "Cannot access table:") } // TestDBCheckHealthSuceeds asserts that if the DB is connectable and both // tables exist, the health check succeeds. func TestDBCheckHealthSucceeds(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") _, dbStore := SetUpSQLite(t, tempBaseDir) defer os.RemoveAll(tempBaseDir) err = dbStore.CheckHealth() assert.NoError(t, err) } notary-0.1/server/storage/errors.go000066400000000000000000000022141262207326400175060ustar00rootroot00000000000000package storage import ( "fmt" ) // ErrOldVersion is returned when a newer version of TUF metadada is already available type ErrOldVersion struct{} // ErrOldVersion is returned when a newer version of TUF metadada is already available func (err ErrOldVersion) Error() string { return fmt.Sprintf("Error updating metadata. 
A newer version is already available") }

// ErrNotFound is returned when TUF metadata isn't found for a specific record
type ErrNotFound struct{}

// Error implements error
func (err ErrNotFound) Error() string {
	// No formatting verbs are used here, so a plain string literal replaces
	// the pointless fmt.Sprintf call (staticcheck S1039). The message is
	// byte-identical to the old output.
	return "No record found"
}

// ErrTimestampKeyExists is returned when a timestamp key already exists
type ErrTimestampKeyExists struct {
	gun string
}

// Error implements error
func (err ErrTimestampKeyExists) Error() string {
	return fmt.Sprintf("Error, timestamp key already exists for %s", err.gun)
}

// ErrNoKey is returned when no timestamp key is found
type ErrNoKey struct {
	gun string
}

// Error implements error
func (err ErrNoKey) Error() string {
	return fmt.Sprintf("Error, no timestamp key found for %s", err.gun)
}
notary-0.1/server/storage/interface.go000066400000000000000000000027241262207326400201400ustar00rootroot00000000000000package storage

// MetaStore holds the methods that are used for a Metadata Store
type MetaStore interface {
	// UpdateCurrent adds new metadata version for the given GUN if and only
	// if it's a new role, or the version is greater than the current version
	// for the role. Otherwise an error is returned.
	UpdateCurrent(gun string, update MetaUpdate) error

	// UpdateMany adds multiple new metadata for the given GUN. It can even
	// add multiple versions for the same role, so long as those versions are
	// all unique and greater than any current versions. Otherwise,
	// none of the metadata is added, and an error is returned.
	UpdateMany(gun string, updates []MetaUpdate) error

	// GetCurrent returns the data part of the metadata for the latest version
	// of the given GUN and role. If there is no data for the given GUN and
	// role, an error is returned.
	GetCurrent(gun, tufRole string) (data []byte, err error)

	// Delete removes all metadata for a given GUN. It does not return an
	// error if no metadata exists for the given GUN.
Delete(gun string) error // GetTimestampKey returns the algorithm and public key for the given GUN. // If the GUN doesn't exist, returns an error. GetTimestampKey(gun string) (algorithm string, public []byte, err error) // SetTimeStampKey sets the algorithm and public key for the given GUN if // it doesn't already exist. Otherwise an error is returned. SetTimestampKey(gun string, algorithm string, public []byte) error } notary-0.1/server/storage/memory.go000066400000000000000000000050371262207326400175100ustar00rootroot00000000000000package storage import ( "fmt" "strings" "sync" ) type key struct { algorithm string public []byte } type ver struct { version int data []byte } // MemStorage is really just designed for dev and testing. It is very // inefficient in many scenarios type MemStorage struct { lock sync.Mutex tufMeta map[string][]*ver tsKeys map[string]*key } // NewMemStorage instantiates a memStorage instance func NewMemStorage() *MemStorage { return &MemStorage{ tufMeta: make(map[string][]*ver), tsKeys: make(map[string]*key), } } // UpdateCurrent updates the meta data for a specific role func (st *MemStorage) UpdateCurrent(gun string, update MetaUpdate) error { id := entryKey(gun, update.Role) st.lock.Lock() defer st.lock.Unlock() if space, ok := st.tufMeta[id]; ok { for _, v := range space { if v.version >= update.Version { return &ErrOldVersion{} } } } st.tufMeta[id] = append(st.tufMeta[id], &ver{version: update.Version, data: update.Data}) return nil } // UpdateMany updates multiple TUF records func (st *MemStorage) UpdateMany(gun string, updates []MetaUpdate) error { for _, u := range updates { st.UpdateCurrent(gun, u) } return nil } // GetCurrent returns the metadada for a given role, under a GUN func (st *MemStorage) GetCurrent(gun, role string) (data []byte, err error) { id := entryKey(gun, role) st.lock.Lock() defer st.lock.Unlock() space, ok := st.tufMeta[id] if !ok || len(space) == 0 { return nil, &ErrNotFound{} } return space[len(space)-1].data, 
nil } // Delete delets all the metadata for a given GUN func (st *MemStorage) Delete(gun string) error { st.lock.Lock() defer st.lock.Unlock() for k := range st.tufMeta { if strings.HasPrefix(k, gun) { delete(st.tufMeta, k) } } return nil } // GetTimestampKey returns the public key material of the timestamp key of a given gun func (st *MemStorage) GetTimestampKey(gun string) (algorithm string, public []byte, err error) { // no need for lock. It's ok to return nil if an update // wasn't observed k, ok := st.tsKeys[gun] if !ok { return "", nil, &ErrNoKey{gun: gun} } return k.algorithm, k.public, nil } // SetTimestampKey sets a Timestamp key under a gun func (st *MemStorage) SetTimestampKey(gun string, algorithm string, public []byte) error { k := &key{algorithm: algorithm, public: public} st.lock.Lock() defer st.lock.Unlock() if _, ok := st.tsKeys[gun]; ok { return &ErrTimestampKeyExists{gun: gun} } st.tsKeys[gun] = k return nil } func entryKey(gun, role string) string { return fmt.Sprintf("%s.%s", gun, role) } notary-0.1/server/storage/memory_test.go000066400000000000000000000040511262207326400205420ustar00rootroot00000000000000package storage import ( "testing" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) func TestUpdateCurrent(t *testing.T) { s := NewMemStorage() s.UpdateCurrent("gun", MetaUpdate{"role", 1, []byte("test")}) k := entryKey("gun", "role") gun, ok := s.tufMeta[k] v := gun[0] assert.True(t, ok, "Did not find gun in store") assert.Equal(t, 1, v.version, "Version mismatch. 
Expected 1, found %d", v.version) assert.Equal(t, []byte("test"), v.data, "Data was incorrect") } func TestGetCurrent(t *testing.T) { s := NewMemStorage() _, err := s.GetCurrent("gun", "role") assert.IsType(t, &ErrNotFound{}, err, "Expected error to be ErrNotFound") s.UpdateCurrent("gun", MetaUpdate{"role", 1, []byte("test")}) d, err := s.GetCurrent("gun", "role") assert.Nil(t, err, "Expected error to be nil") assert.Equal(t, []byte("test"), d, "Data was incorrect") } func TestDelete(t *testing.T) { s := NewMemStorage() s.UpdateCurrent("gun", MetaUpdate{"role", 1, []byte("test")}) s.Delete("gun") k := entryKey("gun", "role") _, ok := s.tufMeta[k] assert.False(t, ok, "Found gun in store, should have been deleted") } func TestGetTimestampKey(t *testing.T) { s := NewMemStorage() //_, _, err := s.GetTimestampKey("gun") //assert.IsType(t, &ErrNoKey{}, err, "Expected err to be ErrNoKey") s.SetTimestampKey("gun", data.RSAKey, []byte("test")) c, k, err := s.GetTimestampKey("gun") assert.Nil(t, err, "Expected error to be nil") assert.Equal(t, data.RSAKey, c, "Expected algorithm rsa, received %s", c) assert.Equal(t, []byte("test"), k, "Key data was wrong") } func TestSetTimestampKey(t *testing.T) { s := NewMemStorage() s.SetTimestampKey("gun", data.RSAKey, []byte("test")) err := s.SetTimestampKey("gun", data.RSAKey, []byte("test2")) assert.IsType(t, &ErrTimestampKeyExists{}, err, "Expected err to be ErrTimestampKeyExists") k := s.tsKeys["gun"] assert.Equal(t, data.RSAKey, k.algorithm, "Expected algorithm to be rsa, received %s", k.algorithm) assert.Equal(t, []byte("test"), k.public, "Public key did not match expected") } notary-0.1/server/storage/models.go000066400000000000000000000027301262207326400174600ustar00rootroot00000000000000package storage import "github.com/jinzhu/gorm" // TUFFile represents a TUF file in the database type TUFFile struct { gorm.Model Gun string `sql:"type:varchar(255);not null"` Role string `sql:"type:varchar(255);not null"` Version int `sql:"not 
null"` Data []byte `sql:"type:longblob;not null"` } // TableName sets a specific table name for TUFFile func (g TUFFile) TableName() string { return "tuf_files" } // TimestampKey represents a single timestamp key in the database type TimestampKey struct { gorm.Model Gun string `sql:"type:varchar(255);unique;not null"` Cipher string `sql:"type:varchar(30);not null"` Public []byte `sql:"type:blob;not null"` } // TableName sets a specific table name for our TimestampKey func (g TimestampKey) TableName() string { return "timestamp_keys" } // CreateTUFTable creates the DB table for TUFFile func CreateTUFTable(db gorm.DB) error { // TODO: gorm query := db.Set("gorm:table_options", "ENGINE=InnoDB DEFAULT CHARSET=utf8").CreateTable(&TUFFile{}) if query.Error != nil { return query.Error } query = db.Model(&TUFFile{}).AddUniqueIndex( "idx_gun", "gun", "role", "version") if query.Error != nil { return query.Error } return nil } // CreateTimestampTable creates the DB table for TUFFile func CreateTimestampTable(db gorm.DB) error { query := db.Set("gorm:table_options", "ENGINE=InnoDB DEFAULT CHARSET=utf8").CreateTable(&TimestampKey{}) if query.Error != nil { return query.Error } return nil } notary-0.1/server/storage/types.go000066400000000000000000000002361262207326400173400ustar00rootroot00000000000000package storage // MetaUpdate packages up the fields required to update a TUF record type MetaUpdate struct { Role string Version int Data []byte } notary-0.1/server/timestamp/000077500000000000000000000000001262207326400162035ustar00rootroot00000000000000notary-0.1/server/timestamp/timestamp.go000066400000000000000000000114031262207326400205340ustar00rootroot00000000000000package timestamp import ( "bytes" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/jfrazelle/go/canonical/json" "github.com/Sirupsen/logrus" "github.com/docker/notary/server/storage" ) // GetOrCreateTimestampKey returns the timestamp key for the gun. 
It uses the store to
// lookup an existing timestamp key and the crypto to generate a new one if none is
// found. It attempts to handle the race condition that may occur if 2 servers try to
// create the key at the same time by simply querying the store a second time if it
// receives a conflict when writing.
//
// fallBackAlgorithm is the algorithm used only when a brand-new key must be
// generated; an existing stored key is returned with whatever algorithm it
// was created with.
func GetOrCreateTimestampKey(gun string, store storage.MetaStore, crypto signed.CryptoService, fallBackAlgorithm string) (data.PublicKey, error) {
	keyAlgorithm, public, err := store.GetTimestampKey(gun)
	if err == nil {
		// fast path: a key already exists for this GUN
		return data.NewPublicKey(keyAlgorithm, public), nil
	}

	if _, ok := err.(*storage.ErrNoKey); ok {
		// no key yet: generate one and try to persist it
		key, err := crypto.Create("timestamp", fallBackAlgorithm)
		if err != nil {
			return nil, err
		}
		logrus.Debug("Creating new timestamp key for ", gun, ". With algo: ", key.Algorithm())
		err = store.SetTimestampKey(gun, key.Algorithm(), key.Public())
		if err == nil {
			return key, nil
		}

		if _, ok := err.(*storage.ErrTimestampKeyExists); ok {
			// lost the race: another server stored a key between our Get
			// and Set, so re-read and return the winner's key (our freshly
			// generated key is discarded)
			keyAlgorithm, public, err = store.GetTimestampKey(gun)
			if err != nil {
				return nil, err
			}
			return data.NewPublicKey(keyAlgorithm, public), nil
		}
		return nil, err
	}
	// any error other than ErrNoKey from the initial lookup is fatal
	return nil, err
}

// GetOrCreateTimestamp returns the current timestamp for the gun. This may mean
// a new timestamp is generated either because none exists, or because the current
// one has expired. Once generated, the timestamp is saved in the store.
func GetOrCreateTimestamp(gun string, store storage.MetaStore, cryptoService signed.CryptoService) ([]byte, error) { snapshot, err := store.GetCurrent(gun, "snapshot") if err != nil { return nil, err } d, err := store.GetCurrent(gun, "timestamp") if err != nil { if _, ok := err.(*storage.ErrNotFound); !ok { logrus.Error("error retrieving timestamp: ", err.Error()) return nil, err } logrus.Debug("No timestamp found, will proceed to create first timestamp") } ts := &data.SignedTimestamp{} if d != nil { err := json.Unmarshal(d, ts) if err != nil { logrus.Error("Failed to unmarshal existing timestamp") return nil, err } if !timestampExpired(ts) && !snapshotExpired(ts, snapshot) { return d, nil } } sgnd, version, err := CreateTimestamp(gun, ts, snapshot, store, cryptoService) if err != nil { logrus.Error("Failed to create a new timestamp") return nil, err } out, err := json.Marshal(sgnd) if err != nil { logrus.Error("Failed to marshal new timestamp") return nil, err } err = store.UpdateCurrent(gun, storage.MetaUpdate{Role: "timestamp", Version: version, Data: out}) if err != nil { return nil, err } return out, nil } // timestampExpired compares the current time to the expiry time of the timestamp func timestampExpired(ts *data.SignedTimestamp) bool { return signed.IsExpired(ts.Signed.Expires) } func snapshotExpired(ts *data.SignedTimestamp, snapshot []byte) bool { meta, err := data.NewFileMeta(bytes.NewReader(snapshot), "sha256") if err != nil { // if we can't generate FileMeta from the current snapshot, we should // continue to serve the old timestamp if it isn't time expired // because we won't be able to generate a new one. return false } hash := meta.Hashes["sha256"] return !bytes.Equal(hash, ts.Signed.Meta["snapshot"].Hashes["sha256"]) } // CreateTimestamp creates a new timestamp. If a prev timestamp is provided, it // is assumed this is the immediately previous one, and the new one will have a // version number one higher than prev. 
The store is used to lookup the current // snapshot, this function does not save the newly generated timestamp. func CreateTimestamp(gun string, prev *data.SignedTimestamp, snapshot []byte, store storage.MetaStore, cryptoService signed.CryptoService) (*data.Signed, int, error) { algorithm, public, err := store.GetTimestampKey(gun) if err != nil { // owner of gun must have generated a timestamp key otherwise // we won't proceed with generating everything. return nil, 0, err } key := data.NewPublicKey(algorithm, public) sn := &data.Signed{} err = json.Unmarshal(snapshot, sn) if err != nil { // couldn't parse snapshot return nil, 0, err } ts, err := data.NewTimestamp(sn) if err != nil { return nil, 0, err } if prev != nil { ts.Signed.Version = prev.Signed.Version + 1 } sgndTs, err := json.MarshalCanonical(ts.Signed) if err != nil { return nil, 0, err } out := &data.Signed{ Signatures: ts.Signatures, Signed: sgndTs, } err = signed.Sign(cryptoService, out, key) if err != nil { return nil, 0, err } return out, ts.Signed.Version, nil } notary-0.1/server/timestamp/timestamp_test.go000066400000000000000000000053041262207326400215760ustar00rootroot00000000000000package timestamp import ( "encoding/json" "testing" "time" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/stretchr/testify/assert" "github.com/docker/notary/server/storage" ) func TestTimestampExpired(t *testing.T) { ts := &data.SignedTimestamp{ Signatures: nil, Signed: data.Timestamp{ Expires: time.Now().AddDate(-1, 0, 0), }, } assert.True(t, timestampExpired(ts), "Timestamp should have expired") ts = &data.SignedTimestamp{ Signatures: nil, Signed: data.Timestamp{ Expires: time.Now().AddDate(1, 0, 0), }, } assert.False(t, timestampExpired(ts), "Timestamp should NOT have expired") } func TestGetTimestampKey(t *testing.T) { store := storage.NewMemStorage() crypto := signed.NewEd25519() k, err := GetOrCreateTimestampKey("gun", store, crypto, data.ED25519Key) assert.Nil(t, err, 
"Expected nil error") assert.NotNil(t, k, "Key should not be nil") k2, err := GetOrCreateTimestampKey("gun", store, crypto, data.ED25519Key) assert.Nil(t, err, "Expected nil error") // trying to get the same key again should return the same value assert.Equal(t, k, k2, "Did not receive same key when attempting to recreate.") assert.NotNil(t, k2, "Key should not be nil") } func TestGetTimestamp(t *testing.T) { store := storage.NewMemStorage() crypto := signed.NewEd25519() snapshot := &data.SignedSnapshot{} snapJSON, _ := json.Marshal(snapshot) store.UpdateCurrent("gun", storage.MetaUpdate{Role: "snapshot", Version: 0, Data: snapJSON}) // create a key to be used by GetTimestamp _, err := GetOrCreateTimestampKey("gun", store, crypto, data.ED25519Key) assert.Nil(t, err, "GetTimestampKey errored") _, err = GetOrCreateTimestamp("gun", store, crypto) assert.Nil(t, err, "GetTimestamp errored") } func TestGetTimestampNewSnapshot(t *testing.T) { store := storage.NewMemStorage() crypto := signed.NewEd25519() snapshot := data.SignedSnapshot{} snapshot.Signed.Version = 0 snapJSON, _ := json.Marshal(snapshot) store.UpdateCurrent("gun", storage.MetaUpdate{Role: "snapshot", Version: 0, Data: snapJSON}) // create a key to be used by GetTimestamp _, err := GetOrCreateTimestampKey("gun", store, crypto, data.ED25519Key) assert.Nil(t, err, "GetTimestampKey errored") ts1, err := GetOrCreateTimestamp("gun", store, crypto) assert.Nil(t, err, "GetTimestamp errored") snapshot = data.SignedSnapshot{} snapshot.Signed.Version = 1 snapJSON, _ = json.Marshal(snapshot) store.UpdateCurrent("gun", storage.MetaUpdate{Role: "snapshot", Version: 1, Data: snapJSON}) ts2, err := GetOrCreateTimestamp("gun", store, crypto) assert.Nil(t, err, "GetTimestamp errored") assert.NotEqual(t, ts1, ts2, "Timestamp was not regenerated when snapshot changed") } 
notary-0.1/signer/000077500000000000000000000000001262207326400141615ustar00rootroot00000000000000notary-0.1/signer/Dockerfile000066400000000000000000000017771262207326400161670ustar00rootroot00000000000000FROM golang:1.5.1 MAINTAINER Diogo Monica "diogo@docker.com" RUN apt-get update && apt-get install -y \ sqlite3 \ && rm -rf /var/lib/apt/lists/* RUN buildDeps=' \ autoconf \ automake \ build-essential \ libtool \ libssl-dev \ libsqlite3-dev \ ' \ && set -x \ && apt-get update && apt-get install -y $buildDeps --no-install-recommends \ && rm -rf /var/lib/apt/lists/* \ && git clone https://github.com/opendnssec/SoftHSMv2.git /usr/src/SoftHSMv2 \ && cd /usr/src/SoftHSMv2 \ && sh autogen.sh \ && ./configure --with-objectstore-backend-db \ && make \ && make install \ && rm -rf /usr/src/SoftHSMv2 \ && apt-get purge -y --auto-remove $buildDeps \ && mkdir -p /softhsm2/tokens # Default locations for the SoftHSM2 configuration and PKCS11 bindings ENV SOFTHSM2_CONF="/etc/softhsm2/softhsm2.conf" ENV LIBDIR="/usr/local/lib/softhsm/" COPY ./softhsm2.conf /etc/softhsm2/softhsm2.conf ENTRYPOINT ["softhsm2-util"] notary-0.1/signer/api/000077500000000000000000000000001262207326400147325ustar00rootroot00000000000000notary-0.1/signer/api/api.go000066400000000000000000000136241262207326400160400ustar00rootroot00000000000000package api import ( "encoding/json" "net/http" "github.com/docker/notary/signer" "github.com/docker/notary/signer/keys" "github.com/docker/notary/tuf/signed" "github.com/gorilla/mux" pb "github.com/docker/notary/proto" ) // Handlers sets up all the handers for the routes, injecting a specific CryptoService object for them to use func Handlers(cryptoServices signer.CryptoServiceIndex) *mux.Router { r := mux.NewRouter() r.Methods("GET").Path("/{ID}").Handler(KeyInfo(cryptoServices)) r.Methods("POST").Path("/new/{Algorithm}").Handler(CreateKey(cryptoServices)) r.Methods("POST").Path("/delete").Handler(DeleteKey(cryptoServices)) 
r.Methods("POST").Path("/sign").Handler(Sign(cryptoServices)) return r } // getCryptoService handles looking up the correct signing service, given the // algorithm specified in the HTTP request. If the algorithm isn't specified // or isn't supported, an error is returned to the client and this function // returns a nil CryptoService func getCryptoService(w http.ResponseWriter, algorithm string, cryptoServices signer.CryptoServiceIndex) signed.CryptoService { if algorithm == "" { http.Error(w, "algorithm not specified", http.StatusBadRequest) return nil } service := cryptoServices[algorithm] if service == nil { http.Error(w, "algorithm "+algorithm+" not supported", http.StatusBadRequest) return nil } return service } // KeyInfo returns a Handler that given a specific Key ID param, returns the public key bits of that key func KeyInfo(cryptoServices signer.CryptoServiceIndex) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) tufKey, _, err := FindKeyByID(cryptoServices, &pb.KeyID{ID: vars["ID"]}) if err != nil { switch err { // If we received an ErrInvalidKeyID, the key doesn't exist, return 404 case keys.ErrInvalidKeyID: w.WriteHeader(http.StatusNotFound) w.Write([]byte(err.Error())) return // If we received anything else, it is unexpected, and we return a 500 default: w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } } key := &pb.PublicKey{ KeyInfo: &pb.KeyInfo{ KeyID: &pb.KeyID{ID: tufKey.ID()}, Algorithm: &pb.Algorithm{Algorithm: tufKey.Algorithm()}, }, PublicKey: tufKey.Public(), } json.NewEncoder(w).Encode(key) return }) } // CreateKey returns a handler that generates a new func CreateKey(cryptoServices signer.CryptoServiceIndex) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) cryptoService := getCryptoService(w, vars["Algorithm"], cryptoServices) if cryptoService == nil { // Error handled inside getCryptoService 
return } tufKey, err := cryptoService.Create("", vars["Algorithm"]) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } key := &pb.PublicKey{ KeyInfo: &pb.KeyInfo{ KeyID: &pb.KeyID{ID: tufKey.ID()}, Algorithm: &pb.Algorithm{Algorithm: tufKey.Algorithm()}, }, PublicKey: tufKey.Public(), } json.NewEncoder(w).Encode(key) return }) } // DeleteKey returns a handler that delete a specific KeyID func DeleteKey(cryptoServices signer.CryptoServiceIndex) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var keyID *pb.KeyID err := json.NewDecoder(r.Body).Decode(&keyID) defer r.Body.Close() if err != nil || keyID.ID == "" { w.WriteHeader(http.StatusBadRequest) jsonErr, _ := json.Marshal("Malformed request") w.Write([]byte(jsonErr)) return } _, cryptoService, err := FindKeyByID(cryptoServices, keyID) if err != nil { switch err { // If we received an ErrInvalidKeyID, the key doesn't exist, return 404 case keys.ErrInvalidKeyID: w.WriteHeader(http.StatusNotFound) w.Write([]byte(err.Error())) return // If we received anything else, it is unexpected, and we return a 500 default: w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } } if err = cryptoService.RemoveKey(keyID.ID); err != nil { switch err { // If we received an ErrInvalidKeyID, the key doesn't exist, return 404 case keys.ErrInvalidKeyID: w.WriteHeader(http.StatusNotFound) w.Write([]byte(err.Error())) return // If we received anything else, it is unexpected, and we return a 500 default: w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } } // In case we successfully delete this key, return 200 return }) } // Sign returns a handler that is able to perform signatures on a given blob func Sign(cryptoServices signer.CryptoServiceIndex) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var sigRequest *pb.SignatureRequest err := 
json.NewDecoder(r.Body).Decode(&sigRequest) defer r.Body.Close() if err != nil || sigRequest.Content == nil || sigRequest.KeyID == nil { w.WriteHeader(http.StatusBadRequest) jsonErr, _ := json.Marshal("Malformed request") w.Write([]byte(jsonErr)) return } tufKey, cryptoService, err := FindKeyByID(cryptoServices, sigRequest.KeyID) if err == keys.ErrInvalidKeyID { w.WriteHeader(http.StatusNotFound) w.Write([]byte(err.Error())) return } else if err != nil { // We got an unexpected error w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } signatures, err := cryptoService.Sign([]string{sigRequest.KeyID.ID}, sigRequest.Content) if err != nil || len(signatures) != 1 { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) return } signature := &pb.Signature{ KeyInfo: &pb.KeyInfo{ KeyID: &pb.KeyID{ID: tufKey.ID()}, Algorithm: &pb.Algorithm{Algorithm: tufKey.Algorithm()}, }, Content: signatures[0].Signature, } json.NewEncoder(w).Encode(signature) return }) } notary-0.1/signer/api/api_test.go000066400000000000000000000162571262207326400171040ustar00rootroot00000000000000package api_test import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/signer" "github.com/docker/notary/signer/api" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" pb "github.com/docker/notary/proto" ) var ( server *httptest.Server reader io.Reader deleteKeyBaseURL string createKeyBaseURL string keyInfoBaseURL string signBaseURL string passphraseRetriever = func(string, string, bool, int) (string, bool, error) { return "passphrase", false, nil } ) func setup(cryptoServices signer.CryptoServiceIndex) { server = httptest.NewServer(api.Handlers(cryptoServices)) deleteKeyBaseURL = fmt.Sprintf("%s/delete", server.URL) createKeyBaseURL = fmt.Sprintf("%s/new", server.URL) keyInfoBaseURL = 
fmt.Sprintf("%s", server.URL) signBaseURL = fmt.Sprintf("%s/sign", server.URL) } func TestDeleteKeyHandlerReturns404WithNonexistentKey(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) fakeID := "c62e6d68851cef1f7e55a9d56e3b0c05f3359f16838cad43600f0554e7d3b54d" keyID := &pb.KeyID{ID: fakeID} requestJson, _ := json.Marshal(keyID) reader = strings.NewReader(string(requestJson)) request, err := http.NewRequest("POST", deleteKeyBaseURL, reader) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) assert.Equal(t, 404, res.StatusCode) } func TestDeleteKeyHandler(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) tufKey, _ := cryptoService.Create("", data.ED25519Key) assert.NotNil(t, tufKey) requestJson, _ := json.Marshal(&pb.KeyID{ID: tufKey.ID()}) reader = strings.NewReader(string(requestJson)) request, err := http.NewRequest("POST", deleteKeyBaseURL, reader) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) assert.Equal(t, 200, res.StatusCode) } func TestKeyInfoHandler(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) tufKey, _ := cryptoService.Create("", data.ED25519Key) assert.NotNil(t, tufKey) keyInfoURL := fmt.Sprintf("%s/%s", keyInfoBaseURL, tufKey.ID()) request, err := http.NewRequest("GET", keyInfoURL, nil) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) 
assert.Nil(t, err) jsonBlob, err := ioutil.ReadAll(res.Body) assert.Nil(t, err) var pubKey *pb.PublicKey err = json.Unmarshal(jsonBlob, &pubKey) assert.Nil(t, err) assert.Equal(t, tufKey.ID(), pubKey.KeyInfo.KeyID.ID) assert.Equal(t, 200, res.StatusCode) } func TestKeyInfoHandlerReturns404WithNonexistentKey(t *testing.T) { // We associate both key types with this signing service to bypass the // ID -> keyType logic in the tests keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) fakeID := "c62e6d68851cef1f7e55a9d56e3b0c05f3359f16838cad43600f0554e7d3b54d" keyInfoURL := fmt.Sprintf("%s/%s", keyInfoBaseURL, fakeID) request, err := http.NewRequest("GET", keyInfoURL, nil) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) assert.Equal(t, 404, res.StatusCode) } func TestSoftwareCreateKeyHandler(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) createKeyURL := fmt.Sprintf("%s/%s", createKeyBaseURL, data.ED25519Key) request, err := http.NewRequest("POST", createKeyURL, nil) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) assert.Equal(t, 200, res.StatusCode) jsonBlob, err := ioutil.ReadAll(res.Body) assert.Nil(t, err) var keyInfo *pb.PublicKey err = json.Unmarshal(jsonBlob, &keyInfo) assert.Nil(t, err) } func TestSoftwareSignHandler(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) tufKey, err := 
cryptoService.Create("", data.ED25519Key) assert.Nil(t, err) sigRequest := &pb.SignatureRequest{KeyID: &pb.KeyID{ID: tufKey.ID()}, Content: make([]byte, 10)} requestJson, _ := json.Marshal(sigRequest) reader = strings.NewReader(string(requestJson)) request, err := http.NewRequest("POST", signBaseURL, reader) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) assert.Equal(t, 200, res.StatusCode) jsonBlob, err := ioutil.ReadAll(res.Body) assert.Nil(t, err) var sig *pb.Signature err = json.Unmarshal(jsonBlob, &sig) assert.Nil(t, err) assert.Equal(t, tufKey.ID(), sig.KeyInfo.KeyID.ID) } func TestSoftwareSignWithInvalidRequestHandler(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) requestJson := "{\"blob\":\"7d16f1d0b95310a7bc557747fc4f20fcd41c1c5095ae42f189df0717e7d7f4a0a2b55debce630f43c4ac099769c612965e3fda3cd4c0078ee6a460f14fa19307\"}" reader = strings.NewReader(requestJson) request, err := http.NewRequest("POST", signBaseURL, reader) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) jsonBlob, err := ioutil.ReadAll(res.Body) assert.Nil(t, err) var sig *pb.Signature err = json.Unmarshal(jsonBlob, &sig) assert.Equal(t, 400, res.StatusCode) } func TestSignHandlerReturns404WithNonexistentKey(t *testing.T) { keyStore := trustmanager.NewKeyMemoryStore(passphraseRetriever) cryptoService := cryptoservice.NewCryptoService("", keyStore) setup(signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService}) fakeID := "c62e6d68851cef1f7e55a9d56e3b0c05f3359f16838cad43600f0554e7d3b54d" cryptoService.Create("", data.ED25519Key) sigRequest := &pb.SignatureRequest{KeyID: &pb.KeyID{ID: fakeID}, Content: make([]byte, 10)} requestJson, _ := 
json.Marshal(sigRequest) reader = strings.NewReader(string(requestJson)) request, err := http.NewRequest("POST", signBaseURL, reader) assert.Nil(t, err) res, err := http.DefaultClient.Do(request) assert.Nil(t, err) assert.Equal(t, 404, res.StatusCode) } notary-0.1/signer/api/find_key.go000066400000000000000000000015431262207326400170540ustar00rootroot00000000000000package api import ( "github.com/docker/notary/signer" "github.com/docker/notary/signer/keys" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" pb "github.com/docker/notary/proto" ) // FindKeyByID looks for the key with the given ID in each of the // signing services in sigServices. It returns the first matching key it finds, // or ErrInvalidKeyID if the key is not found in any of the signing services. // It also returns the CryptoService associated with the key, so the caller // can perform operations with the key (such as signing). func FindKeyByID(cryptoServices signer.CryptoServiceIndex, keyID *pb.KeyID) (data.PublicKey, signed.CryptoService, error) { for _, service := range cryptoServices { key := service.GetKey(keyID.ID) if key != nil { return key, service, nil } } return nil, nil, keys.ErrInvalidKeyID } notary-0.1/signer/api/rpc_api.go000066400000000000000000000114231262207326400166770ustar00rootroot00000000000000package api import ( "fmt" ctxu "github.com/docker/distribution/context" "github.com/docker/notary/signer" "github.com/docker/notary/signer/keys" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" pb "github.com/docker/notary/proto" ) //KeyManagementServer implements the KeyManagementServer grpc interface type KeyManagementServer struct { CryptoServices signer.CryptoServiceIndex HealthChecker func() map[string]string } //SignerServer implements the SignerServer grpc interface type SignerServer struct { CryptoServices signer.CryptoServiceIndex HealthChecker func() map[string]string } //CreateKey returns a PublicKey created using 
KeyManagementServer's SigningService func (s *KeyManagementServer) CreateKey(ctx context.Context, algorithm *pb.Algorithm) (*pb.PublicKey, error) { keyAlgo := algorithm.Algorithm service := s.CryptoServices[keyAlgo] logger := ctxu.GetLogger(ctx) if service == nil { logger.Error("CreateKey: unsupported algorithm: ", algorithm.Algorithm) return nil, fmt.Errorf("algorithm %s not supported for create key", algorithm.Algorithm) } tufKey, err := service.Create("", keyAlgo) if err != nil { logger.Error("CreateKey: failed to create key: ", err) return nil, grpc.Errorf(codes.Internal, "Key creation failed") } logger.Info("CreateKey: Created KeyID ", tufKey.ID()) return &pb.PublicKey{ KeyInfo: &pb.KeyInfo{ KeyID: &pb.KeyID{ID: tufKey.ID()}, Algorithm: &pb.Algorithm{Algorithm: tufKey.Algorithm()}, }, PublicKey: tufKey.Public(), }, nil } //DeleteKey deletes they key associated with a KeyID func (s *KeyManagementServer) DeleteKey(ctx context.Context, keyID *pb.KeyID) (*pb.Void, error) { _, service, err := FindKeyByID(s.CryptoServices, keyID) logger := ctxu.GetLogger(ctx) if err != nil { logger.Errorf("DeleteKey: key %s not found", keyID.ID) return nil, grpc.Errorf(codes.NotFound, "key %s not found", keyID.ID) } err = service.RemoveKey(keyID.ID) logger.Info("DeleteKey: Deleted KeyID ", keyID.ID) if err != nil { switch err { case keys.ErrInvalidKeyID: logger.Errorf("DeleteKey: key %s not found", keyID.ID) return nil, grpc.Errorf(codes.NotFound, "key %s not found", keyID.ID) default: logger.Error("DeleteKey: deleted key ", keyID.ID) return nil, grpc.Errorf(codes.Internal, "Key deletion for KeyID %s failed", keyID.ID) } } return &pb.Void{}, nil } //GetKeyInfo returns they PublicKey associated with a KeyID func (s *KeyManagementServer) GetKeyInfo(ctx context.Context, keyID *pb.KeyID) (*pb.PublicKey, error) { _, service, err := FindKeyByID(s.CryptoServices, keyID) logger := ctxu.GetLogger(ctx) if err != nil { logger.Errorf("GetKeyInfo: key %s not found", keyID.ID) return nil, 
grpc.Errorf(codes.NotFound, "key %s not found", keyID.ID) } tufKey := service.GetKey(keyID.ID) if tufKey == nil { logger.Errorf("GetKeyInfo: key %s not found", keyID.ID) return nil, grpc.Errorf(codes.NotFound, "key %s not found", keyID.ID) } logger.Debug("GetKeyInfo: Returning PublicKey for KeyID ", keyID.ID) return &pb.PublicKey{ KeyInfo: &pb.KeyInfo{ KeyID: &pb.KeyID{ID: tufKey.ID()}, Algorithm: &pb.Algorithm{Algorithm: tufKey.Algorithm()}, }, PublicKey: tufKey.Public(), }, nil } //CheckHealth returns the HealthStatus with the service func (s *KeyManagementServer) CheckHealth(ctx context.Context, v *pb.Void) (*pb.HealthStatus, error) { return &pb.HealthStatus{ Status: s.HealthChecker(), }, nil } //Sign signs a message and returns the signature using a private key associate with the KeyID from the SignatureRequest func (s *SignerServer) Sign(ctx context.Context, sr *pb.SignatureRequest) (*pb.Signature, error) { tufKey, service, err := FindKeyByID(s.CryptoServices, sr.KeyID) logger := ctxu.GetLogger(ctx) if err != nil { logger.Errorf("Sign: key %s not found", sr.KeyID.ID) return nil, grpc.Errorf(codes.NotFound, "key %s not found", sr.KeyID.ID) } signatures, err := service.Sign([]string{sr.KeyID.ID}, sr.Content) if err != nil || len(signatures) != 1 { logger.Errorf("Sign: signing failed for KeyID %s on hash %s", sr.KeyID.ID, sr.Content) return nil, grpc.Errorf(codes.Internal, "Signing failed for KeyID %s on hash %s", sr.KeyID.ID, sr.Content) } logger.Info("Sign: Signed ", string(sr.Content), " with KeyID ", sr.KeyID.ID) signature := &pb.Signature{ KeyInfo: &pb.KeyInfo{ KeyID: &pb.KeyID{ID: tufKey.ID()}, Algorithm: &pb.Algorithm{Algorithm: tufKey.Algorithm()}, }, Algorithm: &pb.Algorithm{Algorithm: signatures[0].Method.String()}, Content: signatures[0].Signature, } return signature, nil } //CheckHealth returns the HealthStatus with the service func (s *SignerServer) CheckHealth(ctx context.Context, v *pb.Void) (*pb.HealthStatus, error) { return &pb.HealthStatus{ 
Status: s.HealthChecker(), }, nil } notary-0.1/signer/api/rpc_api_test.go000066400000000000000000000124131262207326400177360ustar00rootroot00000000000000package api_test import ( "fmt" "log" "net" "testing" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/passphrase" "github.com/docker/notary/signer" "github.com/docker/notary/signer/api" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" pb "github.com/docker/notary/proto" ) var ( kmClient pb.KeyManagementClient sClient pb.SignerClient grpcServer *grpc.Server void *pb.Void pr passphrase.Retriever health = map[string]string{ "db": "ok", "other": "not ok", } ) func init() { pr = func(string, string, bool, int) (string, bool, error) { return "passphrase", false, nil } keyStore := trustmanager.NewKeyMemoryStore(pr) cryptoService := cryptoservice.NewCryptoService("", keyStore) cryptoServices := signer.CryptoServiceIndex{data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService} void = &pb.Void{} fakeHealth := func() map[string]string { return health } //server setup kms := &api.KeyManagementServer{CryptoServices: cryptoServices, HealthChecker: fakeHealth} ss := &api.SignerServer{CryptoServices: cryptoServices, HealthChecker: fakeHealth} grpcServer = grpc.NewServer() pb.RegisterKeyManagementServer(grpcServer, kms) pb.RegisterSignerServer(grpcServer, ss) lis, err := net.Listen("tcp", "127.0.0.1:7899") if err != nil { log.Fatalf("failed to listen %v", err) } go grpcServer.Serve(lis) //client setup conn, err := grpc.Dial("127.0.0.1:7899", grpc.WithInsecure()) if err != nil { log.Fatalf("fail to dial: %v", err) } kmClient = pb.NewKeyManagementClient(conn) sClient = pb.NewSignerClient(conn) } func TestDeleteKeyHandlerReturnsNotFoundWithNonexistentKey(t *testing.T) { fakeID := 
"c62e6d68851cef1f7e55a9d56e3b0c05f3359f16838cad43600f0554e7d3b54d" keyID := &pb.KeyID{ID: fakeID} ret, err := kmClient.DeleteKey(context.Background(), keyID) assert.NotNil(t, err) assert.Equal(t, grpc.Code(err), codes.NotFound) assert.Nil(t, ret) } func TestCreateKeyHandlerCreatesKey(t *testing.T) { publicKey, err := kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: data.ED25519Key}) assert.NotNil(t, publicKey) assert.NotEmpty(t, publicKey.PublicKey) assert.NotEmpty(t, publicKey.KeyInfo) assert.Nil(t, err) assert.Equal(t, grpc.Code(err), codes.OK) } func TestDeleteKeyHandlerDeletesCreatedKey(t *testing.T) { publicKey, err := kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: data.ED25519Key}) ret, err := kmClient.DeleteKey(context.Background(), publicKey.KeyInfo.KeyID) assert.Nil(t, err) assert.Equal(t, ret, void) } func TestKeyInfoReturnsCreatedKeys(t *testing.T) { publicKey, err := kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: data.ED25519Key}) fmt.Println("Pubkey ID: " + publicKey.GetKeyInfo().KeyID.ID) returnedPublicKey, err := kmClient.GetKeyInfo(context.Background(), publicKey.KeyInfo.KeyID) fmt.Println("returnedPublicKey ID: " + returnedPublicKey.GetKeyInfo().KeyID.ID) assert.Nil(t, err) assert.Equal(t, publicKey.KeyInfo, returnedPublicKey.KeyInfo) assert.Equal(t, publicKey.PublicKey, returnedPublicKey.PublicKey) } func TestCreateKeyCreatesNewKeys(t *testing.T) { publicKey1, err := kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: data.ED25519Key}) assert.Nil(t, err) publicKey2, err := kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: data.ED25519Key}) assert.Nil(t, err) assert.NotEqual(t, publicKey1, publicKey2) assert.NotEqual(t, publicKey1.KeyInfo, publicKey2.KeyInfo) assert.NotEqual(t, publicKey1.PublicKey, publicKey2.PublicKey) } func TestGetKeyInfoReturnsNotFoundOnNonexistKeys(t *testing.T) { fakeID := "c62e6d68851cef1f7e55a9d56e3b0c05f3359f16838cad43600f0554e7d3b54d" 
keyID := &pb.KeyID{ID: fakeID} ret, err := kmClient.GetKeyInfo(context.Background(), keyID) assert.NotNil(t, err) assert.Equal(t, grpc.Code(err), codes.NotFound) assert.Nil(t, ret) } func TestCreatedKeysCanBeUsedToSign(t *testing.T) { message := []byte{0, 0, 0, 0} publicKey, err := kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: data.ED25519Key}) assert.Nil(t, err) assert.NotNil(t, publicKey) sr := &pb.SignatureRequest{Content: message, KeyID: publicKey.KeyInfo.KeyID} assert.NotNil(t, sr) signature, err := sClient.Sign(context.Background(), sr) assert.Nil(t, err) assert.NotNil(t, signature) assert.NotEmpty(t, signature.Content) assert.Equal(t, publicKey.KeyInfo, signature.KeyInfo) } func TestSignReturnsNotFoundOnNonexistKeys(t *testing.T) { fakeID := "c62e6d68851cef1f7e55a9d56e3b0c05f3359f16838cad43600f0554e7d3b54d" keyID := &pb.KeyID{ID: fakeID} message := []byte{0, 0, 0, 0} sr := &pb.SignatureRequest{Content: message, KeyID: keyID} ret, err := sClient.Sign(context.Background(), sr) assert.NotNil(t, err) assert.Equal(t, grpc.Code(err), codes.NotFound) assert.Nil(t, ret) } func TestHealthChecksForServices(t *testing.T) { sHealthStatus, err := sClient.CheckHealth(context.Background(), void) assert.Nil(t, err) assert.Equal(t, health, sHealthStatus.Status) kmHealthStatus, err := kmClient.CheckHealth(context.Background(), void) assert.Nil(t, err) assert.Equal(t, health, kmHealthStatus.Status) } notary-0.1/signer/client/000077500000000000000000000000001262207326400154375ustar00rootroot00000000000000notary-0.1/signer/client/signer_trust.go000066400000000000000000000145431262207326400205250ustar00rootroot00000000000000// A CryptoService client wrapper around a remote wrapper service. 
package client import ( "crypto" "crypto/tls" "crypto/x509" "errors" "fmt" "io" "net" "time" "github.com/Sirupsen/logrus" pb "github.com/docker/notary/proto" "github.com/docker/notary/tuf/data" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" ) // The only thing needed from grpc.ClientConn is it's state. type checkableConnectionState interface { State() grpc.ConnectivityState } // RemotePrivateKey is a key that is on a remote service, so no private // key bytes are available type RemotePrivateKey struct { data.PublicKey sClient pb.SignerClient } // RemoteSigner wraps a RemotePrivateKey and implements the crypto.Signer // interface type RemoteSigner struct { RemotePrivateKey } // Public method of a crypto.Signer needs to return a crypto public key. func (rs *RemoteSigner) Public() crypto.PublicKey { publicKey, err := x509.ParsePKIXPublicKey(rs.RemotePrivateKey.Public()) if err != nil { return nil } return publicKey } // NewRemotePrivateKey returns RemotePrivateKey, a data.PrivateKey that is only // good for signing. (You can't get the private bytes out for instance.) func NewRemotePrivateKey(pubKey data.PublicKey, sClient pb.SignerClient) *RemotePrivateKey { return &RemotePrivateKey{ PublicKey: pubKey, sClient: sClient, } } // Private returns nil bytes func (pk *RemotePrivateKey) Private() []byte { return nil } // Sign calls a remote service to sign a message. func (pk *RemotePrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { keyID := pb.KeyID{ID: pk.ID()} sr := &pb.SignatureRequest{ Content: msg, KeyID: &keyID, } sig, err := pk.sClient.Sign(context.Background(), sr) if err != nil { return nil, err } return sig.Content, nil } // SignatureAlgorithm returns the signing algorithm based on the type of // PublicKey algorithm. 
func (pk *RemotePrivateKey) SignatureAlgorithm() data.SigAlgorithm { switch pk.PublicKey.Algorithm() { case data.ECDSAKey, data.ECDSAx509Key: return data.ECDSASignature case data.RSAKey, data.RSAx509Key: return data.RSAPSSSignature case data.ED25519Key: return data.EDDSASignature default: // unknown return "" } } // CryptoSigner returns a crypto.Signer tha wraps the RemotePrivateKey. Needed // for implementing the interface. func (pk *RemotePrivateKey) CryptoSigner() crypto.Signer { return &RemoteSigner{RemotePrivateKey: *pk} } // NotarySigner implements a RPC based Trust service that calls the Notary-signer Service type NotarySigner struct { kmClient pb.KeyManagementClient sClient pb.SignerClient clientConn checkableConnectionState } // NewNotarySigner is a convinience method that returns NotarySigner func NewNotarySigner(hostname string, port string, tlsConfig *tls.Config) *NotarySigner { var opts []grpc.DialOption netAddr := net.JoinHostPort(hostname, port) creds := credentials.NewTLS(tlsConfig) opts = append(opts, grpc.WithTransportCredentials(creds)) conn, err := grpc.Dial(netAddr, opts...) 
if err != nil { logrus.Fatal("fail to dial: ", err) } kmClient := pb.NewKeyManagementClient(conn) sClient := pb.NewSignerClient(conn) return &NotarySigner{ kmClient: kmClient, sClient: sClient, clientConn: conn, } } // Sign signs a byte string with a number of KeyIDs func (trust *NotarySigner) Sign(keyIDs []string, toSign []byte) ([]data.Signature, error) { signatures := make([]data.Signature, 0, len(keyIDs)) for _, ID := range keyIDs { keyID := pb.KeyID{ID: ID} sr := &pb.SignatureRequest{ Content: toSign, KeyID: &keyID, } sig, err := trust.sClient.Sign(context.Background(), sr) if err != nil { return nil, err } signatures = append(signatures, data.Signature{ KeyID: sig.KeyInfo.KeyID.ID, Method: data.SigAlgorithm(sig.Algorithm.Algorithm), Signature: sig.Content, }) } return signatures, nil } // Create creates a remote key and returns the PublicKey associated with the remote private key func (trust *NotarySigner) Create(role, algorithm string) (data.PublicKey, error) { publicKey, err := trust.kmClient.CreateKey(context.Background(), &pb.Algorithm{Algorithm: algorithm}) if err != nil { return nil, err } public := data.NewPublicKey(publicKey.KeyInfo.Algorithm.Algorithm, publicKey.PublicKey) return public, nil } // RemoveKey deletes a key func (trust *NotarySigner) RemoveKey(keyid string) error { _, err := trust.kmClient.DeleteKey(context.Background(), &pb.KeyID{ID: keyid}) return err } // GetKey retrieves a key func (trust *NotarySigner) GetKey(keyid string) data.PublicKey { publicKey, err := trust.kmClient.GetKeyInfo(context.Background(), &pb.KeyID{ID: keyid}) if err != nil { return nil } return data.NewPublicKey(publicKey.KeyInfo.Algorithm.Algorithm, publicKey.PublicKey) } // GetPrivateKey errors in all cases func (trust *NotarySigner) GetPrivateKey(keyid string) (data.PrivateKey, string, error) { pubKey := trust.GetKey(keyid) if pubKey == nil { return nil, "", nil } return NewRemotePrivateKey(pubKey, trust.sClient), "", nil } // ListKeys not supported for 
NotarySigner func (trust *NotarySigner) ListKeys(role string) []string { return []string{} } // ListAllKeys not supported for NotarySigner func (trust *NotarySigner) ListAllKeys() map[string]string { return map[string]string{} } // CheckHealth checks the health of one of the clients, since both clients run // from the same GRPC server. func (trust *NotarySigner) CheckHealth(timeout time.Duration) error { // Do not bother starting checking at all if the connection is broken. if trust.clientConn.State() != grpc.Idle && trust.clientConn.State() != grpc.Ready { return fmt.Errorf("Not currently connected to trust server.") } ctx, cancel := context.WithTimeout(context.Background(), timeout) status, err := trust.kmClient.CheckHealth(ctx, &pb.Void{}) defer cancel() if err == nil && len(status.Status) > 0 { return fmt.Errorf("Trust is not healthy") } else if err != nil && grpc.Code(err) == codes.DeadlineExceeded { return fmt.Errorf( "Timed out reaching trust service after %s.", timeout) } return err } // ImportRootKey satisfies the CryptoService interface. It should not be implemented // for a NotarySigner. 
func (trust *NotarySigner) ImportRootKey(r io.Reader) error { return errors.New("Importing a root key to NotarySigner is not supported") } notary-0.1/signer/client/signer_trust_test.go000066400000000000000000000130321262207326400215540ustar00rootroot00000000000000package client import ( "crypto/rand" "errors" "strings" "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "github.com/docker/notary/cryptoservice" "github.com/docker/notary/passphrase" pb "github.com/docker/notary/proto" "github.com/docker/notary/signer" "github.com/docker/notary/signer/api" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) type rpcHealthCheck func( context.Context, *pb.Void, ...grpc.CallOption) (*pb.HealthStatus, error) type StubKeyManagementClient struct { pb.KeyManagementClient healthCheck rpcHealthCheck } func (c StubKeyManagementClient) CheckHealth(x context.Context, v *pb.Void, o ...grpc.CallOption) (*pb.HealthStatus, error) { return c.healthCheck(x, v, o...) 
} type StubGRPCConnection struct { fakeConnStatus grpc.ConnectivityState } func (c StubGRPCConnection) State() grpc.ConnectivityState { return c.fakeConnStatus } func stubHealthFunction(t *testing.T, status map[string]string, err error) rpcHealthCheck { return func(ctx context.Context, v *pb.Void, o ...grpc.CallOption) (*pb.HealthStatus, error) { _, withDeadline := ctx.Deadline() assert.True(t, withDeadline) return &pb.HealthStatus{Status: status}, err } } func makeSigner(kmFunc rpcHealthCheck, conn StubGRPCConnection) NotarySigner { return NotarySigner{ StubKeyManagementClient{ pb.NewKeyManagementClient(nil), kmFunc, }, pb.NewSignerClient(nil), conn, } } // CheckHealth does not succeed if the KM server is unhealthy func TestHealthCheckKMUnhealthy(t *testing.T) { signer := makeSigner( stubHealthFunction(t, map[string]string{"health": "not good"}, nil), StubGRPCConnection{}) assert.Error(t, signer.CheckHealth(1*time.Second)) } // CheckHealth does not succeed if the health check to the KM server errors func TestHealthCheckKMError(t *testing.T) { signer := makeSigner( stubHealthFunction(t, nil, errors.New("Something's wrong")), StubGRPCConnection{}) assert.Error(t, signer.CheckHealth(1*time.Second)) } // CheckHealth does not succeed if the health check to the KM server times out func TestHealthCheckKMTimeout(t *testing.T) { signer := makeSigner( stubHealthFunction(t, nil, grpc.Errorf(codes.DeadlineExceeded, "")), StubGRPCConnection{}) err := signer.CheckHealth(1 * time.Second) assert.Error(t, err) assert.True(t, strings.Contains(err.Error(), "Timed out")) } // CheckHealth succeeds if KM is healthy and reachable. func TestHealthCheckKMHealthy(t *testing.T) { signer := makeSigner( stubHealthFunction(t, make(map[string]string), nil), StubGRPCConnection{}) assert.NoError(t, signer.CheckHealth(1*time.Second)) } // CheckHealth fails immediately if not connected to the server. 
func TestHealthCheckConnectionDied(t *testing.T) { signer := makeSigner( stubHealthFunction(t, make(map[string]string), nil), StubGRPCConnection{grpc.Connecting}) assert.Error(t, signer.CheckHealth(1*time.Second)) } var ret = passphrase.ConstantRetriever("pass") func TestGetPrivateKeyIfNoKey(t *testing.T) { signer := setUpSigner(t, trustmanager.NewKeyMemoryStore(ret)) privKey, _, err := signer.GetPrivateKey("bogus key ID") assert.NoError(t, err) assert.Nil(t, privKey) } func TestGetPrivateKeyAndSignWithExistingKey(t *testing.T) { key, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate key") store := trustmanager.NewKeyMemoryStore(ret) err = store.AddKey(key.ID(), "timestamp", key) assert.NoError(t, err, "could not add key to store") signer := setUpSigner(t, store) privKey, _, err := signer.GetPrivateKey(key.ID()) assert.NoError(t, err) assert.NotNil(t, privKey) msg := []byte("message!") sig, err := privKey.Sign(rand.Reader, msg, nil) assert.NoError(t, err) err = signed.Verifiers[data.ECDSASignature].Verify( data.PublicKeyFromPrivate(key), sig, msg) assert.NoError(t, err) } type StubClientFromServers struct { api.KeyManagementServer api.SignerServer } func (c *StubClientFromServers) CreateKey(ctx context.Context, algorithm *pb.Algorithm, _ ...grpc.CallOption) (*pb.PublicKey, error) { return c.KeyManagementServer.CreateKey(ctx, algorithm) } func (c *StubClientFromServers) DeleteKey(ctx context.Context, keyID *pb.KeyID, _ ...grpc.CallOption) (*pb.Void, error) { return c.KeyManagementServer.DeleteKey(ctx, keyID) } func (c *StubClientFromServers) GetKeyInfo(ctx context.Context, keyID *pb.KeyID, _ ...grpc.CallOption) (*pb.PublicKey, error) { return c.KeyManagementServer.GetKeyInfo(ctx, keyID) } func (c *StubClientFromServers) Sign(ctx context.Context, sr *pb.SignatureRequest, _ ...grpc.CallOption) (*pb.Signature, error) { return c.SignerServer.Sign(ctx, sr) } func (c *StubClientFromServers) CheckHealth(ctx context.Context, v 
*pb.Void, _ ...grpc.CallOption) (*pb.HealthStatus, error) { return c.KeyManagementServer.CheckHealth(ctx, v) } func setUpSigner(t *testing.T, store trustmanager.KeyStore) NotarySigner { cryptoService := cryptoservice.NewCryptoService("", store) cryptoServices := signer.CryptoServiceIndex{ data.ED25519Key: cryptoService, data.RSAKey: cryptoService, data.ECDSAKey: cryptoService, } fakeHealth := func() map[string]string { return map[string]string{} } client := StubClientFromServers{ KeyManagementServer: api.KeyManagementServer{CryptoServices: cryptoServices, HealthChecker: fakeHealth}, SignerServer: api.SignerServer{CryptoServices: cryptoServices, HealthChecker: fakeHealth}, } return NotarySigner{kmClient: &client, sClient: &client} } notary-0.1/signer/keydbstore/000077500000000000000000000000001262207326400163345ustar00rootroot00000000000000notary-0.1/signer/keydbstore/keydbstore.go000066400000000000000000000146621262207326400210470ustar00rootroot00000000000000package keydbstore import ( "database/sql" "errors" "fmt" "sync" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" jose "github.com/dvsekhvalnov/jose2go" "github.com/jinzhu/gorm" ) // Constants const ( EncryptionAlg = jose.A256GCM KeywrapAlg = jose.PBES2_HS256_A128KW ) // KeyDBStore persists and manages private keys on a SQL database type KeyDBStore struct { sync.Mutex db gorm.DB defaultPassAlias string retriever passphrase.Retriever cachedKeys map[string]data.PrivateKey } // GormPrivateKey represents a PrivateKey in the database type GormPrivateKey struct { gorm.Model KeyID string `sql:"not null;unique;index:key_id_idx"` EncryptionAlg string `sql:"not null"` KeywrapAlg string `sql:"not null"` Algorithm string `sql:"not null"` PassphraseAlias string `sql:"not null"` Public string `sql:"not null"` Private string `sql:"not null"` } // TableName sets a specific table name for our GormPrivateKey func (g GormPrivateKey) TableName() string { return 
"private_keys"
}

// NewKeyDBStore returns a new KeyDBStore backed by a SQL database
func NewKeyDBStore(passphraseRetriever passphrase.Retriever, defaultPassAlias,
	dbType string, dbSQL *sql.DB) (*KeyDBStore, error) {
	cachedKeys := make(map[string]data.PrivateKey)

	// Open a connection to our database. Do not discard the error: an
	// unsupported dialect would otherwise surface much later as confusing
	// query failures.
	db, err := gorm.Open(dbType, dbSQL)
	if err != nil {
		return nil, err
	}

	return &KeyDBStore{
		db:               db,
		defaultPassAlias: defaultPassAlias,
		retriever:        passphraseRetriever,
		cachedKeys:       cachedKeys}, nil
}

// Name returns a user friendly name for the storage location
func (s *KeyDBStore) Name() string {
	return "database"
}

// AddKey stores the contents of a private key. Both name and alias are ignored,
// we always use Key IDs as name, and don't support aliases
func (s *KeyDBStore) AddKey(name, alias string, privKey data.PrivateKey) error {
	passphrase, _, err := s.retriever(privKey.ID(), s.defaultPassAlias, false, 1)
	if err != nil {
		return err
	}

	encryptedKey, err := jose.Encrypt(string(privKey.Private()), KeywrapAlg, EncryptionAlg, passphrase)
	if err != nil {
		return err
	}

	gormPrivKey := GormPrivateKey{
		KeyID:           privKey.ID(),
		EncryptionAlg:   EncryptionAlg,
		KeywrapAlg:      KeywrapAlg,
		PassphraseAlias: s.defaultPassAlias,
		Algorithm:       privKey.Algorithm(),
		Public:          string(privKey.Public()),
		Private:         encryptedKey}

	// Add encrypted private key to the database
	s.db.Create(&gormPrivKey)
	// Value will be false if Create succeeds
	failure := s.db.NewRecord(gormPrivKey)
	if failure {
		return fmt.Errorf("failed to add private key to database: %s", privKey.ID())
	}

	// Add the private key to our cache
	s.Lock()
	defer s.Unlock()
	s.cachedKeys[privKey.ID()] = privKey

	return nil
}

// GetKey returns the PrivateKey given a KeyID
func (s *KeyDBStore) GetKey(name string) (data.PrivateKey, string, error) {
	s.Lock()
	defer s.Unlock()
	cachedKeyEntry, ok := s.cachedKeys[name]
	if ok {
		return cachedKeyEntry, "", nil
	}

	// Retrieve the GORM private key from the database
	dbPrivateKey := GormPrivateKey{}
	if s.db.Where(&GormPrivateKey{KeyID:
name}).First(&dbPrivateKey).RecordNotFound() { return nil, "", trustmanager.ErrKeyNotFound{} } // Get the passphrase to use for this key passphrase, _, err := s.retriever(dbPrivateKey.KeyID, dbPrivateKey.PassphraseAlias, false, 1) if err != nil { return nil, "", err } // Decrypt private bytes from the gorm key decryptedPrivKey, _, err := jose.Decode(dbPrivateKey.Private, passphrase) if err != nil { return nil, "", err } pubKey := data.NewPublicKey(dbPrivateKey.Algorithm, []byte(dbPrivateKey.Public)) // Create a new PrivateKey with unencrypted bytes privKey, err := data.NewPrivateKey(pubKey, []byte(decryptedPrivKey)) if err != nil { return nil, "", err } // Add the key to cache s.cachedKeys[privKey.ID()] = privKey return privKey, "", nil } // ListKeys always returns nil. This method is here to satisfy the KeyStore interface func (s *KeyDBStore) ListKeys() map[string]string { return nil } // RemoveKey removes the key from the keyfilestore func (s *KeyDBStore) RemoveKey(name string) error { s.Lock() defer s.Unlock() delete(s.cachedKeys, name) // Retrieve the GORM private key from the database dbPrivateKey := GormPrivateKey{} if s.db.Where(&GormPrivateKey{KeyID: name}).First(&dbPrivateKey).RecordNotFound() { return trustmanager.ErrKeyNotFound{} } // Delete the key from the database s.db.Delete(&dbPrivateKey) return nil } // RotateKeyPassphrase rotates the key-encryption-key func (s *KeyDBStore) RotateKeyPassphrase(name, newPassphraseAlias string) error { // Retrieve the GORM private key from the database dbPrivateKey := GormPrivateKey{} if s.db.Where(&GormPrivateKey{KeyID: name}).First(&dbPrivateKey).RecordNotFound() { return trustmanager.ErrKeyNotFound{} } // Get the current passphrase to use for this key passphrase, _, err := s.retriever(dbPrivateKey.KeyID, dbPrivateKey.PassphraseAlias, false, 1) if err != nil { return err } // Decrypt private bytes from the gorm key decryptedPrivKey, _, err := jose.Decode(dbPrivateKey.Private, passphrase) if err != nil { return err 
} // Get the new passphrase to use for this key newPassphrase, _, err := s.retriever(dbPrivateKey.KeyID, newPassphraseAlias, false, 1) if err != nil { return err } // Re-encrypt the private bytes with the new passphrase newEncryptedKey, err := jose.Encrypt(decryptedPrivKey, KeywrapAlg, EncryptionAlg, newPassphrase) if err != nil { return err } // Update the database object dbPrivateKey.Private = newEncryptedKey dbPrivateKey.PassphraseAlias = newPassphraseAlias s.db.Save(dbPrivateKey) return nil } // ExportKey is currently unimplemented and will always return an error func (s *KeyDBStore) ExportKey(name string) ([]byte, error) { return nil, errors.New("Exporting from a KeyDBStore is not supported.") } // ImportKey is currently unimplemented and will always return an error func (s *KeyDBStore) ImportKey(pemBytes []byte, alias string) error { return errors.New("Importing into a KeyDBStore is not supported") } // HealthCheck verifies that DB exists and is query-able func (s *KeyDBStore) HealthCheck() error { dbPrivateKey := GormPrivateKey{} tableOk := s.db.HasTable(&dbPrivateKey) switch { case s.db.Error != nil: return s.db.Error case !tableOk: return fmt.Errorf( "Cannot access table: %s", dbPrivateKey.TableName()) } return nil } notary-0.1/signer/keydbstore/keydbstore_test.go000066400000000000000000000123241262207326400220770ustar00rootroot00000000000000package keydbstore import ( "crypto/rand" "database/sql" "errors" "io/ioutil" "os" "testing" "github.com/docker/notary/trustmanager" _ "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/assert" ) var retriever = func(string, string, bool, int) (string, bool, error) { return "passphrase_1", false, nil } var anotherRetriever = func(keyName, alias string, createNew bool, attempts int) (string, bool, error) { switch alias { case "alias_1": return "passphrase_1", false, nil case "alias_2": return "passphrase_2", false, nil } return "", false, errors.New("password alias no found") } func TestCreateRead(t *testing.T) 
{ tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // We are using SQLite for the tests db, err := sql.Open("sqlite3", tempBaseDir+"test_db") assert.NoError(t, err) // Create a new KeyDB store dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db) assert.NoError(t, err) // Ensure that the private_key table exists dbStore.db.CreateTable(&GormPrivateKey{}) // Test writing new key in database/cache err = dbStore.AddKey("", "", testKey) assert.NoError(t, err) // Test retrieval of key from DB delete(dbStore.cachedKeys, testKey.ID()) retrKey, _, err := dbStore.GetKey(testKey.ID()) assert.NoError(t, err) assert.Equal(t, retrKey, testKey) // Tests retrieval of key from Cache // Close database connection err = dbStore.db.Close() assert.NoError(t, err) retrKey, _, err = dbStore.GetKey(testKey.ID()) assert.NoError(t, err) assert.Equal(t, retrKey, testKey) } func TestDoubleCreate(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err) anotherTestKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // We are using SQLite for the tests db, err := sql.Open("sqlite3", tempBaseDir+"test_db") assert.NoError(t, err) // Create a new KeyDB store dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db) assert.NoError(t, err) // Ensure that the private_key table exists dbStore.db.CreateTable(&GormPrivateKey{}) // Test writing new key in database/cache err = dbStore.AddKey("", "", testKey) assert.NoError(t, err) // Test writing the same key in the database. Should fail. 
err = dbStore.AddKey("", "", testKey) assert.Error(t, err, "failed to add private key to database:") // Test writing new key succeeds err = dbStore.AddKey("", "", anotherTestKey) assert.NoError(t, err) } func TestCreateDelete(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // We are using SQLite for the tests db, err := sql.Open("sqlite3", tempBaseDir+"test_db") assert.NoError(t, err) // Create a new KeyDB store dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db) assert.NoError(t, err) // Ensure that the private_key table exists dbStore.db.CreateTable(&GormPrivateKey{}) // Test writing new key in database/cache err = dbStore.AddKey("", "", testKey) assert.NoError(t, err) // Test deleting the key from the db err = dbStore.RemoveKey(testKey.ID()) assert.NoError(t, err) // This should fail _, _, err = dbStore.GetKey(testKey.ID()) assert.Error(t, err, "signing key not found:") } func TestKeyRotation(t *testing.T) { tempBaseDir, err := ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) testKey, err := trustmanager.GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // We are using SQLite for the tests db, err := sql.Open("sqlite3", tempBaseDir+"test_db") assert.NoError(t, err) // Create a new KeyDB store dbStore, err := NewKeyDBStore(anotherRetriever, "alias_1", "sqlite3", db) assert.NoError(t, err) // Ensure that the private_key table exists dbStore.db.CreateTable(&GormPrivateKey{}) // Test writing new key in database/cache err = dbStore.AddKey("", "", testKey) assert.NoError(t, err) // Try rotating the key to alias-2 err = dbStore.RotateKeyPassphrase(testKey.ID(), "alias_2") assert.NoError(t, err) // Try rotating the key to alias-3 err = dbStore.RotateKeyPassphrase(testKey.ID(), "alias_3") assert.Error(t, err, "password alias no found") } func TestDBHealthCheck(t *testing.T) { tempBaseDir, err := 
ioutil.TempDir("", "notary-test-") defer os.RemoveAll(tempBaseDir) // We are using SQLite for the tests db, err := sql.Open("sqlite3", tempBaseDir+"test_db") assert.NoError(t, err) // Create a new KeyDB store dbStore, err := NewKeyDBStore(retriever, "", "sqlite3", db) assert.NoError(t, err) // No key table, health check fails err = dbStore.HealthCheck() assert.Error(t, err, "Cannot access table:") // Ensure that the private_key table exists dbStore.db.CreateTable(&GormPrivateKey{}) // Heath check success because the table exists err = dbStore.HealthCheck() assert.NoError(t, err) // Close the connection err = dbStore.db.Close() assert.NoError(t, err) // Heath check fail because the connection is closed err = dbStore.HealthCheck() assert.Error(t, err, "Cannot access table:") } notary-0.1/signer/keys/000077500000000000000000000000001262207326400151345ustar00rootroot00000000000000notary-0.1/signer/keys/keys.go000066400000000000000000000006721262207326400164430ustar00rootroot00000000000000package keys import "errors" var ( // ErrExists happens when a Key already exists in a database ErrExists = errors.New("notary-signer: key already in db") // ErrInvalidKeyID error happens when a key isn't found ErrInvalidKeyID = errors.New("notary-signer: invalid key id") // ErrFailedKeyGeneration happens when there is a failure in generating a key ErrFailedKeyGeneration = errors.New("notary-signer: failed to generate new key") ) notary-0.1/signer/signer.go000066400000000000000000000020201262207326400157710ustar00rootroot00000000000000package signer import ( pb "github.com/docker/notary/proto" "github.com/docker/notary/tuf/signed" ) // SigningService is the interface to implement a key management and signing service type SigningService interface { KeyManager // Signer returns a Signer for a given keyID Signer(keyID *pb.KeyID) (Signer, error) } // CryptoServiceIndex represents a mapping between a service algorithm string // and a CryptoService type CryptoServiceIndex 
map[string]signed.CryptoService // KeyManager is the interface to implement key management (possibly a key database) type KeyManager interface { // CreateKey creates a new key and returns it's Information CreateKey() (*pb.PublicKey, error) // DeleteKey removes a key DeleteKey(keyID *pb.KeyID) (*pb.Void, error) // KeyInfo returns the public key of a particular key KeyInfo(keyID *pb.KeyID) (*pb.PublicKey, error) } // Signer is the interface that allows the signing service to return signatures type Signer interface { Sign(request *pb.SignatureRequest) (*pb.Signature, error) } notary-0.1/trustmanager/000077500000000000000000000000001262207326400154065ustar00rootroot00000000000000notary-0.1/trustmanager/filestore.go000066400000000000000000000151611262207326400177350ustar00rootroot00000000000000package trustmanager import ( "errors" "fmt" "github.com/docker/notary" "io/ioutil" "os" "path/filepath" "strings" "sync" ) const ( visible = notary.PubCertPerms private = notary.PrivKeyPerms ) var ( // ErrPathOutsideStore indicates that the returned path would be // outside the store ErrPathOutsideStore = errors.New("path outside file store") ) // LimitedFileStore implements the bare bones primitives (no hierarchy) type LimitedFileStore interface { Add(fileName string, data []byte) error Remove(fileName string) error Get(fileName string) ([]byte, error) ListFiles() []string } // FileStore is the interface for full-featured FileStores type FileStore interface { LimitedFileStore RemoveDir(directoryName string) error GetPath(fileName string) (string, error) ListDir(directoryName string) []string BaseDir() string } // SimpleFileStore implements FileStore type SimpleFileStore struct { baseDir string fileExt string perms os.FileMode } // NewSimpleFileStore creates a directory with 755 permissions func NewSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore, error) { baseDir = filepath.Clean(baseDir) if err := CreateDirectory(baseDir); err != nil { return nil, err } return 
&SimpleFileStore{ baseDir: baseDir, fileExt: fileExt, perms: visible, }, nil } // NewPrivateSimpleFileStore creates a directory with 700 permissions func NewPrivateSimpleFileStore(baseDir string, fileExt string) (*SimpleFileStore, error) { if err := CreatePrivateDirectory(baseDir); err != nil { return nil, err } return &SimpleFileStore{ baseDir: baseDir, fileExt: fileExt, perms: private, }, nil } // Add writes data to a file with a given name func (f *SimpleFileStore) Add(name string, data []byte) error { filePath, err := f.GetPath(name) if err != nil { return err } createDirectory(filepath.Dir(filePath), f.perms) return ioutil.WriteFile(filePath, data, f.perms) } // Remove removes a file identified by name func (f *SimpleFileStore) Remove(name string) error { // Attempt to remove filePath, err := f.GetPath(name) if err != nil { return err } return os.Remove(filePath) } // RemoveDir removes the directory identified by name func (f *SimpleFileStore) RemoveDir(name string) error { dirPath := filepath.Join(f.baseDir, name) // Check to see if directory exists fi, err := os.Stat(dirPath) if err != nil { return err } // Check to see if it is a directory if !fi.IsDir() { return fmt.Errorf("directory not found: %s", name) } return os.RemoveAll(dirPath) } // Get returns the data given a file name func (f *SimpleFileStore) Get(name string) ([]byte, error) { filePath, err := f.GetPath(name) if err != nil { return nil, err } data, err := ioutil.ReadFile(filePath) if err != nil { return nil, err } return data, nil } // GetPath returns the full final path of a file with a given name func (f *SimpleFileStore) GetPath(name string) (string, error) { fileName := f.genFileName(name) fullPath := filepath.Clean(filepath.Join(f.baseDir, fileName)) if !strings.HasPrefix(fullPath, f.baseDir) { return "", ErrPathOutsideStore } return fullPath, nil } // ListFiles lists all the files inside of a store func (f *SimpleFileStore) ListFiles() []string { return f.list(f.baseDir) } // ListDir 
lists all the files inside of a directory identified by a name func (f *SimpleFileStore) ListDir(name string) []string { fullPath := filepath.Join(f.baseDir, name) return f.list(fullPath) } // list lists all the files in a directory given a full path. Ignores symlinks. func (f *SimpleFileStore) list(path string) []string { files := make([]string, 0, 0) filepath.Walk(path, func(fp string, fi os.FileInfo, err error) error { // If there are errors, ignore this particular file if err != nil { return nil } // Ignore if it is a directory if fi.IsDir() { return nil } // If this is a symlink, ignore it if fi.Mode()&os.ModeSymlink == os.ModeSymlink { return nil } // Only allow matches that end with our certificate extension (e.g. *.crt) matched, _ := filepath.Match("*"+f.fileExt, fi.Name()) if matched { // Find the relative path for this file relative to the base path. fp, err = filepath.Rel(path, fp) if err != nil { return err } files = append(files, fp) } return nil }) return files } // genFileName returns the name using the right extension func (f *SimpleFileStore) genFileName(name string) string { return fmt.Sprintf("%s.%s", name, f.fileExt) } // BaseDir returns the base directory of the filestore func (f *SimpleFileStore) BaseDir() string { return f.baseDir } // CreateDirectory uses createDirectory to create a chmod 755 Directory func CreateDirectory(dir string) error { return createDirectory(dir, visible) } // CreatePrivateDirectory uses createDirectory to create a chmod 700 Directory func CreatePrivateDirectory(dir string) error { return createDirectory(dir, private) } // createDirectory receives a string of the path to a directory. 
// It does not support passing files, so the caller has to remove // the filename by doing filepath.Dir(full_path_to_file) func createDirectory(dir string, perms os.FileMode) error { // This prevents someone passing /path/to/dir and 'dir' not being created // If two '//' exist, MkdirAll deals it with correctly dir = dir + "/" return os.MkdirAll(dir, perms) } // MemoryFileStore is an implementation of LimitedFileStore that keeps // the contents in memory. type MemoryFileStore struct { sync.Mutex files map[string][]byte } // NewMemoryFileStore creates a MemoryFileStore func NewMemoryFileStore() *MemoryFileStore { return &MemoryFileStore{ files: make(map[string][]byte), } } // ErrMemFileNotFound is returned for a nonexistent "file" in the memory file // store var ErrMemFileNotFound = errors.New("key not found in memory file store") // Add writes data to a file with a given name func (f *MemoryFileStore) Add(name string, data []byte) error { f.Lock() defer f.Unlock() f.files[name] = data return nil } // Remove removes a file identified by name func (f *MemoryFileStore) Remove(name string) error { f.Lock() defer f.Unlock() if _, present := f.files[name]; !present { return ErrMemFileNotFound } delete(f.files, name) return nil } // Get returns the data given a file name func (f *MemoryFileStore) Get(name string) ([]byte, error) { f.Lock() defer f.Unlock() fileData, present := f.files[name] if !present { return nil, ErrMemFileNotFound } return fileData, nil } // ListFiles lists all the files inside of a store func (f *MemoryFileStore) ListFiles() []string { var list []string for name := range f.files { list = append(list, name) } return list } notary-0.1/trustmanager/filestore_test.go000066400000000000000000000257641262207326400210060ustar00rootroot00000000000000package trustmanager import ( "bytes" "crypto/rand" "fmt" "io/ioutil" "os" "path/filepath" "strconv" "testing" ) func TestAddFile(t *testing.T) { testData := []byte("This test data should be part of the file.") 
testName := "docker.com/notary/certificate" testExt := "crt" perms := os.FileMode(0755) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' expectedFilePath := filepath.Join(tempBaseDir, testName+"."+testExt) // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: tempBaseDir, fileExt: testExt, perms: perms, } // Call the Add function err = store.Add(testName, testData) if err != nil { t.Fatalf("failed to add file to store: %v", err) } // Check to see if file exists b, err := ioutil.ReadFile(expectedFilePath) if err != nil { t.Fatalf("expected file not found: %v", err) } if !bytes.Equal(b, testData) { t.Fatalf("unexpected content in the file: %s", expectedFilePath) } } func TestRemoveFile(t *testing.T) { testName := "docker.com/notary/certificate" testExt := "crt" perms := os.FileMode(0755) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' 
expectedFilePath := filepath.Join(tempBaseDir, testName+"."+testExt) _, err = generateRandomFile(expectedFilePath, perms) if err != nil { t.Fatalf("failed to generate random file: %v", err) } // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: tempBaseDir, fileExt: testExt, perms: perms, } // Call the Remove function err = store.Remove(testName) if err != nil { t.Fatalf("failed to remove file from store: %v", err) } // Check to see if file exists _, err = os.Stat(expectedFilePath) if err == nil { t.Fatalf("expected not to find file: %s", expectedFilePath) } } func TestRemoveDir(t *testing.T) { testName := "docker.com/diogomonica/" testExt := "key" perms := os.FileMode(0700) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' expectedFilePath := filepath.Join(tempBaseDir, testName+"."+testExt) _, err = generateRandomFile(expectedFilePath, perms) if err != nil { t.Fatalf("failed to generate random file: %v", err) } // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: tempBaseDir, fileExt: testExt, perms: perms, } // Call the RemoveDir function err = store.RemoveDir(testName) if err != nil { t.Fatalf("failed to remove directory: %v", err) } expectedDirectory := filepath.Dir(expectedFilePath) // Check to see if file exists _, err = os.Stat(expectedDirectory) if err == nil { t.Fatalf("expected not to find directory: %s", expectedDirectory) } } func TestListFiles(t *testing.T) { testName := "docker.com/notary/certificate" testExt := "crt" perms := os.FileMode(0755) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) var expectedFilePath 
string // Create 10 randomfiles for i := 1; i <= 10; i++ { // Since we're generating this manually we need to add the extension '.' expectedFilename := testName + strconv.Itoa(i) + "." + testExt expectedFilePath = filepath.Join(tempBaseDir, expectedFilename) _, err = generateRandomFile(expectedFilePath, perms) if err != nil { t.Fatalf("failed to generate random file: %v", err) } } // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: tempBaseDir, fileExt: testExt, perms: perms, } // Call the List function. Expect 10 files files := store.ListFiles() if len(files) != 10 { t.Fatalf("expected 10 files in listing, got: %d", len(files)) } } func TestListDir(t *testing.T) { testName := "docker.com/notary/certificate" testExt := "crt" perms := os.FileMode(0755) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) var expectedFilePath string // Create 10 randomfiles for i := 1; i <= 10; i++ { // Since we're generating this manually we need to add the extension '.' 
fileName := fmt.Sprintf("%s-%s.%s", testName, strconv.Itoa(i), testExt) expectedFilePath = filepath.Join(tempBaseDir, fileName) _, err = generateRandomFile(expectedFilePath, perms) if err != nil { t.Fatalf("failed to generate random file: %v", err) } } // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: tempBaseDir, fileExt: testExt, perms: perms, } // Call the ListDir function files := store.ListDir("docker.com/") if len(files) != 10 { t.Fatalf("expected 10 files in listing, got: %d", len(files)) } files = store.ListDir("docker.com/notary") if len(files) != 10 { t.Fatalf("expected 10 files in listing, got: %d", len(files)) } files = store.ListDir("fakedocker.com/") if len(files) != 0 { t.Fatalf("expected 0 files in listing, got: %d", len(files)) } } func TestGetPath(t *testing.T) { testExt := "crt" perms := os.FileMode(0755) // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: "", fileExt: testExt, perms: perms, } firstPath := "diogomonica.com/openvpn/0xdeadbeef.crt" secondPath := "/docker.io/testing-dashes/@#$%^&().crt" result, err := store.GetPath("diogomonica.com/openvpn/0xdeadbeef") if err != nil { t.Fatalf("unexpected error from GetPath: %v", err) } if result != firstPath { t.Fatalf("Expecting: %s", firstPath) } result, err = store.GetPath("/docker.io/testing-dashes/@#$%^&()") if err != nil { t.Fatalf("unexpected error from GetPath: %v", err) } if result != secondPath { t.Fatalf("Expecting: %s", secondPath) } } func TestGetPathProtection(t *testing.T) { testExt := "crt" perms := os.FileMode(0755) // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: "/path/to/filestore/", fileExt: testExt, perms: perms, } // Should deny requests for paths outside the filestore if _, err := store.GetPath("../../etc/passwd"); err != ErrPathOutsideStore { t.Fatalf("expected ErrPathOutsideStore error from GetPath") } if _, err := store.GetPath("private/../../../etc/passwd"); err != ErrPathOutsideStore { t.Fatalf("expected ErrPathOutsideStore 
error from GetPath") } // Convoluted paths should work as long as they end up inside the store expected := "/path/to/filestore/filename.crt" result, err := store.GetPath("private/../../filestore/./filename") if err != nil { t.Fatalf("unexpected error from GetPath: %v", err) } if result != expected { t.Fatalf("Expecting: %s (got: %s)", expected, result) } // Repeat tests with a relative baseDir relStore := &SimpleFileStore{ baseDir: "relative/file/path", fileExt: testExt, perms: perms, } // Should deny requests for paths outside the filestore if _, err := relStore.GetPath("../../etc/passwd"); err != ErrPathOutsideStore { t.Fatalf("expected ErrPathOutsideStore error from GetPath") } if _, err := relStore.GetPath("private/../../../etc/passwd"); err != ErrPathOutsideStore { t.Fatalf("expected ErrPathOutsideStore error from GetPath") } // Convoluted paths should work as long as they end up inside the store expected = "relative/file/path/filename.crt" result, err = relStore.GetPath("private/../../path/./filename") if err != nil { t.Fatalf("unexpected error from GetPath: %v", err) } if result != expected { t.Fatalf("Expecting: %s (got: %s)", expected, result) } } func TestGetData(t *testing.T) { testName := "docker.com/notary/certificate" testExt := "crt" perms := os.FileMode(0755) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' 
expectedFilePath := filepath.Join(tempBaseDir, testName+"."+testExt) expectedData, err := generateRandomFile(expectedFilePath, perms) if err != nil { t.Fatalf("failed to generate random file: %v", err) } // Create our SimpleFileStore store := &SimpleFileStore{ baseDir: tempBaseDir, fileExt: testExt, perms: perms, } testData, err := store.Get(testName) if err != nil { t.Fatalf("failed to get data from: %s", testName) } if !bytes.Equal(testData, expectedData) { t.Fatalf("unexpected content for the file: %s", expectedFilePath) } } func TestCreateDirectory(t *testing.T) { testDir := "fake/path/to/directory" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) dirPath := filepath.Join(tempBaseDir, testDir) // Call createDirectory CreateDirectory(dirPath) // Check to see if file exists fi, err := os.Stat(dirPath) if err != nil { t.Fatalf("expected find directory: %s", dirPath) } // Check to see if it is a directory if !fi.IsDir() { t.Fatalf("expected to be directory: %s", dirPath) } // Check to see if the permissions match if fi.Mode().String() != "drwxr-xr-x" { t.Fatalf("permissions are wrong for: %s. 
Got: %s", dirPath, fi.Mode().String()) } } func TestCreatePrivateDirectory(t *testing.T) { testDir := "fake/path/to/private/directory" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") if err != nil { t.Fatalf("failed to create a temporary directory: %v", err) } defer os.RemoveAll(tempBaseDir) dirPath := filepath.Join(tempBaseDir, testDir) // Call createDirectory CreatePrivateDirectory(dirPath) // Check to see if file exists fi, err := os.Stat(dirPath) if err != nil { t.Fatalf("expected find directory: %s", dirPath) } // Check to see if it is a directory if !fi.IsDir() { t.Fatalf("expected to be directory: %s", dirPath) } // Check to see if the permissions match if fi.Mode().String() != "drwx------" { t.Fatalf("permissions are wrong for: %s. Got: %s", dirPath, fi.Mode().String()) } } func generateRandomFile(filePath string, perms os.FileMode) ([]byte, error) { rndBytes := make([]byte, 10) _, err := rand.Read(rndBytes) if err != nil { return nil, err } os.MkdirAll(filepath.Dir(filePath), perms) if err = ioutil.WriteFile(filePath, rndBytes, perms); err != nil { return nil, err } return rndBytes, nil } notary-0.1/trustmanager/keyfilestore.go000066400000000000000000000244471262207326400204550ustar00rootroot00000000000000package trustmanager import ( "fmt" "path/filepath" "strings" "sync" "github.com/docker/notary/passphrase" "github.com/docker/notary/tuf/data" ) const ( rootKeysSubdir = "root_keys" nonRootKeysSubdir = "tuf_keys" privDir = "private" ) // KeyFileStore persists and manages private keys on disk type KeyFileStore struct { sync.Mutex SimpleFileStore passphrase.Retriever cachedKeys map[string]*cachedKey } // KeyMemoryStore manages private keys in memory type KeyMemoryStore struct { sync.Mutex MemoryFileStore passphrase.Retriever cachedKeys map[string]*cachedKey } // NewKeyFileStore returns a new KeyFileStore creating a private directory to // hold the keys. 
func NewKeyFileStore(baseDir string, passphraseRetriever passphrase.Retriever) (*KeyFileStore, error) { baseDir = filepath.Join(baseDir, privDir) fileStore, err := NewPrivateSimpleFileStore(baseDir, keyExtension) if err != nil { return nil, err } cachedKeys := make(map[string]*cachedKey) return &KeyFileStore{SimpleFileStore: *fileStore, Retriever: passphraseRetriever, cachedKeys: cachedKeys}, nil } // Name returns a user friendly name for the location this store // keeps its data func (s *KeyFileStore) Name() string { return fmt.Sprintf("file (%s)", s.SimpleFileStore.BaseDir()) } // AddKey stores the contents of a PEM-encoded private key as a PEM block func (s *KeyFileStore) AddKey(name, alias string, privKey data.PrivateKey) error { s.Lock() defer s.Unlock() return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey) } // GetKey returns the PrivateKey given a KeyID func (s *KeyFileStore) GetKey(name string) (data.PrivateKey, string, error) { s.Lock() defer s.Unlock() return getKey(s, s.Retriever, s.cachedKeys, name) } // ListKeys returns a list of unique PublicKeys present on the KeyFileStore. func (s *KeyFileStore) ListKeys() map[string]string { return listKeys(s) } // RemoveKey removes the key from the keyfilestore func (s *KeyFileStore) RemoveKey(name string) error { s.Lock() defer s.Unlock() return removeKey(s, s.cachedKeys, name) } // ExportKey exportes the encrypted bytes from the keystore and writes it to // dest. func (s *KeyFileStore) ExportKey(name string) ([]byte, error) { keyBytes, _, err := getRawKey(s, name) if err != nil { return nil, err } return keyBytes, nil } // ImportKey imports the private key in the encrypted bytes into the keystore // with the given key ID and alias. 
func (s *KeyFileStore) ImportKey(pemBytes []byte, alias string) error { return importKey(s, s.Retriever, s.cachedKeys, alias, pemBytes) } // NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory func NewKeyMemoryStore(passphraseRetriever passphrase.Retriever) *KeyMemoryStore { memStore := NewMemoryFileStore() cachedKeys := make(map[string]*cachedKey) return &KeyMemoryStore{MemoryFileStore: *memStore, Retriever: passphraseRetriever, cachedKeys: cachedKeys} } // Name returns a user friendly name for the location this store // keeps its data func (s *KeyMemoryStore) Name() string { return "memory" } // AddKey stores the contents of a PEM-encoded private key as a PEM block func (s *KeyMemoryStore) AddKey(name, alias string, privKey data.PrivateKey) error { s.Lock() defer s.Unlock() return addKey(s, s.Retriever, s.cachedKeys, name, alias, privKey) } // GetKey returns the PrivateKey given a KeyID func (s *KeyMemoryStore) GetKey(name string) (data.PrivateKey, string, error) { s.Lock() defer s.Unlock() return getKey(s, s.Retriever, s.cachedKeys, name) } // ListKeys returns a list of unique PublicKeys present on the KeyFileStore. func (s *KeyMemoryStore) ListKeys() map[string]string { return listKeys(s) } // RemoveKey removes the key from the keystore func (s *KeyMemoryStore) RemoveKey(name string) error { s.Lock() defer s.Unlock() return removeKey(s, s.cachedKeys, name) } // ExportKey exportes the encrypted bytes from the keystore and writes it to // dest. func (s *KeyMemoryStore) ExportKey(name string) ([]byte, error) { keyBytes, _, err := getRawKey(s, name) if err != nil { return nil, err } return keyBytes, nil } // ImportKey imports the private key in the encrypted bytes into the keystore // with the given key ID and alias. 
func (s *KeyMemoryStore) ImportKey(pemBytes []byte, alias string) error { return importKey(s, s.Retriever, s.cachedKeys, alias, pemBytes) } func addKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error { var ( chosenPassphrase string giveup bool err error ) for attempts := 0; ; attempts++ { chosenPassphrase, giveup, err = passphraseRetriever(name, alias, true, attempts) if err != nil { continue } if giveup { return ErrAttemptsExceeded{} } if attempts > 10 { return ErrAttemptsExceeded{} } break } return encryptAndAddKey(s, chosenPassphrase, cachedKeys, name, alias, privKey) } func getKeyAlias(s LimitedFileStore, keyID string) (string, error) { files := s.ListFiles() name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID))) for _, file := range files { filename := filepath.Base(file) if strings.HasPrefix(filename, name) { aliasPlusDotKey := strings.TrimPrefix(filename, name+"_") retVal := strings.TrimSuffix(aliasPlusDotKey, "."+keyExtension) return retVal, nil } } return "", &ErrKeyNotFound{KeyID: keyID} } // GetKey returns the PrivateKey given a KeyID func getKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, name string) (data.PrivateKey, string, error) { cachedKeyEntry, ok := cachedKeys[name] if ok { return cachedKeyEntry.key, cachedKeyEntry.alias, nil } keyBytes, keyAlias, err := getRawKey(s, name) if err != nil { return nil, "", err } var retErr error // See if the key is encrypted. 
If its encrypted we'll fail to parse the private key privKey, err := ParsePEMPrivateKey(keyBytes, "") if err != nil { privKey, _, retErr = GetPasswdDecryptBytes(passphraseRetriever, keyBytes, name, string(keyAlias)) } if retErr != nil { return nil, "", retErr } cachedKeys[name] = &cachedKey{alias: keyAlias, key: privKey} return privKey, keyAlias, nil } // ListKeys returns a map of unique PublicKeys present on the KeyFileStore and // their corresponding aliases. func listKeys(s LimitedFileStore) map[string]string { keyIDMap := make(map[string]string) for _, f := range s.ListFiles() { // Remove the prefix of the directory from the filename if f[:len(rootKeysSubdir)] == rootKeysSubdir { f = strings.TrimPrefix(f, rootKeysSubdir+"/") } else { f = strings.TrimPrefix(f, nonRootKeysSubdir+"/") } // Remove the extension from the full filename // abcde_root.key becomes abcde_root keyIDFull := strings.TrimSpace(strings.TrimSuffix(f, filepath.Ext(f))) // If the key does not have a _, it is malformed underscoreIndex := strings.LastIndex(keyIDFull, "_") if underscoreIndex == -1 { continue } // The keyID is the first part of the keyname // The KeyAlias is the second part of the keyname // in a key named abcde_root, abcde is the keyID and root is the KeyAlias keyID := keyIDFull[:underscoreIndex] keyAlias := keyIDFull[underscoreIndex+1:] keyIDMap[keyID] = keyAlias } return keyIDMap } // RemoveKey removes the key from the keyfilestore func removeKey(s LimitedFileStore, cachedKeys map[string]*cachedKey, name string) error { keyAlias, err := getKeyAlias(s, name) if err != nil { return err } delete(cachedKeys, name) // being in a subdirectory is for backwards compatibliity filename := name + "_" + keyAlias err = s.Remove(filepath.Join(getSubdir(keyAlias), filename)) if err != nil { return err } return nil } // Assumes 2 subdirectories, 1 containing root keys and 1 containing tuf keys func getSubdir(alias string) string { if alias == "root" { return rootKeysSubdir } return 
nonRootKeysSubdir } // Given a key ID, gets the bytes and alias belonging to that key if the key // exists func getRawKey(s LimitedFileStore, name string) ([]byte, string, error) { keyAlias, err := getKeyAlias(s, name) if err != nil { return nil, "", err } filename := name + "_" + keyAlias var keyBytes []byte keyBytes, err = s.Get(filepath.Join(getSubdir(keyAlias), filename)) if err != nil { return nil, "", err } return keyBytes, keyAlias, nil } // GetPasswdDecryptBytes gets the password to decript the given pem bytes. // Returns the password and private key func GetPasswdDecryptBytes(passphraseRetriever passphrase.Retriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) { var ( passwd string retErr error privKey data.PrivateKey ) for attempts := 0; ; attempts++ { var ( giveup bool err error ) passwd, giveup, err = passphraseRetriever(name, alias, false, attempts) // Check if the passphrase retriever got an error or if it is telling us to give up if giveup || err != nil { return nil, "", ErrPasswordInvalid{} } if attempts > 10 { return nil, "", ErrAttemptsExceeded{} } // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase privKey, err = ParsePEMPrivateKey(pemBytes, passwd) if err != nil { retErr = ErrPasswordInvalid{} } else { // We managed to parse the PrivateKey. We've succeeded! 
retErr = nil break } } if retErr != nil { return nil, "", retErr } return privKey, passwd, nil } func encryptAndAddKey(s LimitedFileStore, passwd string, cachedKeys map[string]*cachedKey, name, alias string, privKey data.PrivateKey) error { var ( pemPrivKey []byte err error ) if passwd != "" { pemPrivKey, err = EncryptPrivateKey(privKey, passwd) } else { pemPrivKey, err = KeyToPEM(privKey) } if err != nil { return err } cachedKeys[name] = &cachedKey{alias: alias, key: privKey} return s.Add(filepath.Join(getSubdir(alias), name+"_"+alias), pemPrivKey) } func importKey(s LimitedFileStore, passphraseRetriever passphrase.Retriever, cachedKeys map[string]*cachedKey, alias string, pemBytes []byte) error { if alias != data.CanonicalRootRole { return s.Add(alias, pemBytes) } privKey, passphrase, err := GetPasswdDecryptBytes( passphraseRetriever, pemBytes, "", "imported "+alias) if err != nil { return err } var name string name = privKey.ID() return encryptAndAddKey(s, passphrase, cachedKeys, name, alias, privKey) } notary-0.1/trustmanager/keyfilestore_test.go000066400000000000000000000453021262207326400215050ustar00rootroot00000000000000package trustmanager import ( "crypto/rand" "errors" "io/ioutil" "os" "path/filepath" "testing" "github.com/docker/notary/passphrase" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) const cannedPassphrase = "passphrase" var passphraseRetriever = func(keyID string, alias string, createNew bool, numAttempts int) (string, bool, error) { if numAttempts > 5 { giveup := true return "", giveup, errors.New("passPhraseRetriever failed after too many requests") } return cannedPassphrase, false, nil } func TestAddKey(t *testing.T) { testName := "docker.com/notary/root" testExt := "key" testAlias := "root" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Since we're 
generating this manually we need to add the extension '.' expectedFilePath := filepath.Join(tempBaseDir, privDir, rootKeysSubdir, testName+"_"+testAlias+"."+testExt) // Create our store store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddKey function err = store.AddKey(testName, "root", privKey) assert.NoError(t, err, "failed to add key to store") // Check to see if file exists b, err := ioutil.ReadFile(expectedFilePath) assert.NoError(t, err, "expected file not found") assert.Contains(t, string(b), "-----BEGIN EC PRIVATE KEY-----") } func TestGet(t *testing.T) { testData := []byte(`-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAyUIXjsrWRrvPa4Bzp3VJ6uOUGPay2fUpSV8XzNxZxIG/Opdr +k3EQi1im6WOqF3Y5AS1UjYRxNuRN+cAZeo3uS1pOTuoSupBXuchVw8s4hZJ5vXn TRmGb+xY7tZ1ZVgPfAZDib9sRSUsL/gC+aSyprAjG/YBdbF06qKbfOfsoCEYW1OQ 82JqHzQH514RFYPTnEGpvfxWaqmFQLmv0uMxV/cAYvqtrGkXuP0+a8PknlD2obw5 0rHE56Su1c3Q42S7L51K38tpbgWOSRcTfDUWEj5v9wokkNQvyKBwbS996s4EJaZd 7r6M0h1pHnuRxcSaZLYRwgOe1VNGg2VfWzgd5QIDAQABAoIBAF9LGwpygmj1jm3R YXGd+ITugvYbAW5wRb9G9mb6wspnwNsGTYsz/UR0ZudZyaVw4jx8+jnV/i3e5PC6 QRcAgqf8l4EQ/UuThaZg/AlT1yWp9g4UyxNXja87EpTsGKQGwTYxZRM4/xPyWOzR mt8Hm8uPROB9aA2JG9npaoQG8KSUj25G2Qot3ukw/IOtqwN/Sx1EqF0EfCH1K4KU a5TrqlYDFmHbqT1zTRec/BTtVXNsg8xmF94U1HpWf3Lpg0BPYT7JiN2DPoLelRDy a/A+a3ZMRNISL5wbq/jyALLOOyOkIqa+KEOeW3USuePd6RhDMzMm/0ocp5FCwYfo k4DDeaECgYEA0eSMD1dPGo+u8UTD8i7ZsZCS5lmXLNuuAg5f5B/FGghD8ymPROIb dnJL5QSbUpmBsYJ+nnO8RiLrICGBe7BehOitCKi/iiZKJO6edrfNKzhf4XlU0HFl jAOMa975pHjeCoZ1cXJOEO9oW4SWTCyBDBSqH3/ZMgIOiIEk896lSmkCgYEA9Xf5 Jqv3HtQVvjugV/axAh9aI8LMjlfFr9SK7iXpY53UdcylOSWKrrDok3UnrSEykjm7 UL3eCU5jwtkVnEXesNn6DdYo3r43E6iAiph7IBkB5dh0yv3vhIXPgYqyTnpdz4pg 3yPGBHMPnJUBThg1qM7k6a2BKHWySxEgC1DTMB0CgYAGvdmF0J8Y0k6jLzs/9yNE 4cjmHzCM3016gW2xDRgumt9b2xTf+Ic7SbaIV5qJj6arxe49NqhwdESrFohrKaIP 
kM2l/o2QaWRuRT/Pvl2Xqsrhmh0QSOQjGCYVfOb10nAHVIRHLY22W4o1jk+piLBo a+1+74NRaOGAnu1J6/fRKQKBgAF180+dmlzemjqFlFCxsR/4G8s2r4zxTMXdF+6O 3zKuj8MbsqgCZy7e8qNeARxwpCJmoYy7dITNqJ5SOGSzrb2Trn9ClP+uVhmR2SH6 AlGQlIhPn3JNzI0XVsLIloMNC13ezvDE/7qrDJ677EQQtNEKWiZh1/DrsmHr+irX EkqpAoGAJWe8PC0XK2RE9VkbSPg9Ehr939mOLWiHGYTVWPttUcum/rTKu73/X/mj WxnPWGtzM1pHWypSokW90SP4/xedMxludvBvmz+CTYkNJcBGCrJumy11qJhii9xp EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0= -----END RSA PRIVATE KEY----- `) testName := "docker.com/notary/root" testExt := "key" testAlias := "root" perms := os.FileMode(0755) emptyPassphraseRetriever := func(string, string, bool, int) (string, bool, error) { return "", false, nil } // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' filePath := filepath.Join(tempBaseDir, privDir, rootKeysSubdir, testName+"_"+testAlias+"."+testExt) os.MkdirAll(filepath.Dir(filePath), perms) err = ioutil.WriteFile(filePath, testData, perms) assert.NoError(t, err, "failed to write test file") // Create our store store, err := NewKeyFileStore(tempBaseDir, emptyPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") // Call the GetKey function privKey, _, err := store.GetKey(testName) assert.NoError(t, err, "failed to get key from store") pemPrivKey, err := KeyToPEM(privKey) assert.NoError(t, err, "failed to convert key to PEM") assert.Equal(t, testData, pemPrivKey) } func TestListKeys(t *testing.T) { testName := "docker.com/notary/root" perms := os.FileMode(0755) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Create our store store, err := NewKeyFileStore(tempBaseDir, 
passphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddKey function err = store.AddKey(testName, "root", privKey) assert.NoError(t, err, "failed to add key to store") // Check to see if the keystore lists this key keyMap := store.ListKeys() // Expect to see exactly one key in the map assert.Len(t, keyMap, 1) // Expect to see privKeyID inside of the map role, ok := keyMap[testName] assert.True(t, ok) assert.Equal(t, role, "root") // Call the AddKey function for the second key err = store.AddKey(testName+"2", "targets", privKey) assert.NoError(t, err, "failed to add key to store") // Check to see if the keystore lists this key keyMap = store.ListKeys() // Expect to see exactly two keys in the map assert.Len(t, keyMap, 2) // Expect to see privKeyID2 inside of the map role, ok = keyMap[testName+"2"] assert.True(t, ok) assert.Equal(t, role, "targets") // Write an invalid filename to the directory filePath := filepath.Join(tempBaseDir, privDir, rootKeysSubdir, "fakekeyname.key") err = ioutil.WriteFile(filePath, []byte("data"), perms) assert.NoError(t, err, "failed to write test file") // Check to see if the keystore still lists two keys keyMap = store.ListKeys() assert.Len(t, keyMap, 2) } func TestAddGetKeyMemStore(t *testing.T) { testName := "docker.com/notary/root" testAlias := "root" // Create our store store := NewKeyMemoryStore(passphraseRetriever) privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddKey function err = store.AddKey(testName, testAlias, privKey) assert.NoError(t, err, "failed to add key to store") // Check to see if file exists retrievedKey, retrievedAlias, err := store.GetKey(testName) assert.NoError(t, err, "failed to get key from store") assert.Equal(t, retrievedAlias, testAlias) assert.Equal(t, retrievedKey.Public(), privKey.Public()) 
assert.Equal(t, retrievedKey.Private(), privKey.Private()) } func TestGetDecryptedWithTamperedCipherText(t *testing.T) { testExt := "key" testAlias := "root" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Create our FileStore store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") // Generate a new Private Key privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddEncryptedKey function err = store.AddKey(privKey.ID(), testAlias, privKey) assert.NoError(t, err, "failed to add key to store") // Since we're generating this manually we need to add the extension '.' expectedFilePath := filepath.Join(tempBaseDir, privDir, rootKeysSubdir, privKey.ID()+"_"+testAlias+"."+testExt) // Get file description, open file fp, err := os.OpenFile(expectedFilePath, os.O_WRONLY, 0600) assert.NoError(t, err, "expected file not found") // Tamper the file fp.WriteAt([]byte("a"), int64(1)) // Recreate the KeyFileStore to avoid caching store, err = NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") // Try to decrypt the file _, _, err = store.GetKey(privKey.ID()) assert.Error(t, err, "expected error while decrypting the content due to invalid cipher text") } func TestGetDecryptedWithInvalidPassphrase(t *testing.T) { // Make a passphraseRetriever that always returns a different passphrase in order to test // decryption failure a := "a" var invalidPassphraseRetriever = func(keyId string, alias string, createNew bool, numAttempts int) (string, bool, error) { if numAttempts > 5 { giveup := true return "", giveup, nil } a = a + a return a, false, nil } // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", 
"notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Test with KeyFileStore fileStore, err := NewKeyFileStore(tempBaseDir, invalidPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") newFileStore, err := NewKeyFileStore(tempBaseDir, invalidPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") testGetDecryptedWithInvalidPassphrase(t, fileStore, newFileStore, ErrPasswordInvalid{}) // Can't test with KeyMemoryStore because we cache the decrypted version of // the key forever } func TestGetDecryptedWithConsistentlyInvalidPassphrase(t *testing.T) { // Make a passphraseRetriever that always returns a different passphrase in order to test // decryption failure a := "aaaaaaaaaaaaa" var consistentlyInvalidPassphraseRetriever = func(keyID string, alias string, createNew bool, numAttempts int) (string, bool, error) { a = a + "a" return a, false, nil } // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Test with KeyFileStore fileStore, err := NewKeyFileStore(tempBaseDir, consistentlyInvalidPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") newFileStore, err := NewKeyFileStore(tempBaseDir, consistentlyInvalidPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") testGetDecryptedWithInvalidPassphrase(t, fileStore, newFileStore, ErrAttemptsExceeded{}) // Can't test with KeyMemoryStore because we cache the decrypted version of // the key forever } // testGetDecryptedWithInvalidPassphrase takes two keystores so it can add to // one and get from the other (to work around caching) func testGetDecryptedWithInvalidPassphrase(t *testing.T, store KeyStore, newStore KeyStore, expectedFailureType interface{}) { testAlias := "root" // Generate a new random 
RSA Key privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddKey function err = store.AddKey(privKey.ID(), testAlias, privKey) assert.NoError(t, err, "failed to add key to store") // Try to decrypt the file with an invalid passphrase _, _, err = newStore.GetKey(privKey.ID()) assert.Error(t, err, "expected error while decrypting the content due to invalid passphrase") assert.IsType(t, err, expectedFailureType) } func TestRemoveKey(t *testing.T) { testName := "docker.com/notary/root" testExt := "key" testAlias := "alias" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer os.RemoveAll(tempBaseDir) // Since we're generating this manually we need to add the extension '.' expectedFilePath := filepath.Join(tempBaseDir, privDir, nonRootKeysSubdir, testName+"_"+testAlias+"."+testExt) // Create our store store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddKey function err = store.AddKey(testName, testAlias, privKey) assert.NoError(t, err, "failed to add key to store") // Check to see if file exists _, err = ioutil.ReadFile(expectedFilePath) assert.NoError(t, err, "expected file not found") // Call remove key err = store.RemoveKey(testName) assert.NoError(t, err, "unable to remove key") // Check to see if file still exists _, err = ioutil.ReadFile(expectedFilePath) assert.Error(t, err, "file should not exist") } func TestKeysAreCached(t *testing.T) { testName := "docker.com/notary/root" testAlias := "alias" // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err, "failed to create a temporary directory") defer 
os.RemoveAll(tempBaseDir) var countingPassphraseRetriever passphrase.Retriever numTimesCalled := 0 countingPassphraseRetriever = func(keyId, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) { numTimesCalled++ return "password", false, nil } // Create our store store, err := NewKeyFileStore(tempBaseDir, countingPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err, "could not generate private key") // Call the AddKey function err = store.AddKey(testName, testAlias, privKey) assert.NoError(t, err, "failed to add key to store") assert.Equal(t, 1, numTimesCalled, "numTimesCalled should have been 1") // Call the AddKey function privKey2, _, err := store.GetKey(testName) assert.NoError(t, err, "failed to add key to store") assert.Equal(t, privKey.Public(), privKey2.Public(), "cachedPrivKey should be the same as the added privKey") assert.Equal(t, privKey.Private(), privKey2.Private(), "cachedPrivKey should be the same as the added privKey") assert.Equal(t, 1, numTimesCalled, "numTimesCalled should be 1 -- no additional call to passphraseRetriever") // Create a new store store2, err := NewKeyFileStore(tempBaseDir, countingPassphraseRetriever) assert.NoError(t, err, "failed to create new key filestore") // Call the GetKey function privKey3, _, err := store2.GetKey(testName) assert.NoError(t, err, "failed to get key from store") assert.Equal(t, privKey2.Private(), privKey3.Private(), "privkey from store1 should be the same as privkey from store2") assert.Equal(t, privKey2.Public(), privKey3.Public(), "privkey from store1 should be the same as privkey from store2") assert.Equal(t, 2, numTimesCalled, "numTimesCalled should be 2 -- one additional call to passphraseRetriever") // Call the GetKey function a bunch of times for i := 0; i < 10; i++ { _, _, err := store2.GetKey(testName) assert.NoError(t, err, "failed to get key from store") } 
assert.Equal(t, 2, numTimesCalled, "numTimesCalled should be 2 -- no additional call to passphraseRetriever") } // Exporting a key is successful (it is a valid key) func TestKeyFileStoreExportSuccess(t *testing.T) { // Generate a new Private Key privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err) defer os.RemoveAll(tempBaseDir) // Create our FileStore and add the key store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err) err = store.AddKey(privKey.ID(), "root", privKey) assert.NoError(t, err) assertExportKeySuccess(t, store, privKey) } // Exporting a key that doesn't exist fails (it is a valid key) func TestKeyFileStoreExportNonExistantFailure(t *testing.T) { // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err) defer os.RemoveAll(tempBaseDir) // Create empty FileStore store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err) _, err = store.ExportKey("12345") assert.Error(t, err) } // Exporting a key is successful (it is a valid key) func TestKeyMemoryStoreExportSuccess(t *testing.T) { // Generate a new Private Key privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // Create our MemoryStore and add key to it store := NewKeyMemoryStore(passphraseRetriever) assert.NoError(t, err) err = store.AddKey(privKey.ID(), "root", privKey) assert.NoError(t, err) assertExportKeySuccess(t, store, privKey) } // Exporting a key that doesn't exist fails (it is a valid key) func TestKeyMemoryStoreExportNonExistantFailure(t *testing.T) { store := NewKeyMemoryStore(passphraseRetriever) _, err := store.ExportKey("12345") assert.Error(t, err) } // Importing a key is successful func TestKeyFileStoreImportSuccess(t *testing.T) { // Generate a new Private Key privKey, err := 
GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // Temporary directory where test files will be created tempBaseDir, err := ioutil.TempDir("", "notary-test-") assert.NoError(t, err) defer os.RemoveAll(tempBaseDir) // Create our FileStore store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever) assert.NoError(t, err) assertImportKeySuccess(t, store, privKey) } // Importing a key is successful func TestKeyMemoryStoreImportSuccess(t *testing.T) { // Generate a new Private Key privKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // Create our MemoryStore store := NewKeyMemoryStore(passphraseRetriever) assert.NoError(t, err) assertImportKeySuccess(t, store, privKey) } // Given a keystore and expected key that is in the store, export the key // and assert that the exported key is the same and encrypted with the right // password. func assertExportKeySuccess( t *testing.T, s KeyStore, expectedKey data.PrivateKey) { pemBytes, err := s.ExportKey(expectedKey.ID()) assert.NoError(t, err) reparsedKey, err := ParsePEMPrivateKey(pemBytes, cannedPassphrase) assert.NoError(t, err) assert.Equal(t, expectedKey.Private(), reparsedKey.Private()) assert.Equal(t, expectedKey.Public(), reparsedKey.Public()) } // Given a keystore and expected key, generate an encrypted PEM of the key // and assert that the then imported key is the same and encrypted with the // right password. 
func assertImportKeySuccess( t *testing.T, s KeyStore, expectedKey data.PrivateKey) { pemBytes, err := EncryptPrivateKey(expectedKey, cannedPassphrase) assert.NoError(t, err) err = s.ImportKey(pemBytes, "root") assert.NoError(t, err) reimportedKey, reimportedAlias, err := s.GetKey(expectedKey.ID()) assert.NoError(t, err) assert.Equal(t, "root", reimportedAlias) assert.Equal(t, expectedKey.Private(), reimportedKey.Private()) assert.Equal(t, expectedKey.Public(), reimportedKey.Public()) } notary-0.1/trustmanager/keystore.go000066400000000000000000000033121262207326400176010ustar00rootroot00000000000000package trustmanager import ( "fmt" "github.com/docker/notary/tuf/data" ) // ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key type ErrAttemptsExceeded struct{} // ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key func (err ErrAttemptsExceeded) Error() string { return "maximum number of passphrase attempts exceeded" } // ErrPasswordInvalid is returned when signing fails. It could also mean the signing // key file was corrupted, but we have no way to distinguish. type ErrPasswordInvalid struct{} // ErrPasswordInvalid is returned when signing fails. It could also mean the signing // key file was corrupted, but we have no way to distinguish. func (err ErrPasswordInvalid) Error() string { return "password invalid, operation has failed." } // ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. type ErrKeyNotFound struct { KeyID string } // ErrKeyNotFound is returned when the keystore fails to retrieve a specific key. func (err ErrKeyNotFound) Error() string { return fmt.Sprintf("signing key not found: %s", err.KeyID) } const ( keyExtension = "key" ) // KeyStore is a generic interface for private key storage type KeyStore interface { // Add Key adds a key to the KeyStore, and if the key already exists, // succeeds. Otherwise, returns an error if it cannot add. 
AddKey(name, alias string, privKey data.PrivateKey) error GetKey(name string) (data.PrivateKey, string, error) ListKeys() map[string]string RemoveKey(name string) error ExportKey(name string) ([]byte, error) ImportKey(pemBytes []byte, alias string) error Name() string } type cachedKey struct { alias string key data.PrivateKey } notary-0.1/trustmanager/x509filestore.go000066400000000000000000000165101262207326400203620ustar00rootroot00000000000000package trustmanager import ( "crypto/x509" "errors" "os" "path" "github.com/Sirupsen/logrus" ) // X509FileStore implements X509Store that persists on disk type X509FileStore struct { validate Validator fileMap map[CertID]string fingerprintMap map[CertID]*x509.Certificate nameMap map[string][]CertID fileStore FileStore } // NewX509FileStore returns a new X509FileStore. func NewX509FileStore(directory string) (*X509FileStore, error) { validate := ValidatorFunc(func(cert *x509.Certificate) bool { return true }) return newX509FileStore(directory, validate) } // NewX509FilteredFileStore returns a new X509FileStore that validates certificates // that are added. 
func NewX509FilteredFileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) { return newX509FileStore(directory, validate) } func newX509FileStore(directory string, validate func(*x509.Certificate) bool) (*X509FileStore, error) { fileStore, err := NewSimpleFileStore(directory, certExtension) if err != nil { return nil, err } s := &X509FileStore{ validate: ValidatorFunc(validate), fileMap: make(map[CertID]string), fingerprintMap: make(map[CertID]*x509.Certificate), nameMap: make(map[string][]CertID), fileStore: fileStore, } err = loadCertsFromDir(s) if err != nil { return nil, err } return s, nil } // AddCert creates a filename for a given cert and adds a certificate with that name func (s *X509FileStore) AddCert(cert *x509.Certificate) error { if cert == nil { return errors.New("adding nil Certificate to X509Store") } // Check if this certificate meets our validation criteria if !s.validate.Validate(cert) { return &ErrCertValidation{} } // Attempt to write the certificate to the file if err := s.addNamedCert(cert); err != nil { return err } return nil } // addNamedCert allows adding a certificate while controlling the filename it gets // stored under. If the file does not exist on disk, saves it. func (s *X509FileStore) addNamedCert(cert *x509.Certificate) error { fileName, certID, err := fileName(cert) if err != nil { return err } logrus.Debug("Adding cert with certID: ", certID) // Validate if we already added this certificate before if _, ok := s.fingerprintMap[certID]; ok { return &ErrCertExists{} } // Convert certificate to PEM certBytes := CertToPEM(cert) // Save the file to disk if not already there. 
filePath, err := s.fileStore.GetPath(fileName) if err != nil { return err } if _, err := os.Stat(filePath); os.IsNotExist(err) { if err := s.fileStore.Add(fileName, certBytes); err != nil { return err } } else if err != nil { return err } // We wrote the certificate succcessfully, add it to our in-memory storage s.fingerprintMap[certID] = cert s.fileMap[certID] = fileName name := string(cert.Subject.CommonName) s.nameMap[name] = append(s.nameMap[name], certID) return nil } // RemoveCert removes a certificate from a X509FileStore. func (s *X509FileStore) RemoveCert(cert *x509.Certificate) error { if cert == nil { return errors.New("removing nil Certificate from X509Store") } certID, err := fingerprintCert(cert) if err != nil { return err } delete(s.fingerprintMap, certID) filename := s.fileMap[certID] delete(s.fileMap, certID) name := string(cert.Subject.CommonName) // Filter the fingerprint out of this name entry fpList := s.nameMap[name] newfpList := fpList[:0] for _, x := range fpList { if x != certID { newfpList = append(newfpList, x) } } s.nameMap[name] = newfpList if err := s.fileStore.Remove(filename); err != nil { return err } return nil } // RemoveAll removes all the certificates from the store func (s *X509FileStore) RemoveAll() error { for _, filename := range s.fileMap { if err := s.fileStore.Remove(filename); err != nil { return err } } s.fileMap = make(map[CertID]string) s.fingerprintMap = make(map[CertID]*x509.Certificate) s.nameMap = make(map[string][]CertID) return nil } // AddCertFromPEM adds the first certificate that it finds in the byte[], returning // an error if no Certificates are found func (s X509FileStore) AddCertFromPEM(pemBytes []byte) error { cert, err := LoadCertFromPEM(pemBytes) if err != nil { return err } return s.AddCert(cert) } // AddCertFromFile tries to adds a X509 certificate to the store given a filename func (s *X509FileStore) AddCertFromFile(filename string) error { cert, err := LoadCertFromFile(filename) if err != nil { 
return err } return s.AddCert(cert) } // GetCertificates returns an array with all of the current X509 Certificates. func (s *X509FileStore) GetCertificates() []*x509.Certificate { certs := make([]*x509.Certificate, len(s.fingerprintMap)) i := 0 for _, v := range s.fingerprintMap { certs[i] = v i++ } return certs } // GetCertificatePool returns an x509 CertPool loaded with all the certificates // in the store. func (s *X509FileStore) GetCertificatePool() *x509.CertPool { pool := x509.NewCertPool() for _, v := range s.fingerprintMap { pool.AddCert(v) } return pool } // GetCertificateByCertID returns the certificate that matches a certain certID func (s *X509FileStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) { return s.getCertificateByCertID(CertID(certID)) } // getCertificateByCertID returns the certificate that matches a certain certID func (s *X509FileStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) { // If it does not look like a hex encoded sha256 hash, error if len(certID) != 64 { return nil, errors.New("invalid Subject Key Identifier") } // Check to see if this subject key identifier exists if cert, ok := s.fingerprintMap[CertID(certID)]; ok { return cert, nil } return nil, &ErrNoCertificatesFound{query: string(certID)} } // GetCertificatesByCN returns all the certificates that match a specific // CommonName func (s *X509FileStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) { var certs []*x509.Certificate if ids, ok := s.nameMap[cn]; ok { for _, v := range ids { cert, err := s.getCertificateByCertID(v) if err != nil { // This error should never happen. This would mean that we have // an inconsistent X509FileStore return nil, &ErrBadCertificateStore{} } certs = append(certs, cert) } } if len(certs) == 0 { return nil, &ErrNoCertificatesFound{query: cn} } return certs, nil } // GetVerifyOptions returns VerifyOptions with the certificates within the KeyStore // as part of the roots list. 
This never allows the use of system roots, returning // an error if there are no root CAs. func (s *X509FileStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) { // If we have no Certificates loaded return error (we don't want to rever to using // system CAs). if len(s.fingerprintMap) == 0 { return x509.VerifyOptions{}, errors.New("no root CAs available") } opts := x509.VerifyOptions{ DNSName: dnsName, Roots: s.GetCertificatePool(), } return opts, nil } // Empty returns true if there are no certificates in the X509FileStore, false // otherwise. func (s *X509FileStore) Empty() bool { return len(s.fingerprintMap) == 0 } func fileName(cert *x509.Certificate) (string, CertID, error) { certID, err := fingerprintCert(cert) if err != nil { return "", "", err } return path.Join(cert.Subject.CommonName, string(certID)), certID, nil } notary-0.1/trustmanager/x509filestore_test.go000066400000000000000000000275221262207326400214260ustar00rootroot00000000000000package trustmanager import ( "crypto/x509" "encoding/pem" "io/ioutil" "os" "path/filepath" "testing" "github.com/stretchr/testify/assert" ) func TestNewX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempDir) store, err := NewX509FileStore(tempDir) if err != nil { t.Fatalf("failed to create a new X509FileStore: %v", store) } } // NewX509FileStore loads any existing certs from the directory, and does // not overwrite any of the. 
func TestNewX509FileStoreLoadsExistingCerts(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") assert.NoError(t, err) defer os.RemoveAll(tempDir) certBytes, err := ioutil.ReadFile("../fixtures/root-ca.crt") assert.NoError(t, err) out, err := os.Create(filepath.Join(tempDir, "root-ca.crt")) assert.NoError(t, err) // to distinguish it from the canonical format distinguishingBytes := []byte{'\n', '\n', '\n', '\n', '\n', '\n'} nBytes, err := out.Write(distinguishingBytes) assert.NoError(t, err) assert.Len(t, distinguishingBytes, nBytes) nBytes, err = out.Write(certBytes) assert.NoError(t, err) assert.Len(t, certBytes, nBytes) err = out.Close() assert.NoError(t, err) store, err := NewX509FileStore(tempDir) assert.NoError(t, err) expectedCert, err := LoadCertFromFile("../fixtures/root-ca.crt") assert.NoError(t, err) assert.Equal(t, []*x509.Certificate{expectedCert}, store.GetCertificates()) outBytes, err := ioutil.ReadFile(filepath.Join(tempDir, "root-ca.crt")) assert.NoError(t, err) assert.Equal(t, distinguishingBytes, outBytes[:6], "original file overwritten") assert.Equal(t, certBytes, outBytes[6:], "original file overwritten") } func TestAddCertX509FileStore(t *testing.T) { // Read certificate from file b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Decode PEM block var block *pem.Block block, _ = pem.Decode(b) // Load X509 Certificate cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } // Create a Store and add the certificate to it store, _ := NewX509FileStore(tempDir) err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate: %v", err) } // Retrieve all the certificates certs := store.GetCertificates() // Check to see if certificate is present and total number of certs is correct numCerts := len(certs) if numCerts != 1 { 
t.Fatalf("unexpected number of certificates in store: %d", numCerts) } if certs[0] != cert { t.Fatalf("expected certificates to be the same") } } func TestAddCertFromFileX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") assert.NoError(t, err, "failed to create temporary directory") store, err := NewX509FileStore(tempDir) assert.NoError(t, err, "failed to load x509 filestore") err = store.AddCertFromFile("../fixtures/root-ca.crt") assert.NoError(t, err, "failed to add certificate from file") assert.Len(t, store.GetCertificates(), 1) // Now load the x509 filestore with the same path and expect the same result newStore, err := NewX509FileStore(tempDir) assert.NoError(t, err, "failed to load x509 filestore") assert.Len(t, newStore.GetCertificates(), 1) // Test that adding the same certificate returns an error err = newStore.AddCert(newStore.GetCertificates()[0]) if assert.Error(t, err, "expected error when adding certificate twice") { assert.Equal(t, err, &ErrCertExists{}) } } // TestNewX509FileStoreEmpty verifies the behavior of the Empty function func TestNewX509FileStoreEmpty(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") assert.NoError(t, err) defer os.RemoveAll(tempDir) store, err := NewX509FileStore(tempDir) assert.NoError(t, err) assert.True(t, store.Empty()) err = store.AddCertFromFile("../fixtures/root-ca.crt") assert.NoError(t, err) assert.False(t, store.Empty()) } func TestAddCertFromPEMX509FileStore(t *testing.T) { b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } store, _ := NewX509FileStore(tempDir) err = store.AddCertFromPEM(b) if err != nil { t.Fatalf("failed to load certificate from PEM: %v", err) } numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestRemoveCertX509FileStore(t 
*testing.T) { b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } store, _ := NewX509FileStore(tempDir) err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate: %v", err) } // Number of certificates should be 1 since we added the cert numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } // Remove the cert from the store err = store.RemoveCert(cert) if err != nil { t.Fatalf("failed to remove certificate: %v", err) } // Number of certificates should be 0 since we added and removed the cert numCerts = len(store.GetCertificates()) if numCerts != 0 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestRemoveAllX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } // Add three certificates to store store, _ := NewX509FileStore(tempDir) certFiles := [3]string{"../fixtures/root-ca.crt", "../fixtures/intermediate-ca.crt", "../fixtures/secure.example.com.crt"} for _, file := range certFiles { b, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate: %v", err) } } // Number of certificates should be 3 since we added the cert numCerts := len(store.GetCertificates()) if numCerts != 3 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } // Remove the cert from the store err = store.RemoveAll() if err != nil { 
t.Fatalf("failed to remove all certificates: %v", err) } // Number of certificates should be 0 since we added and removed the cert numCerts = len(store.GetCertificates()) if numCerts != 0 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestInexistentGetCertificateByKeyIDX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } store, _ := NewX509FileStore(tempDir) err = store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } _, err = store.GetCertificateByCertID("4d06afd30b8bed131d2a84c97d00b37f422021598bfae34285ce98e77b708b5a") if err == nil { t.Fatalf("no error returned for inexistent certificate") } } func TestGetCertificateByKeyIDX509FileStore(t *testing.T) { b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } store, _ := NewX509FileStore(tempDir) err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate from PEM: %v", err) } keyID, err := FingerprintCert(cert) if err != nil { t.Fatalf("failed to fingerprint the certificate: %v", err) } // Tries to retrieve cert by Subject Key IDs _, err = store.GetCertificateByCertID(keyID) if err != nil { t.Fatalf("expected certificate in store: %s", keyID) } } func TestGetVerifyOpsErrorsWithoutCertsX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } // Create empty Store store, _ := NewX509FileStore(tempDir) // Try to get VerifyOptions without certs added _, err = store.GetVerifyOptions("example.com") if err == nil { t.Fatalf("expecting an error when getting empty VerifyOptions") } } func 
TestVerifyLeafCertFromIntermediateX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } // Create a store and add a root store, _ := NewX509FileStore(tempDir) err = store.AddCertFromFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get the VerifyOptions from our Store opts, err := store.GetVerifyOptions("secure.example.com") // Get leaf certificate b, err := ioutil.ReadFile("../fixtures/secure.example.com.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } // Try to find a valid chain for cert _, err = cert.Verify(opts) if err != nil { t.Fatalf("couldn't find a valid chain for this certificate: %v", err) } } func TestVerifyIntermediateFromRootX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } // Create a store and add a root store, _ := NewX509FileStore(tempDir) err = store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get the VerifyOptions from our Store opts, err := store.GetVerifyOptions("Notary Testing CA") // Get leaf certificate b, err := ioutil.ReadFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } // Try to find a valid chain for cert _, err = cert.Verify(opts) if err != nil { t.Fatalf("couldn't find a valid chain for this certificate: %v", err) } } func TestNewX509FilteredFileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } store, err := NewX509FilteredFileStore(tempDir, 
func(cert *x509.Certificate) bool { return cert.IsCA }) if err != nil { t.Fatalf("failed to create new X509FilteredFileStore: %v", err) } // AddCert should succeed because this is a CA being added err = store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } // AddCert should fail because this is a leaf cert being added err = store.AddCertFromFile("../fixtures/secure.example.com.crt") if err == nil { t.Fatalf("was expecting non-CA certificate to be rejected") } } func TestGetCertificatePoolX509FileStore(t *testing.T) { tempDir, err := ioutil.TempDir("", "cert-test") if err != nil { t.Fatal(err) } // Create a store and add a root store, _ := NewX509FileStore(tempDir) err = store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } pool := store.GetCertificatePool() numCerts := len(pool.Subjects()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in pool: %d", numCerts) } } notary-0.1/trustmanager/x509memstore.go000066400000000000000000000123561262207326400202250ustar00rootroot00000000000000package trustmanager import ( "crypto/x509" "errors" "github.com/Sirupsen/logrus" ) // X509MemStore implements X509Store as an in-memory object with no persistence type X509MemStore struct { validate Validator fingerprintMap map[CertID]*x509.Certificate nameMap map[string][]CertID } // NewX509MemStore returns a new X509MemStore. func NewX509MemStore() *X509MemStore { validate := ValidatorFunc(func(cert *x509.Certificate) bool { return true }) return &X509MemStore{ validate: validate, fingerprintMap: make(map[CertID]*x509.Certificate), nameMap: make(map[string][]CertID), } } // NewX509FilteredMemStore returns a new X509Memstore that validates certificates // that are added. 
func NewX509FilteredMemStore(validate func(*x509.Certificate) bool) *X509MemStore { s := &X509MemStore{ validate: ValidatorFunc(validate), fingerprintMap: make(map[CertID]*x509.Certificate), nameMap: make(map[string][]CertID), } return s } // AddCert adds a certificate to the store func (s *X509MemStore) AddCert(cert *x509.Certificate) error { if cert == nil { return errors.New("adding nil Certificate to X509Store") } if !s.validate.Validate(cert) { return &ErrCertValidation{} } certID, err := fingerprintCert(cert) if err != nil { return err } logrus.Debug("Adding cert with certID: ", certID) // In this store we overwrite the certificate if it already exists s.fingerprintMap[certID] = cert name := string(cert.RawSubject) s.nameMap[name] = append(s.nameMap[name], certID) return nil } // RemoveCert removes a certificate from a X509MemStore. func (s *X509MemStore) RemoveCert(cert *x509.Certificate) error { if cert == nil { return errors.New("removing nil Certificate to X509Store") } certID, err := fingerprintCert(cert) if err != nil { return err } delete(s.fingerprintMap, certID) name := string(cert.RawSubject) // Filter the fingerprint out of this name entry fpList := s.nameMap[name] newfpList := fpList[:0] for _, x := range fpList { if x != certID { newfpList = append(newfpList, x) } } s.nameMap[name] = newfpList return nil } // RemoveAll removes all the certificates from the store func (s *X509MemStore) RemoveAll() error { for _, cert := range s.fingerprintMap { if err := s.RemoveCert(cert); err != nil { return err } } return nil } // AddCertFromPEM adds a certificate to the store from a PEM blob func (s *X509MemStore) AddCertFromPEM(pemBytes []byte) error { cert, err := LoadCertFromPEM(pemBytes) if err != nil { return err } return s.AddCert(cert) } // AddCertFromFile tries to adds a X509 certificate to the store given a filename func (s *X509MemStore) AddCertFromFile(originFilname string) error { cert, err := LoadCertFromFile(originFilname) if err != nil { return 
err } return s.AddCert(cert) } // GetCertificates returns an array with all of the current X509 Certificates. func (s *X509MemStore) GetCertificates() []*x509.Certificate { certs := make([]*x509.Certificate, len(s.fingerprintMap)) i := 0 for _, v := range s.fingerprintMap { certs[i] = v i++ } return certs } // GetCertificatePool returns an x509 CertPool loaded with all the certificates // in the store. func (s *X509MemStore) GetCertificatePool() *x509.CertPool { pool := x509.NewCertPool() for _, v := range s.fingerprintMap { pool.AddCert(v) } return pool } // GetCertificateByCertID returns the certificate that matches a certain certID func (s *X509MemStore) GetCertificateByCertID(certID string) (*x509.Certificate, error) { return s.getCertificateByCertID(CertID(certID)) } // getCertificateByCertID returns the certificate that matches a certain certID or error func (s *X509MemStore) getCertificateByCertID(certID CertID) (*x509.Certificate, error) { // If it does not look like a hex encoded sha256 hash, error if len(certID) != 64 { return nil, errors.New("invalid Subject Key Identifier") } // Check to see if this subject key identifier exists if cert, ok := s.fingerprintMap[CertID(certID)]; ok { return cert, nil } return nil, &ErrNoCertificatesFound{query: string(certID)} } // GetCertificatesByCN returns all the certificates that match a specific // CommonName func (s *X509MemStore) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) { var certs []*x509.Certificate if ids, ok := s.nameMap[cn]; ok { for _, v := range ids { cert, err := s.getCertificateByCertID(v) if err != nil { // This error should never happen. This would mean that we have // an inconsistent X509MemStore return nil, err } certs = append(certs, cert) } } if len(certs) == 0 { return nil, &ErrNoCertificatesFound{query: cn} } return certs, nil } // GetVerifyOptions returns VerifyOptions with the certificates within the KeyStore // as part of the roots list. 
This never allows the use of system roots, returning // an error if there are no root CAs. func (s *X509MemStore) GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) { // If we have no Certificates loaded return error (we don't want to rever to using // system CAs). if len(s.fingerprintMap) == 0 { return x509.VerifyOptions{}, errors.New("no root CAs available") } opts := x509.VerifyOptions{ DNSName: dnsName, Roots: s.GetCertificatePool(), } return opts, nil } notary-0.1/trustmanager/x509memstore_test.go000066400000000000000000000200731262207326400212570ustar00rootroot00000000000000package trustmanager import ( "crypto/x509" "encoding/pem" "io/ioutil" "testing" ) func TestAddCert(t *testing.T) { // Read certificate from file b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Decode PEM block var block *pem.Block block, _ = pem.Decode(b) // Load X509 Certificate cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } // Create a Store and add the certificate to it store := NewX509MemStore() err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate: %v", err) } // Retrieve all the certificates certs := store.GetCertificates() // Check to see if certificate is present and total number of certs is correct numCerts := len(certs) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } if certs[0] != cert { t.Fatalf("expected certificates to be the same") } } func TestAddCertFromFile(t *testing.T) { store := NewX509MemStore() err := store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestAddCertFromPEM(t *testing.T) { b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != 
nil { t.Fatalf("couldn't load fixture: %v", err) } store := NewX509MemStore() err = store.AddCertFromPEM(b) if err != nil { t.Fatalf("failed to load certificate from PEM: %v", err) } numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestRemoveCert(t *testing.T) { b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } store := NewX509MemStore() err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate: %v", err) } // Number of certificates should be 1 since we added the cert numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } // Remove the cert from the store err = store.RemoveCert(cert) if err != nil { t.Fatalf("failed to remove certificate: %v", err) } // Number of certificates should be 0 since we added and removed the cert numCerts = len(store.GetCertificates()) if numCerts != 0 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestRemoveAllX509MemStore(t *testing.T) { // Add three certificates to store store := NewX509MemStore() certFiles := [3]string{"../fixtures/root-ca.crt", "../fixtures/intermediate-ca.crt", "../fixtures/secure.example.com.crt"} for _, file := range certFiles { b, err := ioutil.ReadFile(file) if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate: %v", err) } } // Number of certificates should be 3 since we added the cert numCerts := len(store.GetCertificates()) if 
numCerts != 3 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } // Remove the cert from the store err := store.RemoveAll() if err != nil { t.Fatalf("failed to remove all certificates: %v", err) } // Number of certificates should be 0 since we added and removed the cert numCerts = len(store.GetCertificates()) if numCerts != 0 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } } func TestInexistentGetCertificateByCertID(t *testing.T) { store := NewX509MemStore() err := store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } _, err = store.GetCertificateByCertID("4d06afd30b8bed131d2a84c97d00b37f422021598bfae34285ce98e77b708b5a") if err == nil { t.Fatalf("no error returned for inexistent certificate") } } func TestGetCertificateByKeyID(t *testing.T) { b, err := ioutil.ReadFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } store := NewX509MemStore() err = store.AddCert(cert) if err != nil { t.Fatalf("failed to load certificate from PEM: %v", err) } certID, err := FingerprintCert(cert) if err != nil { t.Fatalf("failed to fingerprint the certificate: %v", err) } // Tries to retrieve cert by Subject Key IDs _, err = store.GetCertificateByCertID(certID) if err != nil { t.Fatalf("expected certificate in store: %s", certID) } } func TestGetVerifyOpsErrorsWithoutCerts(t *testing.T) { // Create empty Store store := NewX509MemStore() // Try to get VerifyOptions without certs added _, err := store.GetVerifyOptions("example.com") if err == nil { t.Fatalf("expecting an error when getting empty VerifyOptions") } } func TestVerifyLeafCertFromIntermediate(t *testing.T) { // Create a store and add a root store := NewX509MemStore() err := 
store.AddCertFromFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get the VerifyOptions from our Store opts, err := store.GetVerifyOptions("secure.example.com") // Get leaf certificate b, err := ioutil.ReadFile("../fixtures/secure.example.com.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } // Try to find a valid chain for cert _, err = cert.Verify(opts) if err != nil { t.Fatalf("couldn't find a valid chain for this certificate: %v", err) } } func TestVerifyIntermediateFromRoot(t *testing.T) { // Create a store and add a root store := NewX509MemStore() err := store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get the VerifyOptions from our Store opts, err := store.GetVerifyOptions("Notary Testing CA") // Get leaf certificate b, err := ioutil.ReadFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } var block *pem.Block block, _ = pem.Decode(b) cert, err := x509.ParseCertificate(block.Bytes) if err != nil { t.Fatalf("couldn't parse certificate: %v", err) } // Try to find a valid chain for cert _, err = cert.Verify(opts) if err != nil { t.Fatalf("couldn't find a valid chain for this certificate: %v", err) } } func TestNewX509FilteredMemStore(t *testing.T) { store := NewX509FilteredMemStore(func(cert *x509.Certificate) bool { return cert.IsCA }) // AddCert should succeed because this is a CA being added err := store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } numCerts := len(store.GetCertificates()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in store: %d", numCerts) } // AddCert should fail because this is a 
leaf cert being added err = store.AddCertFromFile("../fixtures/secure.example.com.crt") if err == nil { t.Fatalf("was expecting non-CA certificate to be rejected") } } func TestGetCertificatePool(t *testing.T) { // Create a store and add a root store := NewX509MemStore() err := store.AddCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } pool := store.GetCertificatePool() numCerts := len(pool.Subjects()) if numCerts != 1 { t.Fatalf("unexpected number of certificates in pool: %d", numCerts) } } notary-0.1/trustmanager/x509store.go000066400000000000000000000107321262207326400175220ustar00rootroot00000000000000package trustmanager import ( "crypto/x509" "errors" "fmt" ) const certExtension string = "crt" // ErrNoCertificatesFound is returned when no certificates are found for a // GetCertificatesBy* type ErrNoCertificatesFound struct { query string } // ErrNoCertificatesFound is returned when no certificates are found for a // GetCertificatesBy* func (err ErrNoCertificatesFound) Error() string { return fmt.Sprintf("error, no certificates found in the keystore match: %s", err.query) } // ErrCertValidation is returned when a certificate doesn't pass the store specific // validations type ErrCertValidation struct { } // ErrCertValidation is returned when a certificate doesn't pass the store specific // validations func (err ErrCertValidation) Error() string { return fmt.Sprintf("store-specific certificate validations failed") } // ErrCertExists is returned when a Certificate already exists in the key store type ErrCertExists struct { } // ErrCertExists is returned when a Certificate already exists in the key store func (err ErrCertExists) Error() string { return fmt.Sprintf("certificate already in the store") } // ErrBadCertificateStore is returned when there is an internal inconsistency // in our x509 store type ErrBadCertificateStore struct { } // ErrBadCertificateStore is returned when there is an 
internal inconsistency // in our x509 store func (err ErrBadCertificateStore) Error() string { return fmt.Sprintf("inconsistent certificate store") } // X509Store is the interface for all X509Stores type X509Store interface { AddCert(cert *x509.Certificate) error AddCertFromPEM(pemCerts []byte) error AddCertFromFile(filename string) error RemoveCert(cert *x509.Certificate) error RemoveAll() error GetCertificateByCertID(certID string) (*x509.Certificate, error) GetCertificatesByCN(cn string) ([]*x509.Certificate, error) GetCertificates() []*x509.Certificate GetCertificatePool() *x509.CertPool GetVerifyOptions(dnsName string) (x509.VerifyOptions, error) } // CertID represent the ID used to identify certificates type CertID string // Validator is a convenience type to create validating function that filters // certificates that get added to the store type Validator interface { Validate(cert *x509.Certificate) bool } // ValidatorFunc is a convenience type to create functions that implement // the Validator interface type ValidatorFunc func(cert *x509.Certificate) bool // Validate implements the Validator interface to allow for any func() bool method // to be passed as a Validator func (vf ValidatorFunc) Validate(cert *x509.Certificate) bool { return vf(cert) } // Verify operates on an X509Store and validates the existence of a chain of trust // between a leafCertificate and a CA present inside of the X509 Store. // It requires at least two certificates in certList, a leaf Certificate and an // intermediate CA certificate. func Verify(s X509Store, dnsName string, certList []*x509.Certificate) error { // If we have no Certificates loaded return error (we don't want to revert to using // system CAs). if len(s.GetCertificates()) == 0 { return errors.New("no root CAs available") } // At a minimum we should be provided a leaf cert and an intermediate. 
if len(certList) < 2 { return errors.New("certificate and at least one intermediate needed") } // Get the VerifyOptions from the keystore for a base dnsName opts, err := s.GetVerifyOptions(dnsName) if err != nil { return err } // Create a Certificate Pool for our intermediate certificates intPool := x509.NewCertPool() var leafCert *x509.Certificate // Iterate through all the certificates for _, c := range certList { // If the cert is a CA, we add it to the intermediates pool. If not, we call // it the leaf cert if c.IsCA { intPool.AddCert(c) continue } // Certificate is not a CA, it must be our leaf certificate. // If we already found one, bail with error if leafCert != nil { return errors.New("more than one leaf certificate found") } leafCert = c } // We exited the loop with no leaf certificates if leafCert == nil { return errors.New("no leaf certificates found") } // We have one leaf certificate and at least one intermediate. Lets add this // Cert Pool as the Intermediates list on our VerifyOptions opts.Intermediates = intPool // Finally, let's call Verify on our leafCert with our fully configured options chains, err := leafCert.Verify(opts) if len(chains) == 0 || err != nil { return fmt.Errorf("certificate verification failed: %v", err) } return nil } notary-0.1/trustmanager/x509store_test.go000066400000000000000000000073501262207326400205630ustar00rootroot00000000000000package trustmanager import ( "crypto/x509" "testing" ) func TestVerifyLeafSuccessfully(t *testing.T) { // Get root certificate rootCA, err := LoadCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Get intermediate certificate intermediateCA, err := LoadCertFromFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Get leaf certificate leafCert, err := LoadCertFromFile("../fixtures/secure.example.com.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Create a store and add the 
CA root store := NewX509MemStore() err = store.AddCert(rootCA) if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get our certList with Leaf Cert and Intermediate certList := []*x509.Certificate{leafCert, intermediateCA} // Try to find a valid chain for cert err = Verify(store, "secure.example.com", certList) if err != nil { t.Fatalf("expected to find a valid chain for this certificate: %v", err) } } func TestVerifyLeafSuccessfullyWithMultipleIntermediates(t *testing.T) { // Get root certificate rootCA, err := LoadCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Get intermediate certificate intermediateCA, err := LoadCertFromFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Get leaf certificate leafCert, err := LoadCertFromFile("../fixtures/secure.example.com.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Create a store and add the CA root store := NewX509MemStore() err = store.AddCert(rootCA) if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get our certList with Leaf Cert and Intermediate certList := []*x509.Certificate{leafCert, intermediateCA, intermediateCA, rootCA} // Try to find a valid chain for cert err = Verify(store, "secure.example.com", certList) if err != nil { t.Fatalf("expected to find a valid chain for this certificate: %v", err) } } func TestVerifyLeafWithNoIntermediate(t *testing.T) { // Get root certificate rootCA, err := LoadCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Get leaf certificate leafCert, err := LoadCertFromFile("../fixtures/secure.example.com.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Create a store and add the CA root store := NewX509MemStore() err = store.AddCert(rootCA) if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get our 
certList with Leaf Cert and Intermediate certList := []*x509.Certificate{leafCert, leafCert} // Try to find a valid chain for cert err = Verify(store, "secure.example.com", certList) if err == nil { t.Fatalf("expected error due to more than one leaf certificate") } } func TestVerifyLeafWithNoLeaf(t *testing.T) { // Get root certificate rootCA, err := LoadCertFromFile("../fixtures/root-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Get intermediate certificate intermediateCA, err := LoadCertFromFile("../fixtures/intermediate-ca.crt") if err != nil { t.Fatalf("couldn't load fixture: %v", err) } // Create a store and add the CA root store := NewX509MemStore() err = store.AddCert(rootCA) if err != nil { t.Fatalf("failed to load certificate from file: %v", err) } // Get our certList with Leaf Cert and Intermediate certList := []*x509.Certificate{intermediateCA, intermediateCA} // Try to find a valid chain for cert err = Verify(store, "secure.example.com", certList) if err == nil { t.Fatalf("expected error due to no leafs provided") } } notary-0.1/trustmanager/x509utils.go000066400000000000000000000346131262207326400175320ustar00rootroot00000000000000package trustmanager import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "errors" "fmt" "io" "io/ioutil" "math/big" "net/http" "net/url" "path/filepath" "time" "github.com/Sirupsen/logrus" "github.com/agl/ed25519" "github.com/docker/notary/tuf/data" ) // GetCertFromURL tries to get a X509 certificate given a HTTPS URL func GetCertFromURL(urlStr string) (*x509.Certificate, error) { url, err := url.Parse(urlStr) if err != nil { return nil, err } // Check if we are adding via HTTPS if url.Scheme != "https" { return nil, errors.New("only HTTPS URLs allowed") } // Download the certificate and write to directory resp, err := http.Get(url.String()) if err != nil { return nil, err } // Copy the content to certBytes defer resp.Body.Close() 
certBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } // Try to extract the first valid PEM certificate from the bytes cert, err := LoadCertFromPEM(certBytes) if err != nil { return nil, err } return cert, nil } // CertToPEM is an utility function returns a PEM encoded x509 Certificate func CertToPEM(cert *x509.Certificate) []byte { pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) return pemCert } // LoadCertFromPEM returns the first certificate found in a bunch of bytes or error // if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85. func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) { for len(pemBytes) > 0 { var block *pem.Block block, pemBytes = pem.Decode(pemBytes) if block == nil { return nil, errors.New("no certificates found in PEM data") } if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { continue } cert, err := x509.ParseCertificate(block.Bytes) if err != nil { continue } return cert, nil } return nil, errors.New("no certificates found in PEM data") } // FingerprintCert returns a TUF compliant fingerprint for a X509 Certificate func FingerprintCert(cert *x509.Certificate) (string, error) { certID, err := fingerprintCert(cert) if err != nil { return "", err } return string(certID), nil } func fingerprintCert(cert *x509.Certificate) (CertID, error) { block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw} pemdata := pem.EncodeToMemory(&block) var tufKey data.PublicKey switch cert.PublicKeyAlgorithm { case x509.RSA: tufKey = data.NewRSAx509PublicKey(pemdata) case x509.ECDSA: tufKey = data.NewECDSAx509PublicKey(pemdata) default: return "", fmt.Errorf("got Unknown key type while fingerprinting certificate") } return CertID(tufKey.ID()), nil } // loadCertsFromDir receives a store AddCertFromFile for each certificate found func loadCertsFromDir(s *X509FileStore) error { certFiles := s.fileStore.ListFiles() for _, f := range certFiles { // ListFiles 
returns relative paths fullPath := filepath.Join(s.fileStore.BaseDir(), f) err := s.AddCertFromFile(fullPath) if err != nil { if _, ok := err.(*ErrCertValidation); ok { logrus.Debugf("ignoring certificate, did not pass validation: %s", f) continue } if _, ok := err.(*ErrCertExists); ok { logrus.Debugf("ignoring certificate, already exists in the store: %s", f) continue } return err } } return nil } // LoadCertFromFile loads the first certificate from the file provided. The // data is expected to be PEM Encoded and contain one of more certificates // with PEM type "CERTIFICATE" func LoadCertFromFile(filename string) (*x509.Certificate, error) { certs, err := LoadCertBundleFromFile(filename) if err != nil { return nil, err } return certs[0], nil } // LoadCertBundleFromFile loads certificates from the []byte provided. The // data is expected to be PEM Encoded and contain one of more certificates // with PEM type "CERTIFICATE" func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) { b, err := ioutil.ReadFile(filename) if err != nil { return nil, err } return LoadCertBundleFromPEM(b) } // LoadCertBundleFromPEM loads certificates from the []byte provided. 
The // data is expected to be PEM Encoded and contain one of more certificates // with PEM type "CERTIFICATE" func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { certificates := []*x509.Certificate{} var block *pem.Block block, pemBytes = pem.Decode(pemBytes) for ; block != nil; block, pemBytes = pem.Decode(pemBytes) { if block.Type == "CERTIFICATE" { cert, err := x509.ParseCertificate(block.Bytes) if err != nil { return nil, err } certificates = append(certificates, cert) } else { return nil, fmt.Errorf("invalid pem block type: %s", block.Type) } } if len(certificates) == 0 { return nil, fmt.Errorf("no valid certificates found") } return certificates, nil } // GetLeafCerts parses a list of x509 Certificates and returns all of them // that aren't CA func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate { var leafCerts []*x509.Certificate for _, cert := range certs { if cert.IsCA { continue } leafCerts = append(leafCerts, cert) } return leafCerts } // GetIntermediateCerts parses a list of x509 Certificates and returns all of the // ones marked as a CA, to be used as intermediates func GetIntermediateCerts(certs []*x509.Certificate) (intCerts []*x509.Certificate) { for _, cert := range certs { if cert.IsCA { intCerts = append(intCerts, cert) } } return intCerts } // ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It // only supports RSA (PKCS#1) and attempts to decrypt using the passphrase, if encrypted. 
func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) { block, _ := pem.Decode(pemBytes) if block == nil { return nil, errors.New("no valid private key found") } switch block.Type { case "RSA PRIVATE KEY": var privKeyBytes []byte var err error if x509.IsEncryptedPEMBlock(block) { privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) if err != nil { return nil, errors.New("could not decrypt private key") } } else { privKeyBytes = block.Bytes } rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes) if err != nil { return nil, fmt.Errorf("could not parse DER encoded key: %v", err) } tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey) if err != nil { return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err) } return tufRSAPrivateKey, nil case "EC PRIVATE KEY": var privKeyBytes []byte var err error if x509.IsEncryptedPEMBlock(block) { privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) if err != nil { return nil, errors.New("could not decrypt private key") } } else { privKeyBytes = block.Bytes } ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes) if err != nil { return nil, fmt.Errorf("could not parse DER encoded private key: %v", err) } tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey) if err != nil { return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) } return tufECDSAPrivateKey, nil case "ED25519 PRIVATE KEY": // We serialize ED25519 keys by concatenating the private key // to the public key and encoding with PEM. See the // ED25519ToPrivateKey function. 
var privKeyBytes []byte var err error if x509.IsEncryptedPEMBlock(block) { privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase)) if err != nil { return nil, errors.New("could not decrypt private key") } } else { privKeyBytes = block.Bytes } tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes) if err != nil { return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err) } return tufECDSAPrivateKey, nil default: return nil, fmt.Errorf("unsupported key type %q", block.Type) } } // GenerateRSAKey generates an RSA private key and returns a TUF PrivateKey func GenerateRSAKey(random io.Reader, bits int) (data.PrivateKey, error) { rsaPrivKey, err := rsa.GenerateKey(random, bits) if err != nil { return nil, fmt.Errorf("could not generate private key: %v", err) } tufPrivKey, err := RSAToPrivateKey(rsaPrivKey) if err != nil { return nil, err } logrus.Debugf("generated RSA key with keyID: %s", tufPrivKey.ID()) return tufPrivKey, nil } // RSAToPrivateKey converts an rsa.Private key to a TUF data.PrivateKey type func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) { // Get a DER-encoded representation of the PublicKey rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey) if err != nil { return nil, fmt.Errorf("failed to marshal public key: %v", err) } // Get a DER-encoded representation of the PrivateKey rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey) pubKey := data.NewRSAPublicKey(rsaPubBytes) return data.NewRSAPrivateKey(pubKey, rsaPrivBytes) } // GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) { ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random) if err != nil { return nil, err } tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey) if err != nil { return nil, err } logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID()) return tufPrivKey, nil } // GenerateED25519Key 
generates an ED25519 private key and returns a TUF // PrivateKey. The serialization format we use is just the public key bytes // followed by the private key bytes func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) { pub, priv, err := ed25519.GenerateKey(random) if err != nil { return nil, err } var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte copy(serialized[:], pub[:]) copy(serialized[ed25519.PublicKeySize:], priv[:]) tufPrivKey, err := ED25519ToPrivateKey(serialized[:]) if err != nil { return nil, err } logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID()) return tufPrivKey, nil } // ECDSAToPrivateKey converts an ecdsa.Private key to a TUF data.PrivateKey type func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) { // Get a DER-encoded representation of the PublicKey ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey) if err != nil { return nil, fmt.Errorf("failed to marshal public key: %v", err) } // Get a DER-encoded representation of the PrivateKey ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey) if err != nil { return nil, fmt.Errorf("failed to marshal private key: %v", err) } pubKey := data.NewECDSAPublicKey(ecdsaPubBytes) return data.NewECDSAPrivateKey(pubKey, ecdsaPrivKeyBytes) } // ED25519ToPrivateKey converts a serialized ED25519 key to a TUF // data.PrivateKey type func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) { if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize { return nil, errors.New("malformed ed25519 private key") } pubKey := data.NewED25519PublicKey(privKeyBytes[:ed25519.PublicKeySize]) return data.NewED25519PrivateKey(*pubKey, privKeyBytes) } func blockType(k data.PrivateKey) (string, error) { switch k.Algorithm() { case data.RSAKey, data.RSAx509Key: return "RSA PRIVATE KEY", nil case data.ECDSAKey, data.ECDSAx509Key: return "EC PRIVATE KEY", nil case data.ED25519Key: return "ED25519 
PRIVATE KEY", nil default: return "", fmt.Errorf("algorithm %s not supported", k.Algorithm()) } } // KeyToPEM returns a PEM encoded key from a Private Key func KeyToPEM(privKey data.PrivateKey) ([]byte, error) { bt, err := blockType(privKey) if err != nil { return nil, err } return pem.EncodeToMemory(&pem.Block{Type: bt, Bytes: privKey.Private()}), nil } // EncryptPrivateKey returns an encrypted PEM key given a Privatekey // and a passphrase func EncryptPrivateKey(key data.PrivateKey, passphrase string) ([]byte, error) { bt, err := blockType(key) if err != nil { return nil, err } password := []byte(passphrase) cipherType := x509.PEMCipherAES256 encryptedPEMBlock, err := x509.EncryptPEMBlock(rand.Reader, bt, key.Private(), password, cipherType) if err != nil { return nil, err } return pem.EncodeToMemory(encryptedPEMBlock), nil } // CertToKey transforms a single input certificate into its corresponding // PublicKey func CertToKey(cert *x509.Certificate) data.PublicKey { block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw} pemdata := pem.EncodeToMemory(&block) switch cert.PublicKeyAlgorithm { case x509.RSA: return data.NewRSAx509PublicKey(pemdata) case x509.ECDSA: return data.NewECDSAx509PublicKey(pemdata) default: logrus.Debugf("Unknown key type parsed from certificate: %v", cert.PublicKeyAlgorithm) return nil } } // CertsToKeys transforms each of the input certificates into it's corresponding // PublicKey func CertsToKeys(certs []*x509.Certificate) map[string]data.PublicKey { keys := make(map[string]data.PublicKey) for _, cert := range certs { newKey := CertToKey(cert) keys[newKey.ID()] = newKey } return keys } // NewCertificate returns an X509 Certificate following a template, given a GUN. 
func NewCertificate(gun string) (*x509.Certificate, error) { notBefore := time.Now() // Certificates will expire in 10 years notAfter := notBefore.Add(time.Hour * 24 * 365 * 10) serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, fmt.Errorf("failed to generate new certificate: %v", err) } return &x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ CommonName: gun, }, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning}, BasicConstraintsValid: true, }, nil } // X509PublicKeyID returns a public key ID as a string, given a // data.PublicKey that contains an X509 Certificate func X509PublicKeyID(certPubKey data.PublicKey) (string, error) { cert, err := LoadCertFromPEM(certPubKey.Public()) if err != nil { return "", err } pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) if err != nil { return "", err } var key data.PublicKey switch certPubKey.Algorithm() { case data.ECDSAx509Key: key = data.NewECDSAPublicKey(pubKeyBytes) case data.RSAx509Key: key = data.NewRSAPublicKey(pubKeyBytes) } return key.ID(), nil } notary-0.1/trustmanager/x509utils_test.go000066400000000000000000000141141262207326400205630ustar00rootroot00000000000000package trustmanager import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/x509" "io/ioutil" "strings" "testing" "time" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) func TestCertsToKeys(t *testing.T) { // Get root certificate rootCA, err := LoadCertFromFile("../fixtures/root-ca.crt") assert.NoError(t, err) // Get intermediate certificate intermediateCA, err := LoadCertFromFile("../fixtures/intermediate-ca.crt") assert.NoError(t, err) // Get leaf certificate leafCert, err := LoadCertFromFile("../fixtures/secure.example.com.crt") assert.NoError(t, err) // Get our certList 
with Leaf Cert and Intermediate certList := []*x509.Certificate{leafCert, intermediateCA, rootCA} // Call CertsToKEys keys := CertsToKeys(certList) assert.NotNil(t, keys) assert.Len(t, keys, 3) // Call GetLeafCerts newKeys := GetLeafCerts(certList) assert.NotNil(t, newKeys) assert.Len(t, newKeys, 1) // Call GetIntermediateCerts (checks for certs with IsCA true) newKeys = GetIntermediateCerts(certList) assert.NotNil(t, newKeys) assert.Len(t, newKeys, 2) } func TestNewCertificate(t *testing.T) { cert, err := NewCertificate("docker.com/alpine") assert.NoError(t, err) assert.Equal(t, cert.Subject.CommonName, "docker.com/alpine") assert.True(t, time.Now().Before(cert.NotAfter)) assert.True(t, time.Now().AddDate(10, 0, 1).After(cert.NotAfter)) } func TestKeyOperations(t *testing.T) { // Generate our ED25519 private key edKey, err := GenerateED25519Key(rand.Reader) assert.NoError(t, err) // Generate our EC private key ecKey, err := GenerateECDSAKey(rand.Reader) assert.NoError(t, err) // Generate our RSA private key rsaKey, err := GenerateRSAKey(rand.Reader, 512) // Encode our ED private key edPEM, err := KeyToPEM(edKey) assert.NoError(t, err) // Encode our EC private key ecPEM, err := KeyToPEM(ecKey) assert.NoError(t, err) // Encode our RSA private key rsaPEM, err := KeyToPEM(rsaKey) assert.NoError(t, err) // Check to see if ED key it is encoded stringEncodedEDKey := string(edPEM) assert.True(t, strings.Contains(stringEncodedEDKey, "-----BEGIN ED25519 PRIVATE KEY-----")) // Check to see if EC key it is encoded stringEncodedECKey := string(ecPEM) assert.True(t, strings.Contains(stringEncodedECKey, "-----BEGIN EC PRIVATE KEY-----")) // Check to see if RSA key it is encoded stringEncodedRSAKey := string(rsaPEM) assert.True(t, strings.Contains(stringEncodedRSAKey, "-----BEGIN RSA PRIVATE KEY-----")) // Decode our ED Key decodedEDKey, err := ParsePEMPrivateKey(edPEM, "") assert.NoError(t, err) assert.Equal(t, edKey.Private(), decodedEDKey.Private()) // Decode our EC Key 
decodedECKey, err := ParsePEMPrivateKey(ecPEM, "") assert.NoError(t, err) assert.Equal(t, ecKey.Private(), decodedECKey.Private()) // Decode our RSA Key decodedRSAKey, err := ParsePEMPrivateKey(rsaPEM, "") assert.NoError(t, err) assert.Equal(t, rsaKey.Private(), decodedRSAKey.Private()) // Encrypt our ED Key encryptedEDKey, err := EncryptPrivateKey(edKey, "ponies") assert.NoError(t, err) // Encrypt our EC Key encryptedECKey, err := EncryptPrivateKey(ecKey, "ponies") assert.NoError(t, err) // Encrypt our RSA Key encryptedRSAKey, err := EncryptPrivateKey(rsaKey, "ponies") assert.NoError(t, err) // Check to see if ED key it is encrypted stringEncryptedEDKey := string(encryptedEDKey) assert.True(t, strings.Contains(stringEncryptedEDKey, "-----BEGIN ED25519 PRIVATE KEY-----")) assert.True(t, strings.Contains(stringEncryptedEDKey, "Proc-Type: 4,ENCRYPTED")) // Check to see if EC key it is encrypted stringEncryptedECKey := string(encryptedECKey) assert.True(t, strings.Contains(stringEncryptedECKey, "-----BEGIN EC PRIVATE KEY-----")) assert.True(t, strings.Contains(stringEncryptedECKey, "Proc-Type: 4,ENCRYPTED")) // Check to see if RSA key it is encrypted stringEncryptedRSAKey := string(encryptedRSAKey) assert.True(t, strings.Contains(stringEncryptedRSAKey, "-----BEGIN RSA PRIVATE KEY-----")) assert.True(t, strings.Contains(stringEncryptedRSAKey, "Proc-Type: 4,ENCRYPTED")) // Decrypt our ED Key decryptedEDKey, err := ParsePEMPrivateKey(encryptedEDKey, "ponies") assert.NoError(t, err) assert.Equal(t, edKey.Private(), decryptedEDKey.Private()) // Decrypt our EC Key decryptedECKey, err := ParsePEMPrivateKey(encryptedECKey, "ponies") assert.NoError(t, err) assert.Equal(t, ecKey.Private(), decryptedECKey.Private()) // Decrypt our RSA Key decryptedRSAKey, err := ParsePEMPrivateKey(encryptedRSAKey, "ponies") assert.NoError(t, err) assert.Equal(t, rsaKey.Private(), decryptedRSAKey.Private()) } // X509PublickeyID returns the public key ID of a RSA X509 key rather than the // cert 
ID func TestRSAX509PublickeyID(t *testing.T) { fileBytes, err := ioutil.ReadFile("../fixtures/notary-server.key") assert.NoError(t, err) privKey, err := ParsePEMPrivateKey(fileBytes, "") assert.NoError(t, err) expectedTufID := privKey.ID() cert, err := LoadCertFromFile("../fixtures/notary-server.crt") assert.NoError(t, err) rsaKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey) assert.NoError(t, err) sameWayTufID := data.NewPublicKey(data.RSAKey, rsaKeyBytes).ID() actualTufKey := CertToKey(cert) actualTufID, err := X509PublicKeyID(actualTufKey) assert.Equal(t, sameWayTufID, actualTufID) assert.Equal(t, expectedTufID, actualTufID) } // X509PublickeyID returns the public key ID of an ECDSA X509 key rather than // the cert ID func TestECDSAX509PublickeyID(t *testing.T) { template, err := NewCertificate("something") assert.NoError(t, err) template.SignatureAlgorithm = x509.ECDSAWithSHA256 template.PublicKeyAlgorithm = x509.ECDSA privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) assert.NoError(t, err) tufPrivKey, err := ECDSAToPrivateKey(privKey) assert.NoError(t, err) derBytes, err := x509.CreateCertificate( rand.Reader, template, template, &privKey.PublicKey, privKey) assert.NoError(t, err) cert, err := x509.ParseCertificate(derBytes) assert.NoError(t, err) tufKey := CertToKey(cert) tufID, err := X509PublicKeyID(tufKey) assert.NoError(t, err) assert.Equal(t, tufPrivKey.ID(), tufID) } notary-0.1/trustmanager/yubikey/000077500000000000000000000000001262207326400170675ustar00rootroot00000000000000notary-0.1/trustmanager/yubikey/non_pkcs11.go000066400000000000000000000004651262207326400213770ustar00rootroot00000000000000// go list ./... and go test ./... will not pick up this package without this // file, because go ? ./... does not honor build tags. // e.g. "go list -tags pkcs11 ./..." will not list this package if all the // files in it have a build tag. 
// See https://github.com/golang/go/issues/11246 package yubikey notary-0.1/trustmanager/yubikey/pkcs11_darwin.go000066400000000000000000000003231262207326400220620ustar00rootroot00000000000000// +build pkcs11,darwin package yubikey var possiblePkcs11Libs = []string{ "/usr/local/lib/libykcs11.dylib", "/usr/local/docker/lib/libykcs11.dylib", "/usr/local/docker-experimental/lib/libykcs11.dylib", } notary-0.1/trustmanager/yubikey/pkcs11_interface.go000066400000000000000000000026671262207326400225530ustar00rootroot00000000000000// +build pkcs11 // an interface around the pkcs11 library, so that things can be mocked out // for testing package yubikey import "github.com/miekg/pkcs11" // IPKCS11 is an interface for wrapping github.com/miekg/pkcs11 type pkcs11LibLoader func(module string) IPKCS11Ctx func defaultLoader(module string) IPKCS11Ctx { return pkcs11.New(module) } // IPKCS11Ctx is an interface for wrapping the parts of // github.com/miekg/pkcs11.Ctx that yubikeystore requires type IPKCS11Ctx interface { Destroy() Initialize() error Finalize() error GetSlotList(tokenPresent bool) ([]uint, error) OpenSession(slotID uint, flags uint) (pkcs11.SessionHandle, error) CloseSession(sh pkcs11.SessionHandle) error Login(sh pkcs11.SessionHandle, userType uint, pin string) error Logout(sh pkcs11.SessionHandle) error CreateObject(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) ( pkcs11.ObjectHandle, error) DestroyObject(sh pkcs11.SessionHandle, oh pkcs11.ObjectHandle) error GetAttributeValue(sh pkcs11.SessionHandle, o pkcs11.ObjectHandle, a []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) FindObjectsInit(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) error FindObjects(sh pkcs11.SessionHandle, max int) ( []pkcs11.ObjectHandle, bool, error) FindObjectsFinal(sh pkcs11.SessionHandle) error SignInit(sh pkcs11.SessionHandle, m []*pkcs11.Mechanism, o pkcs11.ObjectHandle) error Sign(sh pkcs11.SessionHandle, message []byte) ([]byte, error) } 
notary-0.1/trustmanager/yubikey/pkcs11_linux.go000066400000000000000000000002631262207326400217400ustar00rootroot00000000000000// +build pkcs11,linux package yubikey var possiblePkcs11Libs = []string{ "/usr/lib/libykcs11.so", "/usr/lib/x86_64-linux-gnu/libykcs11.so", "/usr/local/lib/libykcs11.so", } notary-0.1/trustmanager/yubikey/yubikeystore.go000066400000000000000000000606651262207326400221710ustar00rootroot00000000000000// +build pkcs11 package yubikey import ( "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/sha256" "crypto/x509" "errors" "fmt" "io" "math/big" "os" "github.com/Sirupsen/logrus" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/miekg/pkcs11" ) const ( USER_PIN = "123456" SO_USER_PIN = "010203040506070801020304050607080102030405060708" numSlots = 4 // number of slots in the yubikey KeymodeNone = 0 KeymodeTouch = 1 // touch enabled KeymodePinOnce = 2 // require pin entry once KeymodePinAlways = 4 // require pin entry all the time // the key size, when importing a key into yubikey, MUST be 32 bytes ecdsaPrivateKeySize = 32 sigAttempts = 5 ) // what key mode to use when generating keys var ( yubikeyKeymode = KeymodeTouch | KeymodePinOnce // order in which to prefer token locations on the yubikey. // corresponds to: 9c, 9e, 9d, 9a slotIDs = []int{2, 1, 3, 0} ) // SetYubikeyKeyMode - sets the mode when generating yubikey keys. // This is to be used for testing. It does nothing if not building with tag // pkcs11. func SetYubikeyKeyMode(keyMode int) error { // technically 7 (1 | 2 | 4) is valid, but KeymodePinOnce + // KeymdoePinAlways don't really make sense together if keyMode < 0 || keyMode > 5 { return errors.New("Invalid key mode") } yubikeyKeymode = keyMode return nil } // SetTouchToSignUI - allows configurable UX for notifying a user that they // need to touch the yubikey to sign. 
The callback may be used to provide a // mechanism for updating a GUI (such as removing a modal) after the touch // has been made func SetTouchToSignUI(notifier func(), callback func()) { touchToSignUI = notifier if callback != nil { touchDoneCallback = callback } } var touchToSignUI = func() { fmt.Println("Please touch the attached Yubikey to perform signing.") } var touchDoneCallback = func() { // noop } var pkcs11Lib string func init() { for _, loc := range possiblePkcs11Libs { _, err := os.Stat(loc) if err == nil { p := pkcs11.New(loc) if p != nil { pkcs11Lib = loc return } } } } type ErrBackupFailed struct { err string } func (err ErrBackupFailed) Error() string { return fmt.Sprintf("Failed to backup private key to: %s", err.err) } type yubiSlot struct { role string slotID []byte } // YubiPrivateKey represents a private key inside of a yubikey type YubiPrivateKey struct { data.ECDSAPublicKey passRetriever passphrase.Retriever slot []byte libLoader pkcs11LibLoader } type YubikeySigner struct { YubiPrivateKey } func NewYubiPrivateKey(slot []byte, pubKey data.ECDSAPublicKey, passRetriever passphrase.Retriever) *YubiPrivateKey { return &YubiPrivateKey{ ECDSAPublicKey: pubKey, passRetriever: passRetriever, slot: slot, libLoader: defaultLoader, } } func (ys *YubikeySigner) Public() crypto.PublicKey { publicKey, err := x509.ParsePKIXPublicKey(ys.YubiPrivateKey.Public()) if err != nil { return nil } return publicKey } func (y *YubiPrivateKey) setLibLoader(loader pkcs11LibLoader) { y.libLoader = loader } // CryptoSigner returns a crypto.Signer tha wraps the YubiPrivateKey. 
Needed for // Certificate generation only func (y *YubiPrivateKey) CryptoSigner() crypto.Signer { return &YubikeySigner{YubiPrivateKey: *y} } // Private is not implemented in hardware keys func (y *YubiPrivateKey) Private() []byte { // We cannot return the private material from a Yubikey // TODO(david): We probably want to return an error here return nil } func (y YubiPrivateKey) SignatureAlgorithm() data.SigAlgorithm { return data.ECDSASignature } func (y *YubiPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { ctx, session, err := SetupHSMEnv(pkcs11Lib, y.libLoader) if err != nil { return nil, err } defer cleanup(ctx, session) v := signed.Verifiers[data.ECDSASignature] for i := 0; i < sigAttempts; i++ { sig, err := sign(ctx, session, y.slot, y.passRetriever, msg) if err != nil { return nil, fmt.Errorf("failed to sign using Yubikey: %v", err) } if err := v.Verify(&y.ECDSAPublicKey, sig, msg); err == nil { return sig, nil } } return nil, errors.New("Failed to generate signature on Yubikey.") } // If a byte array is less than the number of bytes specified by // ecdsaPrivateKeySize, left-zero-pad the byte array until // it is the required size. 
func ensurePrivateKeySize(payload []byte) []byte { final := payload if len(payload) < ecdsaPrivateKeySize { final = make([]byte, ecdsaPrivateKeySize) copy(final[ecdsaPrivateKeySize-len(payload):], payload) } return final } // addECDSAKey adds a key to the yubikey func addECDSAKey( ctx IPKCS11Ctx, session pkcs11.SessionHandle, privKey data.PrivateKey, pkcs11KeyID []byte, passRetriever passphrase.Retriever, role string, ) error { logrus.Debugf("Attempting to add key to yubikey with ID: %s", privKey.ID()) err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SO_USER_PIN) if err != nil { return err } defer ctx.Logout(session) // Create an ecdsa.PrivateKey out of the private key bytes ecdsaPrivKey, err := x509.ParseECPrivateKey(privKey.Private()) if err != nil { return err } ecdsaPrivKeyD := ensurePrivateKeySize(ecdsaPrivKey.D.Bytes()) template, err := trustmanager.NewCertificate(role) if err != nil { return fmt.Errorf("failed to create the certificate template: %v", err) } certBytes, err := x509.CreateCertificate(rand.Reader, template, template, ecdsaPrivKey.Public(), ecdsaPrivKey) if err != nil { return fmt.Errorf("failed to create the certificate: %v", err) } certTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE), pkcs11.NewAttribute(pkcs11.CKA_VALUE, certBytes), pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), } privateKeyTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY), pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA), pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}), pkcs11.NewAttribute(pkcs11.CKA_VALUE, ecdsaPrivKeyD), pkcs11.NewAttribute(pkcs11.CKA_VENDOR_DEFINED, yubikeyKeymode), } _, err = ctx.CreateObject(session, certTemplate) if err != nil { return fmt.Errorf("error importing: %v", err) } _, err = ctx.CreateObject(session, privateKeyTemplate) if 
err != nil { return fmt.Errorf("error importing: %v", err) } return nil } func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte) (*data.ECDSAPublicKey, string, error) { findTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY), } attrTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, []byte{0}), pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, []byte{0}), pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{0}), } if err := ctx.FindObjectsInit(session, findTemplate); err != nil { logrus.Debugf("Failed to init: %s", err.Error()) return nil, "", err } obj, _, err := ctx.FindObjects(session, 1) if err != nil { logrus.Debugf("Failed to find objects: %v", err) return nil, "", err } if err := ctx.FindObjectsFinal(session); err != nil { logrus.Debugf("Failed to finalize: %s", err.Error()) return nil, "", err } if len(obj) != 1 { logrus.Debugf("should have found one object") return nil, "", errors.New("no matching keys found inside of yubikey") } // Retrieve the public-key material to be able to create a new ECSAKey attr, err := ctx.GetAttributeValue(session, obj[0], attrTemplate) if err != nil { logrus.Debugf("Failed to get Attribute for: %v", obj[0]) return nil, "", err } // Iterate through all the attributes of this key and saves CKA_PUBLIC_EXPONENT and CKA_MODULUS. Removes ordering specific issues. 
var rawPubKey []byte for _, a := range attr { if a.Type == pkcs11.CKA_EC_POINT { rawPubKey = a.Value } } ecdsaPubKey := ecdsa.PublicKey{Curve: elliptic.P256(), X: new(big.Int).SetBytes(rawPubKey[3:35]), Y: new(big.Int).SetBytes(rawPubKey[35:])} pubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPubKey) if err != nil { logrus.Debugf("Failed to Marshal public key") return nil, "", err } return data.NewECDSAPublicKey(pubBytes), data.CanonicalRootRole, nil } // Sign returns a signature for a given signature request func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, payload []byte) ([]byte, error) { err := login(ctx, session, passRetriever, pkcs11.CKU_USER, USER_PIN) if err != nil { return nil, fmt.Errorf("error logging in: %v", err) } defer ctx.Logout(session) // Define the ECDSA Private key template class := pkcs11.CKO_PRIVATE_KEY privateKeyTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_CLASS, class), pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA), pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), } if err := ctx.FindObjectsInit(session, privateKeyTemplate); err != nil { logrus.Debugf("Failed to init find objects: %s", err.Error()) return nil, err } obj, _, err := ctx.FindObjects(session, 1) if err != nil { logrus.Debugf("Failed to find objects: %v", err) return nil, err } if err = ctx.FindObjectsFinal(session); err != nil { logrus.Debugf("Failed to finalize find objects: %s", err.Error()) return nil, err } if len(obj) != 1 { return nil, errors.New("length of objects found not 1") } var sig []byte err = ctx.SignInit( session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_ECDSA, nil)}, obj[0]) if err != nil { return nil, err } // Get the SHA256 of the payload digest := sha256.Sum256(payload) if (yubikeyKeymode & KeymodeTouch) > 0 { touchToSignUI() defer touchDoneCallback() } // a call to Sign, whether or not Sign fails, will clear the SignInit sig, err = ctx.Sign(session, 
digest[:]) if err != nil { logrus.Debugf("Error while signing: %s", err) return nil, err } if sig == nil { return nil, errors.New("Failed to create signature") } return sig[:], nil } func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever passphrase.Retriever, keyID string) error { err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SO_USER_PIN) if err != nil { return err } defer ctx.Logout(session) template := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), //pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY), pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE), } if err := ctx.FindObjectsInit(session, template); err != nil { logrus.Debugf("Failed to init find objects: %s", err.Error()) return err } obj, b, err := ctx.FindObjects(session, 1) if err != nil { logrus.Debugf("Failed to find objects: %s %v", err.Error(), b) return err } if err := ctx.FindObjectsFinal(session); err != nil { logrus.Debugf("Failed to finalize find objects: %s", err.Error()) return err } if len(obj) != 1 { logrus.Debugf("should have found exactly one object") return err } // Delete the certificate err = ctx.DestroyObject(session, obj[0]) if err != nil { logrus.Debugf("Failed to delete cert") return err } return nil } func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string]yubiSlot, err error) { keys = make(map[string]yubiSlot) findTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), //pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID), pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE), } attrTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}), pkcs11.NewAttribute(pkcs11.CKA_VALUE, []byte{0}), } if err = ctx.FindObjectsInit(session, findTemplate); err != nil { logrus.Debugf("Failed to init: %s", err.Error()) return } objs, b, err := ctx.FindObjects(session, numSlots) for 
err == nil { var o []pkcs11.ObjectHandle o, b, err = ctx.FindObjects(session, numSlots) if err != nil { continue } if len(o) == 0 { break } objs = append(objs, o...) } if err != nil { logrus.Debugf("Failed to find: %s %v", err.Error(), b) if len(objs) == 0 { return nil, err } } if err = ctx.FindObjectsFinal(session); err != nil { logrus.Debugf("Failed to finalize: %s", err.Error()) return } if len(objs) == 0 { return nil, errors.New("No keys found in yubikey.") } logrus.Debugf("Found %d objects matching list filters", len(objs)) for _, obj := range objs { var ( cert *x509.Certificate slot []byte ) // Retrieve the public-key material to be able to create a new ECDSA attr, err := ctx.GetAttributeValue(session, obj, attrTemplate) if err != nil { logrus.Debugf("Failed to get Attribute for: %v", obj) continue } // Iterate through all the attributes of this key and saves CKA_PUBLIC_EXPONENT and CKA_MODULUS. Removes ordering specific issues. for _, a := range attr { if a.Type == pkcs11.CKA_ID { slot = a.Value } if a.Type == pkcs11.CKA_VALUE { cert, err = x509.ParseCertificate(a.Value) if err != nil { continue } if !data.ValidRole(cert.Subject.CommonName) { continue } } } // we found nothing if cert == nil { continue } var ecdsaPubKey *ecdsa.PublicKey switch cert.PublicKeyAlgorithm { case x509.ECDSA: ecdsaPubKey = cert.PublicKey.(*ecdsa.PublicKey) default: logrus.Infof("Unsupported x509 PublicKeyAlgorithm: %d", cert.PublicKeyAlgorithm) continue } pubBytes, err := x509.MarshalPKIXPublicKey(ecdsaPubKey) if err != nil { logrus.Debugf("Failed to Marshal public key") continue } keys[data.NewECDSAPublicKey(pubBytes).ID()] = yubiSlot{ role: cert.Subject.CommonName, slotID: slot, } } return } func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, error) { findTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true), } attrTemplate := []*pkcs11.Attribute{ pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}), } if err := 
ctx.FindObjectsInit(session, findTemplate); err != nil { logrus.Debugf("Failed to init: %s", err.Error()) return nil, err } objs, b, err := ctx.FindObjects(session, numSlots) // if there are more objects than `numSlots`, get all of them until // there are no more to get for err == nil { var o []pkcs11.ObjectHandle o, b, err = ctx.FindObjects(session, numSlots) if err != nil { continue } if len(o) == 0 { break } objs = append(objs, o...) } taken := make(map[int]bool) if err != nil { logrus.Debugf("Failed to find: %s %v", err.Error(), b) return nil, err } if err = ctx.FindObjectsFinal(session); err != nil { logrus.Debugf("Failed to finalize: %s\n", err.Error()) return nil, err } for _, obj := range objs { // Retrieve the slot ID attr, err := ctx.GetAttributeValue(session, obj, attrTemplate) if err != nil { continue } // Iterate through attributes. If an ID attr was found, mark it as taken for _, a := range attr { if a.Type == pkcs11.CKA_ID { if len(a.Value) < 1 { continue } // a byte will always be capable of representing all slot IDs // for the Yubikeys slotNum := int(a.Value[0]) if slotNum >= numSlots { // defensive continue } taken[slotNum] = true } } } // iterate the token locations in our preferred order and use the first // available one. Otherwise exit the loop and return an error. 
for _, loc := range slotIDs { if !taken[loc] { return []byte{byte(loc)}, nil } } return nil, errors.New("Yubikey has no available slots.") } // YubiKeyStore is a KeyStore for private keys inside a Yubikey type YubiKeyStore struct { passRetriever passphrase.Retriever keys map[string]yubiSlot backupStore trustmanager.KeyStore libLoader pkcs11LibLoader } // NewYubiKeyStore returns a YubiKeyStore, given a backup key store to write any // generated keys to (usually a KeyFileStore) func NewYubiKeyStore(backupStore trustmanager.KeyStore, passphraseRetriever passphrase.Retriever) ( *YubiKeyStore, error) { s := &YubiKeyStore{ passRetriever: passphraseRetriever, keys: make(map[string]yubiSlot), backupStore: backupStore, libLoader: defaultLoader, } s.ListKeys() // populate keys field return s, nil } // Name returns a user friendly name for the location this store // keeps its data func (s YubiKeyStore) Name() string { return "yubikey" } func (s *YubiKeyStore) setLibLoader(loader pkcs11LibLoader) { s.libLoader = loader } func (s *YubiKeyStore) ListKeys() map[string]string { if len(s.keys) > 0 { return buildKeyMap(s.keys) } ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) if err != nil { logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error()) return nil } defer cleanup(ctx, session) keys, err := yubiListKeys(ctx, session) if err != nil { logrus.Debugf("Failed to list key from the yubikey: %s", err.Error()) return nil } s.keys = keys return buildKeyMap(keys) } // AddKey puts a key inside the Yubikey, as well as writing it to the backup store func (s *YubiKeyStore) AddKey(keyID, role string, privKey data.PrivateKey) error { added, err := s.addKey(keyID, role, privKey) if err != nil { return err } if added { err = s.backupStore.AddKey(privKey.ID(), role, privKey) if err != nil { defer s.RemoveKey(keyID) return ErrBackupFailed{err: err.Error()} } } return nil } // Only add if we haven't seen the key already. Return whether the key was // added. 
// addKey writes privKey to the token under the given role.  It returns true
// only when this call actually stored the key; (false, nil) means the key was
// already cached under keyID with the same role.  Only root keys are accepted.
func (s *YubiKeyStore) addKey(keyID, role string, privKey data.PrivateKey) (
	bool, error) {

	// We only allow adding root keys for now
	if role != data.CanonicalRootRole {
		return false, fmt.Errorf(
			"yubikey only supports storing root keys, got %s for key: %s", role, keyID)
	}

	ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
	if err != nil {
		logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
		return false, err
	}
	defer cleanup(ctx, session)

	if k, ok := s.keys[keyID]; ok {
		if k.role == role {
			// already have the key and it's associated with the correct role
			return false, nil
		}
	}

	slot, err := getNextEmptySlot(ctx, session)
	if err != nil {
		logrus.Debugf("Failed to get an empty yubikey slot: %s", err.Error())
		return false, err
	}
	logrus.Debugf("Attempting to store key using yubikey slot %v", slot)

	err = addECDSAKey(
		ctx, session, privKey, slot, s.passRetriever, role)
	if err == nil {
		// cache under the key's canonical ID (not the caller-supplied keyID)
		s.keys[privKey.ID()] = yubiSlot{
			role:   role,
			slotID: slot,
		}
		return true, nil
	}
	logrus.Debugf("Failed to add key to yubikey: %v", err)
	return false, err
}

// GetKey retrieves a key from the Yubikey only (it does not look inside the
// backup store)
func (s *YubiKeyStore) GetKey(keyID string) (data.PrivateKey, string, error) {
	ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
	if err != nil {
		logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error())
		return nil, "", err
	}
	defer cleanup(ctx, session)

	// only keys present in the in-memory cache can be fetched
	key, ok := s.keys[keyID]
	if !ok {
		return nil, "", errors.New("no matching keys found inside of yubikey")
	}

	pubKey, alias, err := getECDSAKey(ctx, session, key.slotID)
	if err != nil {
		logrus.Debugf("Failed to get key from slot %s: %s", key.slotID, err.Error())
		return nil, "", err
	}
	// Check to see if we're returning the intended keyID
	if pubKey.ID() != keyID {
		return nil, "", fmt.Errorf("expected root key: %s, but found: %s", keyID, pubKey.ID())
	}
	privKey := NewYubiPrivateKey(key.slotID, *pubKey, s.passRetriever)
	if privKey == nil {
		return nil, "",
errors.New("could not initialize new YubiPrivateKey") } return privKey, alias, err } // RemoveKey deletes a key from the Yubikey only (it does not remove it from the // backup store) func (s *YubiKeyStore) RemoveKey(keyID string) error { ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader) if err != nil { logrus.Debugf("Failed to initialize PKCS11 environment: %s", err.Error()) return nil } defer cleanup(ctx, session) key, ok := s.keys[keyID] if !ok { return errors.New("Key not present in yubikey") } err = yubiRemoveKey(ctx, session, key.slotID, s.passRetriever, keyID) if err == nil { delete(s.keys, keyID) } else { logrus.Debugf("Failed to remove from the yubikey KeyID %s: %v", keyID, err) } return err } // ExportKey doesn't work, because you can't export data from a Yubikey func (s *YubiKeyStore) ExportKey(keyID string) ([]byte, error) { logrus.Debugf("Attempting to export: %s key inside of YubiKeyStore", keyID) return nil, errors.New("Keys cannot be exported from a Yubikey.") } // ImportKey imports a root key into a Yubikey func (s *YubiKeyStore) ImportKey(pemBytes []byte, keyPath string) error { logrus.Debugf("Attempting to import: %s key inside of YubiKeyStore", keyPath) privKey, _, err := trustmanager.GetPasswdDecryptBytes( s.passRetriever, pemBytes, "", "imported root") if err != nil { logrus.Debugf("Failed to get and retrieve a key from: %s", keyPath) return err } if keyPath != data.CanonicalRootRole { return fmt.Errorf("yubikey only supports storing root keys") } _, err = s.addKey(privKey.ID(), "root", privKey) return err } func cleanup(ctx IPKCS11Ctx, session pkcs11.SessionHandle) { err := ctx.CloseSession(session) if err != nil { logrus.Debugf("Error closing session: %s", err.Error()) } finalizeAndDestroy(ctx) } func finalizeAndDestroy(ctx IPKCS11Ctx) { err := ctx.Finalize() if err != nil { logrus.Debugf("Error finalizing: %s", err.Error()) } ctx.Destroy() } // SetupHSMEnv is a method that depends on the existences func SetupHSMEnv(libraryPath string, 
libLoader pkcs11LibLoader) ( IPKCS11Ctx, pkcs11.SessionHandle, error) { if libraryPath == "" { return nil, 0, fmt.Errorf("no library found.") } p := libLoader(libraryPath) if p == nil { return nil, 0, fmt.Errorf("failed to load library %s", libraryPath) } if err := p.Initialize(); err != nil { defer finalizeAndDestroy(p) return nil, 0, fmt.Errorf( "found library %s, but initialize error %s", libraryPath, err.Error()) } slots, err := p.GetSlotList(true) if err != nil { defer finalizeAndDestroy(p) return nil, 0, fmt.Errorf( "loaded library %s, but failed to list HSM slots %s", libraryPath, err) } // Check to see if we got any slots from the HSM. if len(slots) < 1 { defer finalizeAndDestroy(p) return nil, 0, fmt.Errorf( "loaded library %s, but no HSM slots found", libraryPath) } // CKF_SERIAL_SESSION: TRUE if cryptographic functions are performed in serial with the application; FALSE if the functions may be performed in parallel with the application. // CKF_RW_SESSION: TRUE if the session is read/write; FALSE if the session is read-only session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION) if err != nil { defer cleanup(p, session) return nil, 0, fmt.Errorf( "loaded library %s, but failed to start session with HSM %s", libraryPath, err) } logrus.Debugf("Initialized PKCS11 library %s and started HSM session", libraryPath) return p, session, nil } // YubikeyAccessible returns true if a Yubikey can be accessed func YubikeyAccessible() bool { if pkcs11Lib == "" { return false } ctx, session, err := SetupHSMEnv(pkcs11Lib, defaultLoader) if err != nil { return false } defer cleanup(ctx, session) return true } func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever passphrase.Retriever, userFlag uint, defaultPassw string) error { // try default password err := ctx.Login(session, userFlag, defaultPassw) if err == nil { return nil } // default failed, ask user for password for attempts := 0; ; attempts++ { var ( giveup bool err 
error user string ) if userFlag == pkcs11.CKU_SO { user = "SO Pin" } else { user = "User Pin" } passwd, giveup, err := passRetriever(user, "yubikey", false, attempts) // Check if the passphrase retriever got an error or if it is telling us to give up if giveup || err != nil { return trustmanager.ErrPasswordInvalid{} } if attempts > 2 { return trustmanager.ErrAttemptsExceeded{} } // Try to convert PEM encoded bytes back to a PrivateKey using the passphrase err = ctx.Login(session, userFlag, passwd) if err == nil { return nil } } return nil } func buildKeyMap(keys map[string]yubiSlot) map[string]string { res := make(map[string]string) for k, v := range keys { res[k] = v.role } return res } notary-0.1/trustmanager/yubikey/yubikeystore_test.go000066400000000000000000000776061262207326400232330ustar00rootroot00000000000000// +build pkcs11 package yubikey import ( "crypto/rand" "errors" "fmt" "reflect" "testing" "github.com/docker/notary/passphrase" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/miekg/pkcs11" "github.com/stretchr/testify/assert" ) var ret = passphrase.ConstantRetriever("passphrase") // create a new store for clearing out keys, because we don't want to pollute // any cache func clearAllKeys(t *testing.T) { store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret) assert.NoError(t, err) for k := range store.ListKeys() { err := store.RemoveKey(k) assert.NoError(t, err) } } func TestEnsurePrivateKeySizePassesThroughRightSizeArrays(t *testing.T) { fullByteArray := make([]byte, ecdsaPrivateKeySize) for i := range fullByteArray { fullByteArray[i] = byte(1) } result := ensurePrivateKeySize(fullByteArray) assert.True(t, reflect.DeepEqual(fullByteArray, result)) } // The pad32Byte helper function left zero-pads byte arrays that are less than // ecdsaPrivateKeySize bytes func TestEnsurePrivateKeySizePadsLessThanRequiredSizeArrays(t *testing.T) { shortByteArray := make([]byte, ecdsaPrivateKeySize/2) for i := 
range shortByteArray {
		shortByteArray[i] = byte(1)
	}
	// expected result is the short array left-padded with zeros to full size
	expected := append(
		make([]byte, ecdsaPrivateKeySize-ecdsaPrivateKeySize/2),
		shortByteArray...)
	result := ensurePrivateKeySize(shortByteArray)
	assert.True(t, reflect.DeepEqual(expected, result))
}

// testAddKey generates a fresh ECDSA key, adds it to the store as a root key,
// and returns the key along with the AddKey error (not asserted here so that
// failure cases can be tested too).
func testAddKey(t *testing.T, store trustmanager.KeyStore) (data.PrivateKey, error) {
	privKey, err := trustmanager.GenerateECDSAKey(rand.Reader)
	assert.NoError(t, err)

	err = store.AddKey(privKey.ID(), data.CanonicalRootRole, privKey)
	return privKey, err
}

// addMaxKeys fills every slot on the token and returns the created key IDs.
func addMaxKeys(t *testing.T, store trustmanager.KeyStore) []string {
	var keys []string
	// create the maximum number of keys
	for i := 0; i < numSlots; i++ {
		privKey, err := testAddKey(t, store)
		assert.NoError(t, err)
		keys = append(keys, privKey.ID())
	}
	return keys
}

// We can add keys enough times to fill up all the slots in the Yubikey.
// They are backed up, and we can then list them and get the keys.
func TestYubiAddKeysAndRetrieve(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	// create 4 keys on the original store
	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)
	keys := addMaxKeys(t, store)

	// create a new store, since we want to be sure the original store's cache
	// is not masking any issues
	cleanStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	// All 4 keys should be in the original store, in the clean store (which
	// makes sure the keys are actually on the Yubikey and not on the original
	// store's cache, and on the backup store)
	for _, store := range []trustmanager.KeyStore{store, cleanStore, backup} {
		listedKeys := store.ListKeys()
		assert.Len(t, listedKeys, numSlots)
		for _, k := range keys {
			r, ok := listedKeys[k]
			assert.True(t, ok)
			assert.Equal(t, data.CanonicalRootRole, r)

			_, _, err := store.GetKey(k)
			assert.NoError(t, err)
		}
	}
}

// We can't add a key if there are no more slots
func TestYubiAddKeyFailureIfNoMoreSlots(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	// create 4 keys on the original store
	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)
	addMaxKeys(t, store)

	// add another key - should fail because there are no more slots
	badKey, err := testAddKey(t, store)
	assert.Error(t, err)

	// create a new store, since we want to be sure the original store's cache
	// is not masking any issues
	cleanStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	// The key should not be in the original store, in the new clean store, or
	// in the backup store.
	for _, store := range []trustmanager.KeyStore{store, cleanStore, backup} {
		// the key that wasn't created should not appear in ListKeys or GetKey
		_, _, err := store.GetKey(badKey.ID())
		assert.Error(t, err)
		for k := range store.ListKeys() {
			assert.NotEqual(t, badKey, k)
		}
	}
}

// If some random key in the middle was removed, adding a key will work (keys
// do not have to be deleted/added in order)
func TestYubiAddKeyCanAddToMiddleSlot(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	// create 4 keys on the original store
	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)
	keys := addMaxKeys(t, store)

	// delete one of the middle keys, and assert we can still create a new key
	keyIDToDelete := keys[numSlots/2]
	err = store.RemoveKey(keyIDToDelete)
	assert.NoError(t, err)

	newKey, err := testAddKey(t, store)
	assert.NoError(t, err)

	// create a new store, since we want to be sure the original store's cache
	// is not masking any issues
	cleanStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	// The new key should be in the original store, in the new clean store, and
	// in the backup store.  The old key should not be in the original store,
	// or the new clean store.
	for _, store := range []trustmanager.KeyStore{store, cleanStore, backup} {
		// new key should appear in all stores
		gottenKey, _, err := store.GetKey(newKey.ID())
		assert.NoError(t, err)
		assert.Equal(t, gottenKey.ID(), newKey.ID())

		listedKeys := store.ListKeys()
		_, ok := listedKeys[newKey.ID()]
		assert.True(t, ok)

		// old key should not be in the non-backup stores
		if store != backup {
			_, _, err := store.GetKey(keyIDToDelete)
			assert.Error(t, err)
			_, ok = listedKeys[keyIDToDelete]
			assert.False(t, ok)
		}
	}
}

// nonworkingBackup wraps a KeyMemoryStore but refuses every AddKey, so tests
// can exercise the backup-failure rollback path.
type nonworkingBackup struct {
	trustmanager.KeyMemoryStore
}

// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *nonworkingBackup) AddKey(name, alias string, privKey data.PrivateKey) error {
	return errors.New("Nope!")
}

// If, when adding a key to the Yubikey, we can't back up the key, it should
// be removed from the Yubikey too because otherwise there is no way for
// the user to later get a backup of the key.
func TestYubiAddKeyRollsBackIfCannotBackup(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	backup := &nonworkingBackup{
		KeyMemoryStore: *trustmanager.NewKeyMemoryStore(ret),
	}
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)

	_, err = testAddKey(t, store)
	assert.Error(t, err)
	assert.IsType(t, ErrBackupFailed{}, err)

	// there should be no keys on the yubikey
	assert.Len(t, cleanListKeys(t), 0)
}

// If, when adding a key to the Yubikey, and it already exists, we succeed
// without adding it to the backup store.
func TestYubiAddDuplicateKeySucceedsButDoesNotBackup(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	origStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	key, err := testAddKey(t, origStore)
	assert.NoError(t, err)

	backup := trustmanager.NewKeyMemoryStore(ret)
	cleanStore, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)
	assert.Len(t, cleanStore.ListKeys(), 1)

	// re-adding the same key must succeed without touching the backup store
	err = cleanStore.AddKey(key.ID(), "root", key)
	assert.NoError(t, err)

	// there should be just 1 key on the yubikey
	assert.Len(t, cleanListKeys(t), 1)
	// nothing was added to the backup
	assert.Len(t, backup.ListKeys(), 0)
}

// RemoveKey removes a key from the yubikey, but not from the backup store.
func TestYubiRemoveKey(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)

	key, err := testAddKey(t, store)
	assert.NoError(t, err)
	err = store.RemoveKey(key.ID())
	assert.NoError(t, err)

	// key remains in the backup store
	backupKey, role, err := backup.GetKey(key.ID())
	assert.NoError(t, err)
	assert.Equal(t, data.CanonicalRootRole, role)
	assert.Equal(t, key.ID(), backupKey.ID())

	// create a new store, since we want to be sure the original store's cache
	// is not masking any issues
	cleanStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	// key is not in either the original store or the clean store
	for _, store := range []*YubiKeyStore{store, cleanStore} {
		_, _, err := store.GetKey(key.ID())
		assert.Error(t, err)
	}
}

// ImportKey imports a key as root without adding it to the backup store
func TestYubiImportNewKey(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)

	// generate key and import it
	privKey, err := trustmanager.GenerateECDSAKey(rand.Reader)
	assert.NoError(t, err)
	pemBytes, err := trustmanager.EncryptPrivateKey(privKey, "passphrase")
	assert.NoError(t, err)

	err = store.ImportKey(pemBytes, "root")
	assert.NoError(t, err)

	// key is not in backup store
	_, _, err = backup.GetKey(privKey.ID())
	assert.Error(t, err)

	// create a new store, since we want to be sure the original store's cache
	// is not masking any issues
	cleanStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	for _, store := range []*YubiKeyStore{store, cleanStore} {
		gottenKey, role, err := store.GetKey(privKey.ID())
		assert.NoError(t, err)
		assert.Equal(t, data.CanonicalRootRole, role)
		assert.Equal(t, privKey.Public(), gottenKey.Public())
	}
}

// Importing an existing key succeeds, but doesn't actually add the key, nor
// does it write it to backup.
func TestYubiImportExistingKey(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	// NOTE(review): err from testAddKey is not asserted here
	key, err := testAddKey(t, store)

	backup := trustmanager.NewKeyMemoryStore(ret)
	newStore, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)

	// for sanity, ensure that the key is already in the Yubikey
	k, _, err := newStore.GetKey(key.ID())
	assert.NoError(t, err)
	assert.NotNil(t, k)

	// import the key, which should have already been added to the yubikey
	pemBytes, err := trustmanager.EncryptPrivateKey(key, "passphrase")
	assert.NoError(t, err)
	err = newStore.ImportKey(pemBytes, "root")
	assert.NoError(t, err)

	// key is not in backup store
	_, _, err = backup.GetKey(key.ID())
	assert.Error(t, err)
}

// Importing a key not as root fails, and it is not added to the backup store
func TestYubiImportNonRootKey(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)

	// generate key and import it
	privKey, err := trustmanager.GenerateECDSAKey(rand.Reader)
	assert.NoError(t, err)
	pemBytes, err := trustmanager.EncryptPrivateKey(privKey, "passphrase")
	assert.NoError(t, err)

	// the key's own ID is not a valid role, so the import must fail
	err = store.ImportKey(pemBytes, privKey.ID())
	assert.Error(t, err)

	// key is not in backup store
	_, _, err = backup.GetKey(privKey.ID())
	assert.Error(t, err)
}

// One cannot export from hardware - it will not export from the backup
func TestYubiExportKeyFails(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	key, err := testAddKey(t, store)
	assert.NoError(t, err)

	_, err = store.ExportKey(key.ID())
	assert.Error(t, err)
	assert.Equal(t, "Keys cannot be exported from a Yubikey.",
		err.Error())
}

// If there are keys in the backup store but no keys in the Yubikey,
// listing and getting cannot access the keys in the backup store
func TestYubiListAndGetKeysIgnoresBackup(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	backup := trustmanager.NewKeyMemoryStore(ret)
	key, err := testAddKey(t, backup)
	assert.NoError(t, err)

	// NOTE(review): err from NewYubiKeyStore is not asserted here
	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.Len(t, store.ListKeys(), 0)
	_, _, err = store.GetKey(key.ID())
	assert.Error(t, err)
}

// Get a YubiPrivateKey.  Check that it has the right algorithm, etc, and
// specifically that you cannot get the private bytes out.  Assume we can
// sign something.
func TestYubiKeyAndSign(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	ecdsaPrivateKey, err := testAddKey(t, store)
	assert.NoError(t, err)

	yubiPrivateKey, _, err := store.GetKey(ecdsaPrivateKey.ID())
	assert.NoError(t, err)

	assert.Equal(t, data.ECDSAKey, yubiPrivateKey.Algorithm())
	assert.Equal(t, data.ECDSASignature, yubiPrivateKey.SignatureAlgorithm())
	assert.Equal(t, ecdsaPrivateKey.Public(), yubiPrivateKey.Public())
	assert.Nil(t, yubiPrivateKey.Private())

	// The signature should be verified, but the importing the verifiers causes
	// an import cycle.  A bigger refactor needs to be done to fix it.
	msg := []byte("Hello there")
	_, err = yubiPrivateKey.Sign(rand.Reader, msg, nil)
	assert.NoError(t, err)
}

// ----- Negative tests that use stubbed pkcs11 for error injection -----

// pkcs11Stubbable is anything whose PKCS11 library loader can be replaced,
// so a stubbed (error-injecting) context can be substituted.
type pkcs11Stubbable interface {
	setLibLoader(pkcs11LibLoader)
}

// the three setup-phase PKCS11 calls that every operation depends on
var setupErrors = []string{"Initialize", "GetSlotList", "OpenSession"}

// Create a new store, so that we avoid any cache issues, and list keys
func cleanListKeys(t *testing.T) map[string]string {
	cleanStore, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	return cleanStore.ListKeys()
}

// If an error occurs during login, which only some functions do, the function
// under test will clean up after itself
func testYubiFunctionCleansUpOnLoginError(t *testing.T, toStub pkcs11Stubbable,
	functionUnderTest func() error) {

	toStub.setLibLoader(func(string) IPKCS11Ctx {
		return NewStubCtx(map[string]bool{"Login": true})
	})

	err := functionUnderTest()
	assert.Error(t, err)
	// a lot of these functions wrap other errors
	assert.Contains(t, err.Error(), trustmanager.ErrAttemptsExceeded{}.Error())

	// Set Up another time, to ensure we weren't left in a bad state
	// by the previous runs
	ctx, session, err := SetupHSMEnv(pkcs11Lib, defaultLoader)
	assert.NoError(t, err)
	cleanup(ctx, session)
}

// If one of the specified pkcs11 functions errors, the function under test
// will clean up after itself
func testYubiFunctionCleansUpOnSpecifiedErrors(t *testing.T,
	toStub pkcs11Stubbable, functionUnderTest func() error,
	dependentFunctions []string, functionShouldError bool) {

	for _, methodName := range dependentFunctions {
		toStub.setLibLoader(func(string) IPKCS11Ctx {
			return NewStubCtx(
				map[string]bool{methodName: true})
		})

		err := functionUnderTest()
		if functionShouldError {
			assert.Error(t, err,
				fmt.Sprintf("Didn't error when %s errored.", methodName))
			// a lot of these functions wrap other errors
			assert.Contains(t, err.Error(), errInjected{methodName}.Error())
		} else {
			assert.NoError(t, err)
		}
	}

	// Set Up another time, to ensure we weren't left in a bad state
	// by the previous runs
	ctx, session, err := SetupHSMEnv(pkcs11Lib, defaultLoader)
	assert.NoError(t, err)
	cleanup(ctx, session)
}

func TestYubiAddKeyCleansUpOnError(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	backup := trustmanager.NewKeyMemoryStore(ret)
	store, err := NewYubiKeyStore(backup, ret)
	assert.NoError(t, err)

	var _addkey = func() error {
		_, err := testAddKey(t, store)
		return err
	}

	testYubiFunctionCleansUpOnLoginError(t, store, _addkey)
	// all the PKCS11 functions AddKey depends on that aren't the login/logout
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _addkey, append(
		setupErrors,
		"FindObjectsInit",
		"FindObjects",
		"FindObjectsFinal",
		"CreateObject",
	), true)

	// given that everything should have errored, there should be no keys on
	// the yubikey and no keys in backup
	assert.Len(t, backup.ListKeys(), 0)
	assert.Len(t, cleanListKeys(t), 0)

	// Logout should not cause a function failure - it s a cleanup failure,
	// which shouldn't break anything, and it should clean up after itself.
	// The key should be added to both stores
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _addkey,
		[]string{"Logout"}, false)

	listedKeys := cleanListKeys(t)
	assert.Len(t, backup.ListKeys(), 1)
	assert.Len(t, listedKeys, 1)

	// Currently, if GetAttributeValue fails, the function succeeds, because if
	// we can't get the attribute value of an object, we don't know what slot
	// it's in, we assume its occupied slot is free (hence this failure will
	// cause the previous key to be overwritten).  This behavior may need to
	// be revisited.
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _addkey,
		[]string{"GetAttributeValue"}, false)

	newListedKeys := cleanListKeys(t)
	// because the original key got overwritten
	assert.Len(t, backup.ListKeys(), 2)
	assert.Len(t, newListedKeys, 1)
	for k := range newListedKeys {
		_, ok := listedKeys[k]
		assert.False(t, ok)
	}
}

func TestYubiGetKeyCleansUpOnError(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	key, err := testAddKey(t, store)
	assert.NoError(t, err)

	var _getkey = func() error {
		_, _, err := store.GetKey(key.ID())
		return err
	}

	// all the PKCS11 functions GetKey depends on
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _getkey, append(
		setupErrors,
		"FindObjectsInit",
		"FindObjects",
		"FindObjectsFinal",
		"GetAttributeValue",
	), true)
}

func TestYubiImportKeyCleansUpOnError(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)

	privKey, err := trustmanager.GenerateECDSAKey(rand.Reader)
	assert.NoError(t, err)
	pemBytes, err := trustmanager.EncryptPrivateKey(privKey, "passphrase")
	assert.NoError(t, err)

	var _importkey = func() error { return store.ImportKey(pemBytes, "root") }

	testYubiFunctionCleansUpOnLoginError(t, store, _importkey)
	// all the PKCS11 functions ImportKey depends on that aren't the login/logout
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _importkey, append(
		setupErrors,
		"FindObjectsInit",
		"FindObjects",
		"FindObjectsFinal",
		"CreateObject",
	), true)

	// given that everything should have errored, there should be no keys on
	// the yubikey
	assert.Len(t, cleanListKeys(t), 0)

	// Logout should not cause a function failure - it s a cleanup failure,
	// which shouldn't break anything, and it should clean up after itself.
	// The key should be added to both stores
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _importkey,
		[]string{"Logout"}, false)

	listedKeys := cleanListKeys(t)
	assert.Len(t, listedKeys, 1)

	// Currently, if GetAttributeValue fails, the function succeeds, because if
	// we can't get the attribute value of an object, we don't know what slot
	// it's in, we assume its occupied slot is free (hence this failure will
	// cause the previous key to be overwritten).  This behavior may need to
	// be revisited.
	for k := range listedKeys {
		err := store.RemoveKey(k)
		assert.NoError(t, err)
	}
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _importkey,
		[]string{"GetAttributeValue"}, false)
	assert.Len(t, cleanListKeys(t), 1)
}

func TestYubiRemoveKeyCleansUpOnError(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	key, err := testAddKey(t, store)
	assert.NoError(t, err)

	var _removekey = func() error { return store.RemoveKey(key.ID()) }

	testYubiFunctionCleansUpOnLoginError(t, store, _removekey)
	// RemoveKey just succeeds if we can't set up the yubikey
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _removekey, setupErrors,
		false)
	// all the PKCS11 functions RemoveKey depends on that aren't the login/logout
	// or setup/cleanup
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _removekey, []string{
		"FindObjectsInit",
		"FindObjects",
		"FindObjectsFinal",
		"DestroyObject",
	}, true)

	// given that everything should have errored, there should still be 1 key
	// on the yubikey
	assert.Len(t, cleanListKeys(t), 1)

	// this will not fail, but it should clean up after itself, and the key
	// should be added to both stores
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _removekey,
		[]string{"Logout"}, false)
	assert.Len(t, cleanListKeys(t), 0)
}

func TestYubiListKeyCleansUpOnError(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	// Do not call NewYubiKeyStore, because it list keys immediately to
	// build the cache.
	store := &YubiKeyStore{
		passRetriever: ret,
		keys:          make(map[string]yubiSlot),
		backupStore:   trustmanager.NewKeyMemoryStore(ret),
		libLoader:     defaultLoader,
	}

	var _listkeys = func() error {
		// ListKeys never fails
		store.ListKeys()
		return nil
	}

	// all the PKCS11 functions ListKey depends on - list keys never errors
	testYubiFunctionCleansUpOnSpecifiedErrors(t, store, _listkeys, append(
		setupErrors,
		"FindObjectsInit",
		"FindObjects",
		"FindObjectsFinal",
		"GetAttributeValue",
	), false)
}

// export key fails anyway, don't bother testing

func TestYubiSignCleansUpOnError(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	key, err := testAddKey(t, store)
	assert.NoError(t, err)
	privKey, _, err := store.GetKey(key.ID())
	assert.NoError(t, err)

	yubiPrivateKey, ok := privKey.(*YubiPrivateKey)
	assert.True(t, ok)

	var _sign = func() error {
		_, err = yubiPrivateKey.Sign(rand.Reader, []byte("Hello there"), nil)
		return err
	}

	testYubiFunctionCleansUpOnLoginError(t, yubiPrivateKey, _sign)
	// all the PKCS11 functions SignKey depends on that is not login/logout
	testYubiFunctionCleansUpOnSpecifiedErrors(t, yubiPrivateKey, _sign, append(
		setupErrors,
		"FindObjectsInit",
		"FindObjects",
		"FindObjectsFinal",
		"SignInit",
		"Sign",
	), true)

	// this will not fail, but it should clean up after itself, and the key
	// should be added to both stores
	testYubiFunctionCleansUpOnSpecifiedErrors(t, yubiPrivateKey, _sign,
		[]string{"Logout"}, false)
}

// If Sign gives us an invalid signature, we retry until successful up to
// a maximum of 5 times.
func TestYubiRetrySignUntilSuccess(t *testing.T) {
	if !YubikeyAccessible() {
		t.Skip("Must have Yubikey access.")
	}
	clearAllKeys(t)

	SetYubikeyKeyMode(KeymodeNone)
	defer func() {
		SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce)
	}()

	store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret)
	assert.NoError(t, err)
	key, err := testAddKey(t, store)
	assert.NoError(t, err)

	message := []byte("Hello there")
	goodSig, err := key.Sign(rand.Reader, message, nil)
	assert.NoError(t, err)

	privKey, _, err := store.GetKey(key.ID())
	assert.NoError(t, err)
	yubiPrivateKey, ok := privKey.(*YubiPrivateKey)
	assert.True(t, ok)

	// produce bad signatures for the first 2 calls, then the good one
	badSigner := &SignInvalidSigCtx{
		Ctx:     *pkcs11.New(pkcs11Lib),
		goodSig: goodSig,
		failNum: 2,
	}

	yubiPrivateKey.setLibLoader(func(string) IPKCS11Ctx { return badSigner })

	sig, err := yubiPrivateKey.Sign(rand.Reader, message, nil)
	assert.NoError(t, err)
	// because the SignInvalidSigCtx returns the good signature, we can just
	// deep equal instead of verifying
	assert.True(t, reflect.DeepEqual(goodSig, sig))
	assert.Equal(t, 3, badSigner.signCalls)
}

// If Sign gives us an invalid signature, we retry until up to a maximum of 5
// times, and if it's still invalid, fail.
func TestYubiRetrySignUntilFail(t *testing.T) { if !YubikeyAccessible() { t.Skip("Must have Yubikey access.") } clearAllKeys(t) SetYubikeyKeyMode(KeymodeNone) defer func() { SetYubikeyKeyMode(KeymodeTouch | KeymodePinOnce) }() store, err := NewYubiKeyStore(trustmanager.NewKeyMemoryStore(ret), ret) assert.NoError(t, err) key, err := testAddKey(t, store) assert.NoError(t, err) message := []byte("Hello there") goodSig, err := key.Sign(rand.Reader, message, nil) assert.NoError(t, err) privKey, _, err := store.GetKey(key.ID()) assert.NoError(t, err) yubiPrivateKey, ok := privKey.(*YubiPrivateKey) assert.True(t, ok) badSigner := &SignInvalidSigCtx{ Ctx: *pkcs11.New(pkcs11Lib), goodSig: goodSig, failNum: sigAttempts + 1, } yubiPrivateKey.setLibLoader(func(string) IPKCS11Ctx { return badSigner }) _, err = yubiPrivateKey.Sign(rand.Reader, message, nil) assert.Error(t, err) // because the SignInvalidSigCtx returns the good signature, we can just // deep equal instead of verifying assert.Equal(t, sigAttempts, badSigner.signCalls) } // ----- Stubbed pkcs11 for testing error conditions ------ // This is just a passthrough to the underlying pkcs11 library, with optional // error injection. This is to ensure that if errors occur during the process // of interacting with the Yubikey, that everything gets cleaned up sanely. // Note that this does not actually replicate an actual PKCS11 failure, since // who knows what the pkcs11 function call may have done to the key before it // errored. This just tests that we handle an error ok. 
type errInjected struct { methodName string } func (e errInjected) Error() string { return fmt.Sprintf("Injected failure in %s", e.methodName) } const ( uninitialized = 0 initialized = 1 sessioned = 2 loggedin = 3 ) type StubCtx struct { ctx IPKCS11Ctx functionShouldFail map[string]bool } func NewStubCtx(functionShouldFail map[string]bool) *StubCtx { realCtx := defaultLoader(pkcs11Lib) return &StubCtx{ ctx: realCtx, functionShouldFail: functionShouldFail, } } // Returns an error if we're supposed to error for this method func (s *StubCtx) checkErr(methodName string) error { if val, ok := s.functionShouldFail[methodName]; ok && val { return errInjected{methodName: methodName} } return nil } func (s *StubCtx) Destroy() { // can't error s.ctx.Destroy() } func (s *StubCtx) Initialize() error { err := s.checkErr("Initialize") if err != nil { return err } return s.ctx.Initialize() } func (s *StubCtx) Finalize() error { err := s.checkErr("Finalize") if err != nil { return err } return s.ctx.Finalize() } func (s *StubCtx) GetSlotList(tokenPresent bool) ([]uint, error) { err := s.checkErr("GetSlotList") if err != nil { return nil, err } return s.ctx.GetSlotList(tokenPresent) } func (s *StubCtx) OpenSession(slotID uint, flags uint) (pkcs11.SessionHandle, error) { err := s.checkErr("OpenSession") if err != nil { return pkcs11.SessionHandle(0), err } return s.ctx.OpenSession(slotID, flags) } func (s *StubCtx) CloseSession(sh pkcs11.SessionHandle) error { err := s.checkErr("CloseSession") if err != nil { return err } return s.ctx.CloseSession(sh) } func (s *StubCtx) Login(sh pkcs11.SessionHandle, userType uint, pin string) error { err := s.checkErr("Login") if err != nil { return err } return s.ctx.Login(sh, userType, pin) } func (s *StubCtx) Logout(sh pkcs11.SessionHandle) error { err := s.checkErr("Logout") if err != nil { return err } return s.ctx.Logout(sh) } func (s *StubCtx) CreateObject(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) ( pkcs11.ObjectHandle, error) { 
err := s.checkErr("CreateObject") if err != nil { return pkcs11.ObjectHandle(0), err } return s.ctx.CreateObject(sh, temp) } func (s *StubCtx) DestroyObject(sh pkcs11.SessionHandle, oh pkcs11.ObjectHandle) error { err := s.checkErr("DestroyObject") if err != nil { return err } return s.ctx.DestroyObject(sh, oh) } func (s *StubCtx) GetAttributeValue(sh pkcs11.SessionHandle, o pkcs11.ObjectHandle, a []*pkcs11.Attribute) ([]*pkcs11.Attribute, error) { err := s.checkErr("GetAttributeValue") if err != nil { return nil, err } return s.ctx.GetAttributeValue(sh, o, a) } func (s *StubCtx) FindObjectsInit(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) error { err := s.checkErr("FindObjectsInit") if err != nil { return err } return s.ctx.FindObjectsInit(sh, temp) } func (s *StubCtx) FindObjects(sh pkcs11.SessionHandle, max int) ( []pkcs11.ObjectHandle, bool, error) { err := s.checkErr("FindObjects") if err != nil { return nil, false, err } return s.ctx.FindObjects(sh, max) } func (s *StubCtx) FindObjectsFinal(sh pkcs11.SessionHandle) error { err := s.checkErr("FindObjectsFinal") if err != nil { return err } return s.ctx.FindObjectsFinal(sh) } func (s *StubCtx) SignInit(sh pkcs11.SessionHandle, m []*pkcs11.Mechanism, o pkcs11.ObjectHandle) error { err := s.checkErr("SignInit") if err != nil { return err } return s.ctx.SignInit(sh, m, o) } func (s *StubCtx) Sign(sh pkcs11.SessionHandle, message []byte) ([]byte, error) { // a call to Sign will clear SignInit whether or not it fails, so // replicate that by calling Sign, then optionally returning an error. sig, sigErr := s.ctx.Sign(sh, message) err := s.checkErr("Sign") if err != nil { return nil, err } return sig, sigErr } // a different stub Ctx object in which Sign returns an invalid signature some // number of times type SignInvalidSigCtx struct { pkcs11.Ctx // Signature verification is to mitigate against hardware failure while // signing - which might occur during testing. 
So to prevent spurious // errors, return a real known good signature in the success case. goodSig []byte failNum int // number of calls to fail before succeeding signCalls int // number of calls to Sign so far } func (s *SignInvalidSigCtx) Sign(sh pkcs11.SessionHandle, message []byte) ([]byte, error) { s.signCalls++ s.Ctx.Sign(sh, message) // clear out the SignInit if s.signCalls > s.failNum { return s.goodSig, nil } return []byte("12345"), nil } notary-0.1/tuf/000077500000000000000000000000001262207326400134705ustar00rootroot00000000000000notary-0.1/tuf/LICENSE000066400000000000000000000027751262207326400145100ustar00rootroot00000000000000Copyright (c) 2015, Docker Inc. Copyright (c) 2014-2015 Prime Directive, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Prime Directive, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. notary-0.1/tuf/README.md000066400000000000000000000034051262207326400147510ustar00rootroot00000000000000# GOTUF This is still a work in progress but will shortly be a fully compliant Go implementation of [The Update Framework (TUF)](http://theupdateframework.com/). ## Where's the CLI This repository provides a library only. The [Notary project](https://github.com/docker/notary) from Docker should be considered the official CLI to be used with this implementation of TUF. ## TODOs: - [X] Add Targets to existing repo - [X] Sign metadata files - [X] Refactor TufRepo to take care of signing ~~and verification~~ - [ ] Ensure consistent capitalization in naming (TUF\_\_\_ vs Tuf\_\_\_) - [X] Make caching of metadata files smarter - PR #5 - [ ] ~~Add configuration for CLI commands. Order of configuration priority from most to least: flags, config file, defaults~~ Notary should be the official CLI - [X] Reasses organization of data types. Possibly consolidate a few things into the data package but break up package into a few more distinct files - [ ] Comprehensive test cases - [ ] Delete files no longer in use - [ ] Fix up errors. Some have to be instantiated, others don't, the inconsistency is annoying. 
- [X] Bump version numbers in meta files (could probably be done better) ## Credits This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf), however in attempting to add delegations I found I was making such significant changes that I could not maintain backwards compatibility without the code becoming overly convoluted. Some features such as pluggable verifiers have alreayd been merged upstream to flynn/go-tuf and we are in discussion with [titanous](https://github.com/titanous) about working to merge the 2 implementations. This implementation retains the same 3 Clause BSD license present on the original flynn implementation. notary-0.1/tuf/client/000077500000000000000000000000001262207326400147465ustar00rootroot00000000000000notary-0.1/tuf/client/client.go000066400000000000000000000361641262207326400165650ustar00rootroot00000000000000package client import ( "bytes" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "path" "path/filepath" "strings" "github.com/Sirupsen/logrus" tuf "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/store" "github.com/docker/notary/tuf/utils" ) const maxSize int64 = 5 << 20 // Client is a usability wrapper around a raw TUF repo type Client struct { local *tuf.Repo remote store.RemoteStore keysDB *keys.KeyDB cache store.MetadataStore } // NewClient initialized a Client with the given repo, remote source of content, key database, and cache func NewClient(local *tuf.Repo, remote store.RemoteStore, keysDB *keys.KeyDB, cache store.MetadataStore) *Client { return &Client{ local: local, remote: remote, keysDB: keysDB, cache: cache, } } // Update performs an update to the TUF repo as defined by the TUF spec func (c *Client) Update() error { // 1. Get timestamp // a. If timestamp error (verification, expired, etc...) download new root and return to 1. // 2. 
Check if local snapshot is up to date // a. If out of date, get updated snapshot // i. If snapshot error, download new root and return to 1. // 3. Check if root correct against snapshot // a. If incorrect, download new root and return to 1. // 4. Iteratively download and search targets and delegations to find target meta logrus.Debug("updating TUF client") err := c.update() if err != nil { logrus.Debug("Error occurred. Root will be downloaded and another update attempted") if err := c.downloadRoot(); err != nil { logrus.Error("client Update (Root):", err) return err } // If we error again, we now have the latest root and just want to fail // out as there's no expectation the problem can be resolved automatically logrus.Debug("retrying TUF client update") return c.update() } return nil } func (c *Client) update() error { err := c.downloadTimestamp() if err != nil { logrus.Errorf("Client Update (Timestamp): %s", err.Error()) return err } err = c.downloadSnapshot() if err != nil { logrus.Errorf("Client Update (Snapshot): %s", err.Error()) return err } err = c.checkRoot() if err != nil { // In this instance the root has not expired base on time, but is // expired based on the snapshot dictating a new root has been produced. logrus.Debug(err) return tuf.ErrLocalRootExpired{} } // will always need top level targets at a minimum err = c.downloadTargets("targets") if err != nil { logrus.Errorf("Client Update (Targets): %s", err.Error()) return err } return nil } // checkRoot determines if the hash, and size are still those reported // in the snapshot file. It will also check the expiry, however, if the // hash and size in snapshot are unchanged but the root file has expired, // there is little expectation that the situation can be remedied. 
func (c Client) checkRoot() error { role := data.RoleName("root") size := c.local.Snapshot.Signed.Meta[role].Length hashSha256 := c.local.Snapshot.Signed.Meta[role].Hashes["sha256"] raw, err := c.cache.GetMeta("root", size) if err != nil { return err } hash := sha256.Sum256(raw) if !bytes.Equal(hash[:], hashSha256) { return fmt.Errorf("Cached root sha256 did not match snapshot root sha256") } if int64(len(raw)) != size { return fmt.Errorf("Cached root size did not match snapshot size") } root := &data.SignedRoot{} err = json.Unmarshal(raw, root) if err != nil { return ErrCorruptedCache{file: "root.json"} } if signed.IsExpired(root.Signed.Expires) { return tuf.ErrLocalRootExpired{} } return nil } // downloadRoot is responsible for downloading the root.json func (c *Client) downloadRoot() error { role := data.RoleName("root") size := maxSize var expectedSha256 []byte if c.local.Snapshot != nil { size = c.local.Snapshot.Signed.Meta[role].Length expectedSha256 = c.local.Snapshot.Signed.Meta[role].Hashes["sha256"] } // if we're bootstrapping we may not have a cached root, an // error will result in the "previous root version" being // interpreted as 0. 
var download bool var err error var cachedRoot []byte old := &data.Signed{} version := 0 if expectedSha256 != nil { // can only trust cache if we have an expected sha256 to trust cachedRoot, err = c.cache.GetMeta(role, size) } if cachedRoot == nil || err != nil { logrus.Debug("didn't find a cached root, must download") download = true } else { hash := sha256.Sum256(cachedRoot) if !bytes.Equal(hash[:], expectedSha256) { logrus.Debug("cached root's hash didn't match expected, must download") download = true } err := json.Unmarshal(cachedRoot, old) if err == nil { root, err := data.RootFromSigned(old) if err == nil { version = root.Signed.Version } else { logrus.Debug("couldn't parse Signed part of cached root, must download") download = true } } else { logrus.Debug("couldn't parse cached root, must download") download = true } } var s *data.Signed var raw []byte if download { raw, s, err = c.downloadSigned(role, size, expectedSha256) if err != nil { return err } } else { logrus.Debug("using cached root") s = old } if err := c.verifyRoot(role, s, version); err != nil { return err } if download { logrus.Debug("caching downloaded root") // Now that we have accepted new root, write it to cache if err = c.cache.SetMeta(role, raw); err != nil { logrus.Errorf("Failed to write root to local cache: %s", err.Error()) } } return nil } func (c Client) verifyRoot(role string, s *data.Signed, minVersion int) error { // this will confirm that the root has been signed by the old root role // as c.keysDB contains the root keys we bootstrapped with. 
// Still need to determine if there has been a root key update and // confirm signature with new root key logrus.Debug("verifying root with existing keys") err := signed.Verify(s, role, minVersion, c.keysDB) if err != nil { logrus.Debug("root did not verify with existing keys") return err } // This will cause keyDB to get updated, overwriting any keyIDs associated // with the roles in root.json logrus.Debug("updating known root roles and keys") root, err := data.RootFromSigned(s) if err != nil { logrus.Error(err.Error()) return err } err = c.local.SetRoot(root) if err != nil { logrus.Error(err.Error()) return err } // verify again now that the old keys have been replaced with the new keys. // TODO(endophage): be more intelligent and only re-verify if we detect // there has been a change in root keys logrus.Debug("verifying root with updated keys") err = signed.Verify(s, role, minVersion, c.keysDB) if err != nil { logrus.Debug("root did not verify with new keys") return err } logrus.Debug("successfully verified root") return nil } // downloadTimestamp is responsible for downloading the timestamp.json // Timestamps are special in that we ALWAYS attempt to download and only // use cache if the download fails (and the cache is still valid). func (c *Client) downloadTimestamp() error { logrus.Debug("downloadTimestamp") role := data.RoleName("timestamp") // We may not have a cached timestamp if this is the first time // we're interacting with the repo. This will result in the // version being 0 var download bool old := &data.Signed{} version := 0 cachedTS, err := c.cache.GetMeta(role, maxSize) if err == nil { err := json.Unmarshal(cachedTS, old) if err == nil { ts, err := data.TimestampFromSigned(old) if err == nil { version = ts.Signed.Version } } else { old = nil } } // unlike root, targets and snapshot, always try and download timestamps // from remote, only using the cache one if we couldn't reach remote. 
raw, s, err := c.downloadSigned(role, maxSize, nil) if err != nil || len(raw) == 0 { if err, ok := err.(store.ErrMetaNotFound); ok { return err } if old == nil { if err == nil { // couldn't retrieve data from server and don't have valid // data in cache. return store.ErrMetaNotFound{} } return err } logrus.Debug("using cached timestamp") s = old } else { download = true } err = signed.Verify(s, role, version, c.keysDB) if err != nil { return err } logrus.Debug("successfully verified timestamp") if download { c.cache.SetMeta(role, raw) } ts, err := data.TimestampFromSigned(s) if err != nil { return err } c.local.SetTimestamp(ts) return nil } // downloadSnapshot is responsible for downloading the snapshot.json func (c *Client) downloadSnapshot() error { logrus.Debug("downloadSnapshot") role := data.RoleName("snapshot") if c.local.Timestamp == nil { return ErrMissingMeta{role: "snapshot"} } size := c.local.Timestamp.Signed.Meta[role].Length expectedSha256, ok := c.local.Timestamp.Signed.Meta[role].Hashes["sha256"] if !ok { return ErrMissingMeta{role: "snapshot"} } var download bool old := &data.Signed{} version := 0 raw, err := c.cache.GetMeta(role, size) if raw == nil || err != nil { logrus.Debug("no snapshot in cache, must download") download = true } else { // file may have been tampered with on disk. Always check the hash! 
genHash := sha256.Sum256(raw) if !bytes.Equal(genHash[:], expectedSha256) { logrus.Debug("hash of snapshot in cache did not match expected hash, must download") download = true } err := json.Unmarshal(raw, old) if err == nil { snap, err := data.TimestampFromSigned(old) if err == nil { version = snap.Signed.Version } else { logrus.Debug("Could not parse Signed part of snapshot, must download") download = true } } else { logrus.Debug("Could not parse snapshot, must download") download = true } } var s *data.Signed if download { raw, s, err = c.downloadSigned(role, size, expectedSha256) if err != nil { return err } } else { logrus.Debug("using cached snapshot") s = old } err = signed.Verify(s, role, version, c.keysDB) if err != nil { return err } logrus.Debug("successfully verified snapshot") snap, err := data.SnapshotFromSigned(s) if err != nil { return err } c.local.SetSnapshot(snap) if download { err = c.cache.SetMeta(role, raw) if err != nil { logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error()) } } return nil } // downloadTargets is responsible for downloading any targets file // including delegates roles. 
func (c *Client) downloadTargets(role string) error { role = data.RoleName(role) // this will really only do something for base targets role if c.local.Snapshot == nil { return ErrMissingMeta{role: role} } snap := c.local.Snapshot.Signed root := c.local.Root.Signed r := c.keysDB.GetRole(role) if r == nil { return fmt.Errorf("Invalid role: %s", role) } keyIDs := r.KeyIDs s, err := c.getTargetsFile(role, keyIDs, snap.Meta, root.ConsistentSnapshot, r.Threshold) if err != nil { logrus.Error("Error getting targets file:", err) return err } t, err := data.TargetsFromSigned(s) if err != nil { return err } err = c.local.SetTargets(role, t) if err != nil { return err } return nil } func (c *Client) downloadSigned(role string, size int64, expectedSha256 []byte) ([]byte, *data.Signed, error) { raw, err := c.remote.GetMeta(role, size) if err != nil { return nil, nil, err } if expectedSha256 != nil { genHash := sha256.Sum256(raw) if !bytes.Equal(genHash[:], expectedSha256) { return nil, nil, ErrChecksumMismatch{role: role} } } s := &data.Signed{} err = json.Unmarshal(raw, s) if err != nil { return nil, nil, err } return raw, s, nil } func (c Client) getTargetsFile(role string, keyIDs []string, snapshotMeta data.Files, consistent bool, threshold int) (*data.Signed, error) { // require role exists in snapshots roleMeta, ok := snapshotMeta[role] if !ok { return nil, ErrMissingMeta{role: role} } expectedSha256, ok := snapshotMeta[role].Hashes["sha256"] if !ok { return nil, ErrMissingMeta{role: role} } // try to get meta file from content addressed cache var download bool old := &data.Signed{} version := 0 raw, err := c.cache.GetMeta(role, roleMeta.Length) if err != nil || raw == nil { logrus.Debugf("Couldn't not find cached %s, must download", role) download = true } else { // file may have been tampered with on disk. Always check the hash! 
genHash := sha256.Sum256(raw) if !bytes.Equal(genHash[:], expectedSha256) { download = true } err := json.Unmarshal(raw, old) if err == nil { targ, err := data.TargetsFromSigned(old) if err == nil { version = targ.Signed.Version } else { download = true } } else { download = true } } size := snapshotMeta[role].Length var s *data.Signed if download { rolePath, err := c.RoleTargetsPath(role, hex.EncodeToString(expectedSha256), consistent) if err != nil { return nil, err } raw, s, err = c.downloadSigned(rolePath, size, expectedSha256) if err != nil { return nil, err } } else { logrus.Debug("using cached ", role) s = old } err = signed.Verify(s, role, version, c.keysDB) if err != nil { return nil, err } logrus.Debugf("successfully verified %s", role) if download { // if we error when setting meta, we should continue. err = c.cache.SetMeta(role, raw) if err != nil { logrus.Errorf("Failed to write snapshot to local cache: %s", err.Error()) } } return s, nil } // RoleTargetsPath generates the appropriate filename for the targets file, // based on whether the repo is marked as consistent. 
func (c Client) RoleTargetsPath(role string, hashSha256 string, consistent bool) (string, error) { if consistent { dir := filepath.Dir(role) if strings.Contains(role, "/") { lastSlashIdx := strings.LastIndex(role, "/") role = role[lastSlashIdx+1:] } role = path.Join( dir, fmt.Sprintf("%s.%s.json", hashSha256, role), ) } return role, nil } // TargetMeta ensures the repo is up to date, downloading the minimum // necessary metadata files func (c Client) TargetMeta(path string) (*data.FileMeta, error) { c.Update() var meta *data.FileMeta pathDigest := sha256.Sum256([]byte(path)) pathHex := hex.EncodeToString(pathDigest[:]) // FIFO list of targets delegations to inspect for target roles := []string{data.ValidRoles["targets"]} var role string for len(roles) > 0 { // have to do these lines here because of order of execution in for statement role = roles[0] roles = roles[1:] // Download the target role file if necessary err := c.downloadTargets(role) if err != nil { // as long as we find a valid target somewhere we're happy. // continue and search other delegated roles if any continue } meta = c.local.TargetMeta(role, path) if meta != nil { // we found the target! 
return meta, nil } delegations := c.local.TargetDelegations(role, path, pathHex) for _, d := range delegations { roles = append(roles, d.Name) } } return meta, nil } // DownloadTarget downloads the target to dst from the remote func (c Client) DownloadTarget(dst io.Writer, path string, meta *data.FileMeta) error { reader, err := c.remote.GetTarget(path) if err != nil { return err } defer reader.Close() r := io.TeeReader( io.LimitReader(reader, meta.Length), dst, ) err = utils.ValidateTarget(r, meta) return err } notary-0.1/tuf/client/client_test.go000066400000000000000000000461021262207326400176150ustar00rootroot00000000000000package client import ( "crypto/sha256" "encoding/json" "testing" "time" "github.com/Sirupsen/logrus" tuf "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/testutils" "github.com/stretchr/testify/assert" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/signed" "github.com/docker/notary/tuf/store" ) func TestRotation(t *testing.T) { kdb := keys.NewDB() signer := signed.NewEd25519() repo := tuf.NewRepo(kdb, signer) remote := store.NewMemoryStore(nil, nil) cache := store.NewMemoryStore(nil, nil) // Generate initial root key and role and add to key DB rootKey, err := signer.Create("root", data.ED25519Key) assert.NoError(t, err, "Error creating root key") rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil) assert.NoError(t, err, "Error creating root role") kdb.AddKey(rootKey) err = kdb.AddRole(rootRole) assert.NoError(t, err, "Error adding root role to db") // Generate new key and role. These will appear in the root.json // but will not be added to the keyDB. 
replacementKey, err := signer.Create("root", data.ED25519Key) assert.NoError(t, err, "Error creating replacement root key") replacementRole, err := data.NewRole("root", 1, []string{replacementKey.ID()}, nil, nil) assert.NoError(t, err, "Error creating replacement root role") // Generate a new root with the replacement key and role testRoot, err := data.NewRoot( map[string]data.PublicKey{replacementKey.ID(): replacementKey}, map[string]*data.RootRole{"root": &replacementRole.RootRole}, false, ) assert.NoError(t, err, "Failed to create new root") // Sign testRoot with both old and new keys signedRoot, err := testRoot.ToSigned() err = signed.Sign(signer, signedRoot, rootKey, replacementKey) assert.NoError(t, err, "Failed to sign root") var origKeySig bool var replKeySig bool for _, sig := range signedRoot.Signatures { if sig.KeyID == rootKey.ID() { origKeySig = true } else if sig.KeyID == replacementKey.ID() { replKeySig = true } } assert.True(t, origKeySig, "Original root key signature not present") assert.True(t, replKeySig, "Replacement root key signature not present") client := NewClient(repo, remote, kdb, cache) err = client.verifyRoot("root", signedRoot, 0) assert.NoError(t, err, "Failed to verify key rotated root") } func TestRotationNewSigMissing(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) kdb := keys.NewDB() signer := signed.NewEd25519() repo := tuf.NewRepo(kdb, signer) remote := store.NewMemoryStore(nil, nil) cache := store.NewMemoryStore(nil, nil) // Generate initial root key and role and add to key DB rootKey, err := signer.Create("root", data.ED25519Key) assert.NoError(t, err, "Error creating root key") rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil) assert.NoError(t, err, "Error creating root role") kdb.AddKey(rootKey) err = kdb.AddRole(rootRole) assert.NoError(t, err, "Error adding root role to db") // Generate new key and role. These will appear in the root.json // but will not be added to the keyDB. 
replacementKey, err := signer.Create("root", data.ED25519Key) assert.NoError(t, err, "Error creating replacement root key") replacementRole, err := data.NewRole("root", 1, []string{replacementKey.ID()}, nil, nil) assert.NoError(t, err, "Error creating replacement root role") assert.NotEqual(t, rootKey.ID(), replacementKey.ID(), "Key IDs are the same") // Generate a new root with the replacement key and role testRoot, err := data.NewRoot( map[string]data.PublicKey{replacementKey.ID(): replacementKey}, map[string]*data.RootRole{"root": &replacementRole.RootRole}, false, ) assert.NoError(t, err, "Failed to create new root") _, ok := testRoot.Signed.Keys[rootKey.ID()] assert.False(t, ok, "Old root key appeared in test root") // Sign testRoot with both old and new keys signedRoot, err := testRoot.ToSigned() err = signed.Sign(signer, signedRoot, rootKey) assert.NoError(t, err, "Failed to sign root") var origKeySig bool var replKeySig bool for _, sig := range signedRoot.Signatures { if sig.KeyID == rootKey.ID() { origKeySig = true } else if sig.KeyID == replacementKey.ID() { replKeySig = true } } assert.True(t, origKeySig, "Original root key signature not present") assert.False(t, replKeySig, "Replacement root key signature was present and shouldn't be") client := NewClient(repo, remote, kdb, cache) err = client.verifyRoot("root", signedRoot, 0) assert.Error(t, err, "Should have errored on verify as replacement signature was missing.") } func TestRotationOldSigMissing(t *testing.T) { logrus.SetLevel(logrus.DebugLevel) kdb := keys.NewDB() signer := signed.NewEd25519() repo := tuf.NewRepo(kdb, signer) remote := store.NewMemoryStore(nil, nil) cache := store.NewMemoryStore(nil, nil) // Generate initial root key and role and add to key DB rootKey, err := signer.Create("root", data.ED25519Key) assert.NoError(t, err, "Error creating root key") rootRole, err := data.NewRole("root", 1, []string{rootKey.ID()}, nil, nil) assert.NoError(t, err, "Error creating root role") 
kdb.AddKey(rootKey) err = kdb.AddRole(rootRole) assert.NoError(t, err, "Error adding root role to db") // Generate new key and role. These will appear in the root.json // but will not be added to the keyDB. replacementKey, err := signer.Create("root", data.ED25519Key) assert.NoError(t, err, "Error creating replacement root key") replacementRole, err := data.NewRole("root", 1, []string{replacementKey.ID()}, nil, nil) assert.NoError(t, err, "Error creating replacement root role") assert.NotEqual(t, rootKey.ID(), replacementKey.ID(), "Key IDs are the same") // Generate a new root with the replacement key and role testRoot, err := data.NewRoot( map[string]data.PublicKey{replacementKey.ID(): replacementKey}, map[string]*data.RootRole{"root": &replacementRole.RootRole}, false, ) assert.NoError(t, err, "Failed to create new root") _, ok := testRoot.Signed.Keys[rootKey.ID()] assert.False(t, ok, "Old root key appeared in test root") // Sign testRoot with both old and new keys signedRoot, err := testRoot.ToSigned() err = signed.Sign(signer, signedRoot, replacementKey) assert.NoError(t, err, "Failed to sign root") var origKeySig bool var replKeySig bool for _, sig := range signedRoot.Signatures { if sig.KeyID == rootKey.ID() { origKeySig = true } else if sig.KeyID == replacementKey.ID() { replKeySig = true } } assert.False(t, origKeySig, "Original root key signature was present and shouldn't be") assert.True(t, replKeySig, "Replacement root key signature was not present") client := NewClient(repo, remote, kdb, cache) err = client.verifyRoot("root", signedRoot, 0) assert.Error(t, err, "Should have errored on verify as replacement signature was missing.") } func TestCheckRootExpired(t *testing.T) { repo := tuf.NewRepo(nil, nil) storage := store.NewMemoryStore(nil, nil) client := NewClient(repo, storage, nil, storage) root := &data.SignedRoot{} root.Signed.Expires = time.Now().AddDate(-1, 0, 0) signedRoot, err := root.ToSigned() assert.NoError(t, err) rootJSON, err := 
json.Marshal(signedRoot) assert.NoError(t, err) rootHash := sha256.Sum256(rootJSON) testSnap := &data.SignedSnapshot{ Signed: data.Snapshot{ Meta: map[string]data.FileMeta{ "root": { Length: int64(len(rootJSON)), Hashes: map[string][]byte{ "sha256": rootHash[:], }, }, }, }, } repo.SetRoot(root) repo.SetSnapshot(testSnap) storage.SetMeta("root", rootJSON) err = client.checkRoot() assert.Error(t, err) assert.IsType(t, tuf.ErrLocalRootExpired{}, err) } func TestChecksumMismatch(t *testing.T) { repo := tuf.NewRepo(nil, nil) localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, nil, localStorage) sampleTargets := data.NewTargets() orig, err := json.Marshal(sampleTargets) origSha256 := sha256.Sum256(orig) orig[0] = '}' // corrupt data, should be a { assert.NoError(t, err) remoteStorage.SetMeta("targets", orig) _, _, err = client.downloadSigned("targets", int64(len(orig)), origSha256[:]) assert.IsType(t, ErrChecksumMismatch{}, err) } func TestChecksumMatch(t *testing.T) { repo := tuf.NewRepo(nil, nil) localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, nil, localStorage) sampleTargets := data.NewTargets() orig, err := json.Marshal(sampleTargets) origSha256 := sha256.Sum256(orig) assert.NoError(t, err) remoteStorage.SetMeta("targets", orig) _, _, err = client.downloadSigned("targets", int64(len(orig)), origSha256[:]) assert.NoError(t, err) } func TestSizeMismatchLong(t *testing.T) { repo := tuf.NewRepo(nil, nil) localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, nil, localStorage) sampleTargets := data.NewTargets() orig, err := json.Marshal(sampleTargets) origSha256 := sha256.Sum256(orig) assert.NoError(t, err) l := int64(len(orig)) orig = append([]byte(" "), orig...) 
assert.Equal(t, l+1, int64(len(orig))) remoteStorage.SetMeta("targets", orig) _, _, err = client.downloadSigned("targets", l, origSha256[:]) // size just limits the data received, the error is caught // either during checksum verification or during json deserialization assert.IsType(t, ErrChecksumMismatch{}, err) } func TestSizeMismatchShort(t *testing.T) { repo := tuf.NewRepo(nil, nil) localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, nil, localStorage) sampleTargets := data.NewTargets() orig, err := json.Marshal(sampleTargets) origSha256 := sha256.Sum256(orig) assert.NoError(t, err) l := int64(len(orig)) orig = orig[1:] remoteStorage.SetMeta("targets", orig) _, _, err = client.downloadSigned("targets", l, origSha256[:]) // size just limits the data received, the error is caught // either during checksum verification or during json deserialization assert.IsType(t, ErrChecksumMismatch{}, err) } func TestDownloadTargetsHappy(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) signedOrig, err := repo.SignTargets("targets", data.DefaultExpires("targets")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("targets", orig) assert.NoError(t, err) // call repo.SignSnapshot to update the targets role in the snapshot repo.SignSnapshot(data.DefaultExpires("snapshot")) err = client.downloadTargets("targets") assert.NoError(t, err) } func TestDownloadTargetChecksumMismatch(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample targets signedOrig, err := repo.SignTargets("targets", 
data.DefaultExpires("targets")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) origSha256 := sha256.Sum256(orig) orig[0] = '}' // corrupt data, should be a { err = remoteStorage.SetMeta("targets", orig) assert.NoError(t, err) // create local snapshot with targets file // It's necessary to do it this way rather than calling repo.SignSnapshot // so that we have the wrong sha256 in the snapshot. snap := data.SignedSnapshot{ Signed: data.Snapshot{ Meta: data.Files{ "targets": data.FileMeta{ Length: int64(len(orig)), Hashes: data.Hashes{ "sha256": origSha256[:], }, }, }, }, } repo.Snapshot = &snap err = client.downloadTargets("targets") assert.IsType(t, ErrChecksumMismatch{}, err) } // TestDownloadTargetsNoChecksum: it's never valid to download any targets // role (incl. delegations) when a checksum is not available. func TestDownloadTargetsNoChecksum(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample targets signedOrig, err := repo.SignTargets("targets", data.DefaultExpires("targets")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("targets", orig) assert.NoError(t, err) delete(repo.Snapshot.Signed.Meta["targets"].Hashes, "sha256") err = client.downloadTargets("targets") assert.IsType(t, ErrMissingMeta{}, err) } // TestDownloadTargetsNoSnapshot: it's never valid to download any targets // role (incl. delegations) when a checksum is not available. 
func TestDownloadTargetsNoSnapshot(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample targets signedOrig, err := repo.SignTargets("targets", data.DefaultExpires("targets")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("targets", orig) assert.NoError(t, err) repo.Snapshot = nil err = client.downloadTargets("targets") assert.IsType(t, ErrMissingMeta{}, err) } func TestBootstrapDownloadRootHappy(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample root signedOrig, err := repo.SignRoot(data.DefaultExpires("root")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("root", orig) assert.NoError(t, err) // unset snapshot as if we're bootstrapping from nothing repo.Snapshot = nil err = client.downloadRoot() assert.NoError(t, err) } func TestUpdateDownloadRootHappy(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample root, snapshot, and timestamp signedOrig, err := repo.SignRoot(data.DefaultExpires("root")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("root", orig) assert.NoError(t, err) // sign snapshot to make root meta in snapshot get updated signedOrig, err = repo.SignSnapshot(data.DefaultExpires("snapshot")) err = client.downloadRoot() assert.NoError(t, err) } func TestUpdateDownloadRootBadChecksum(t *testing.T) { kdb, repo, _ 
:= testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // sign snapshot to make sure we have a checksum for root _, err := repo.SignSnapshot(data.DefaultExpires("snapshot")) assert.NoError(t, err) // create and "upload" sample root, snapshot, and timestamp signedOrig, err := repo.SignRoot(data.DefaultExpires("root")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("root", orig) assert.NoError(t, err) // don't sign snapshot again to ensure checksum is out of date (bad) err = client.downloadRoot() assert.IsType(t, ErrChecksumMismatch{}, err) } func TestDownloadTimestampHappy(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample timestamp signedOrig, err := repo.SignTimestamp(data.DefaultExpires("timestamp")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("timestamp", orig) assert.NoError(t, err) err = client.downloadTimestamp() assert.NoError(t, err) } func TestDownloadSnapshotHappy(t *testing.T) { kdb, repo, _ := testutils.EmptyRepo() localStorage := store.NewMemoryStore(nil, nil) remoteStorage := store.NewMemoryStore(nil, nil) client := NewClient(repo, remoteStorage, kdb, localStorage) // create and "upload" sample snapshot and timestamp signedOrig, err := repo.SignSnapshot(data.DefaultExpires("snapshot")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("snapshot", orig) assert.NoError(t, err) signedOrig, err = repo.SignTimestamp(data.DefaultExpires("timestamp")) assert.NoError(t, err) orig, err = json.Marshal(signedOrig) assert.NoError(t, err) err = 
remoteStorage.SetMeta("timestamp", orig)
	assert.NoError(t, err)

	err = client.downloadSnapshot()
	assert.NoError(t, err)
}

// TestDownloadSnapshotNoTimestamp: it should never be valid to download a
// snapshot if there is no timestamp, because the timestamp is what supplies
// the snapshot's checksum.
// (Comment previously mislabeled this test as TestDownloadSnapshotNoChecksum.)
func TestDownloadSnapshotNoTimestamp(t *testing.T) {
	kdb, repo, _ := testutils.EmptyRepo()
	localStorage := store.NewMemoryStore(nil, nil)
	remoteStorage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, remoteStorage, kdb, localStorage)

	// create and "upload" sample snapshot and timestamp
	signedOrig, err := repo.SignSnapshot(data.DefaultExpires("snapshot"))
	assert.NoError(t, err)
	orig, err := json.Marshal(signedOrig)
	assert.NoError(t, err)
	err = remoteStorage.SetMeta("snapshot", orig)
	assert.NoError(t, err)

	// drop the timestamp entirely, so no snapshot checksum is reachable
	repo.Timestamp = nil

	err = client.downloadSnapshot()
	assert.IsType(t, ErrMissingMeta{}, err)
}

// TestDownloadSnapshotNoChecksum: it should never be valid to download a
// snapshot when the (present) timestamp lists no sha256 checksum for it.
func TestDownloadSnapshotNoChecksum(t *testing.T) {
	kdb, repo, _ := testutils.EmptyRepo()
	localStorage := store.NewMemoryStore(nil, nil)
	remoteStorage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, remoteStorage, kdb, localStorage)

	// create and "upload" sample snapshot and timestamp
	signedOrig, err := repo.SignSnapshot(data.DefaultExpires("snapshot"))
	assert.NoError(t, err)
	orig, err := json.Marshal(signedOrig)
	assert.NoError(t, err)
	err = remoteStorage.SetMeta("snapshot", orig)
	assert.NoError(t, err)

	// remove just the sha256 entry; the timestamp itself remains
	delete(repo.Timestamp.Signed.Meta["snapshot"].Hashes, "sha256")

	err = client.downloadSnapshot()
	assert.IsType(t, ErrMissingMeta{}, err)
}

// TestDownloadSnapshotBadChecksum: a snapshot whose content does not match
// the checksum recorded in the timestamp must be rejected.
func TestDownloadSnapshotBadChecksum(t *testing.T) {
	kdb, repo, _ := testutils.EmptyRepo()
	localStorage := store.NewMemoryStore(nil, nil)
	remoteStorage := store.NewMemoryStore(nil, nil)
	client := NewClient(repo, remoteStorage, kdb, localStorage)

	// sign timestamp to ensure it has a checksum for snapshot
	_, err := repo.SignTimestamp(data.DefaultExpires("timestamp"))
	assert.NoError(t, err)

	// create and "upload" sample snapshot and timestamp
	signedOrig, err :=
repo.SignSnapshot(data.DefaultExpires("snapshot")) assert.NoError(t, err) orig, err := json.Marshal(signedOrig) assert.NoError(t, err) err = remoteStorage.SetMeta("snapshot", orig) assert.NoError(t, err) // by not signing timestamp again we ensure it has the wrong checksum err = client.downloadSnapshot() assert.IsType(t, ErrChecksumMismatch{}, err) } notary-0.1/tuf/client/errors.go000066400000000000000000000012531262207326400166120ustar00rootroot00000000000000package client import ( "fmt" ) // ErrChecksumMismatch - a checksum failed verification type ErrChecksumMismatch struct { role string } func (e ErrChecksumMismatch) Error() string { return fmt.Sprintf("tuf: checksum for %s did not match", e.role) } // ErrMissingMeta - couldn't find the FileMeta object for a role or target type ErrMissingMeta struct { role string } func (e ErrMissingMeta) Error() string { return fmt.Sprintf("tuf: sha256 checksum required for %s", e.role) } // ErrCorruptedCache - local data is incorrect type ErrCorruptedCache struct { file string } func (e ErrCorruptedCache) Error() string { return fmt.Sprintf("cache is corrupted: %s", e.file) } notary-0.1/tuf/data/000077500000000000000000000000001262207326400144015ustar00rootroot00000000000000notary-0.1/tuf/data/keys.go000066400000000000000000000307341262207326400157120ustar00rootroot00000000000000package data import ( "crypto" "crypto/ecdsa" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/asn1" "encoding/hex" "errors" "io" "math/big" "github.com/Sirupsen/logrus" "github.com/agl/ed25519" "github.com/jfrazelle/go/canonical/json" ) // PublicKey is the necessary interface for public keys type PublicKey interface { ID() string Algorithm() string Public() []byte } // PrivateKey adds the ability to access the private key type PrivateKey interface { PublicKey Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) Private() []byte CryptoSigner() crypto.Signer SignatureAlgorithm() SigAlgorithm } // KeyPair holds the 
public and private key bytes type KeyPair struct { Public []byte `json:"public"` Private []byte `json:"private"` } // Keys represents a map of key ID to PublicKey object. It's necessary // to allow us to unmarshal into an interface via the json.Unmarshaller // interface type Keys map[string]PublicKey // UnmarshalJSON implements the json.Unmarshaller interface func (ks *Keys) UnmarshalJSON(data []byte) error { parsed := make(map[string]tufKey) err := json.Unmarshal(data, &parsed) if err != nil { return err } final := make(map[string]PublicKey) for k, tk := range parsed { final[k] = typedPublicKey(tk) } *ks = final return nil } // KeyList represents a list of keys type KeyList []PublicKey // UnmarshalJSON implements the json.Unmarshaller interface func (ks *KeyList) UnmarshalJSON(data []byte) error { parsed := make([]tufKey, 0, 1) err := json.Unmarshal(data, &parsed) if err != nil { return err } final := make([]PublicKey, 0, len(parsed)) for _, tk := range parsed { final = append(final, typedPublicKey(tk)) } *ks = final return nil } func typedPublicKey(tk tufKey) PublicKey { switch tk.Algorithm() { case ECDSAKey: return &ECDSAPublicKey{tufKey: tk} case ECDSAx509Key: return &ECDSAx509PublicKey{tufKey: tk} case RSAKey: return &RSAPublicKey{tufKey: tk} case RSAx509Key: return &RSAx509PublicKey{tufKey: tk} case ED25519Key: return &ED25519PublicKey{tufKey: tk} } return &UnknownPublicKey{tufKey: tk} } func typedPrivateKey(tk tufKey) (PrivateKey, error) { private := tk.Value.Private tk.Value.Private = nil switch tk.Algorithm() { case ECDSAKey: return NewECDSAPrivateKey( &ECDSAPublicKey{ tufKey: tk, }, private, ) case ECDSAx509Key: return NewECDSAPrivateKey( &ECDSAx509PublicKey{ tufKey: tk, }, private, ) case RSAKey: return NewRSAPrivateKey( &RSAPublicKey{ tufKey: tk, }, private, ) case RSAx509Key: return NewRSAPrivateKey( &RSAx509PublicKey{ tufKey: tk, }, private, ) case ED25519Key: return NewED25519PrivateKey( ED25519PublicKey{ tufKey: tk, }, private, ) } return 
&UnknownPrivateKey{ tufKey: tk, privateKey: privateKey{private: private}, }, nil } // NewPublicKey creates a new, correctly typed PublicKey, using the // UnknownPublicKey catchall for unsupported ciphers func NewPublicKey(alg string, public []byte) PublicKey { tk := tufKey{ Type: alg, Value: KeyPair{ Public: public, }, } return typedPublicKey(tk) } // NewPrivateKey creates a new, correctly typed PrivateKey, using the // UnknownPrivateKey catchall for unsupported ciphers func NewPrivateKey(pubKey PublicKey, private []byte) (PrivateKey, error) { tk := tufKey{ Type: pubKey.Algorithm(), Value: KeyPair{ Public: pubKey.Public(), Private: private, // typedPrivateKey moves this value }, } return typedPrivateKey(tk) } // UnmarshalPublicKey is used to parse individual public keys in JSON func UnmarshalPublicKey(data []byte) (PublicKey, error) { var parsed tufKey err := json.Unmarshal(data, &parsed) if err != nil { return nil, err } return typedPublicKey(parsed), nil } // UnmarshalPrivateKey is used to parse individual private keys in JSON func UnmarshalPrivateKey(data []byte) (PrivateKey, error) { var parsed tufKey err := json.Unmarshal(data, &parsed) if err != nil { return nil, err } return typedPrivateKey(parsed) } // tufKey is the structure used for both public and private keys in TUF. // Normally it would make sense to use a different structures for public and // private keys, but that would change the key ID algorithm (since the canonical // JSON would be different). This structure should normally be accessed through // the PublicKey or PrivateKey interfaces. 
type tufKey struct { id string Type string `json:"keytype"` Value KeyPair `json:"keyval"` } // Algorithm returns the algorithm of the key func (k tufKey) Algorithm() string { return k.Type } // ID efficiently generates if necessary, and caches the ID of the key func (k *tufKey) ID() string { if k.id == "" { pubK := tufKey{ Type: k.Algorithm(), Value: KeyPair{ Public: k.Public(), Private: nil, }, } data, err := json.MarshalCanonical(&pubK) if err != nil { logrus.Error("Error generating key ID:", err) } digest := sha256.Sum256(data) k.id = hex.EncodeToString(digest[:]) } return k.id } // Public returns the public bytes func (k tufKey) Public() []byte { return k.Value.Public } // Public key types // ECDSAPublicKey represents an ECDSA key using a raw serialization // of the public key type ECDSAPublicKey struct { tufKey } // ECDSAx509PublicKey represents an ECDSA key using an x509 cert // as the serialized format of the public key type ECDSAx509PublicKey struct { tufKey } // RSAPublicKey represents an RSA key using a raw serialization // of the public key type RSAPublicKey struct { tufKey } // RSAx509PublicKey represents an RSA key using an x509 cert // as the serialized format of the public key type RSAx509PublicKey struct { tufKey } // ED25519PublicKey represents an ED25519 key using a raw serialization // of the public key type ED25519PublicKey struct { tufKey } // UnknownPublicKey is a catchall for key types that are not supported type UnknownPublicKey struct { tufKey } // NewECDSAPublicKey initializes a new public key with the ECDSAKey type func NewECDSAPublicKey(public []byte) *ECDSAPublicKey { return &ECDSAPublicKey{ tufKey: tufKey{ Type: ECDSAKey, Value: KeyPair{ Public: public, Private: nil, }, }, } } // NewECDSAx509PublicKey initializes a new public key with the ECDSAx509Key type func NewECDSAx509PublicKey(public []byte) *ECDSAx509PublicKey { return &ECDSAx509PublicKey{ tufKey: tufKey{ Type: ECDSAx509Key, Value: KeyPair{ Public: public, Private: nil, }, }, } 
} // NewRSAPublicKey initializes a new public key with the RSA type func NewRSAPublicKey(public []byte) *RSAPublicKey { return &RSAPublicKey{ tufKey: tufKey{ Type: RSAKey, Value: KeyPair{ Public: public, Private: nil, }, }, } } // NewRSAx509PublicKey initializes a new public key with the RSAx509Key type func NewRSAx509PublicKey(public []byte) *RSAx509PublicKey { return &RSAx509PublicKey{ tufKey: tufKey{ Type: RSAx509Key, Value: KeyPair{ Public: public, Private: nil, }, }, } } // NewED25519PublicKey initializes a new public key with the ED25519Key type func NewED25519PublicKey(public []byte) *ED25519PublicKey { return &ED25519PublicKey{ tufKey: tufKey{ Type: ED25519Key, Value: KeyPair{ Public: public, Private: nil, }, }, } } // Private key types type privateKey struct { private []byte } type signer struct { signer crypto.Signer } // ECDSAPrivateKey represents a private ECDSA key type ECDSAPrivateKey struct { PublicKey privateKey signer } // RSAPrivateKey represents a private RSA key type RSAPrivateKey struct { PublicKey privateKey signer } // ED25519PrivateKey represents a private ED25519 key type ED25519PrivateKey struct { ED25519PublicKey privateKey } // UnknownPrivateKey is a catchall for unsupported key types type UnknownPrivateKey struct { tufKey privateKey } // NewECDSAPrivateKey initializes a new ECDSA private key func NewECDSAPrivateKey(public PublicKey, private []byte) (*ECDSAPrivateKey, error) { switch public.(type) { case *ECDSAPublicKey, *ECDSAx509PublicKey: default: return nil, errors.New("Invalid public key type provided to NewECDSAPrivateKey") } ecdsaPrivKey, err := x509.ParseECPrivateKey(private) if err != nil { return nil, err } return &ECDSAPrivateKey{ PublicKey: public, privateKey: privateKey{private: private}, signer: signer{signer: ecdsaPrivKey}, }, nil } // NewRSAPrivateKey initialized a new RSA private key func NewRSAPrivateKey(public PublicKey, private []byte) (*RSAPrivateKey, error) { switch public.(type) { case *RSAPublicKey, 
*RSAx509PublicKey: default: return nil, errors.New("Invalid public key type provided to NewRSAPrivateKey") } rsaPrivKey, err := x509.ParsePKCS1PrivateKey(private) if err != nil { return nil, err } return &RSAPrivateKey{ PublicKey: public, privateKey: privateKey{private: private}, signer: signer{signer: rsaPrivKey}, }, nil } // NewED25519PrivateKey initialized a new ED25519 private key func NewED25519PrivateKey(public ED25519PublicKey, private []byte) (*ED25519PrivateKey, error) { return &ED25519PrivateKey{ ED25519PublicKey: public, privateKey: privateKey{private: private}, }, nil } // Private return the serialized private bytes of the key func (k privateKey) Private() []byte { return k.private } // CryptoSigner returns the underlying crypto.Signer for use cases where we need the default // signature or public key functionality (like when we generate certificates) func (s signer) CryptoSigner() crypto.Signer { return s.signer } // CryptoSigner returns the ED25519PrivateKey which already implements crypto.Signer func (k ED25519PrivateKey) CryptoSigner() crypto.Signer { return nil } // CryptoSigner returns the UnknownPrivateKey which already implements crypto.Signer func (k UnknownPrivateKey) CryptoSigner() crypto.Signer { return nil } type ecdsaSig struct { R *big.Int S *big.Int } // Sign creates an ecdsa signature func (k ECDSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { ecdsaPrivKey, ok := k.CryptoSigner().(*ecdsa.PrivateKey) if !ok { return nil, errors.New("Signer was based on the wrong key type") } hashed := sha256.Sum256(msg) sigASN1, err := ecdsaPrivKey.Sign(rand, hashed[:], opts) if err != nil { return nil, err } sig := ecdsaSig{} _, err = asn1.Unmarshal(sigASN1, &sig) if err != nil { return nil, err } rBytes, sBytes := sig.R.Bytes(), sig.S.Bytes() octetLength := (ecdsaPrivKey.Params().BitSize + 7) >> 3 // MUST include leading zeros in the output rBuf := make([]byte, octetLength-len(rBytes), octetLength) 
sBuf := make([]byte, octetLength-len(sBytes), octetLength) rBuf = append(rBuf, rBytes...) sBuf = append(sBuf, sBytes...) return append(rBuf, sBuf...), nil } // Sign creates an rsa signature func (k RSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { hashed := sha256.Sum256(msg) if opts == nil { opts = &rsa.PSSOptions{ SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: crypto.SHA256, } } return k.CryptoSigner().Sign(rand, hashed[:], opts) } // Sign creates an ed25519 signature func (k ED25519PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { priv := [ed25519.PrivateKeySize]byte{} copy(priv[:], k.private[ed25519.PublicKeySize:]) return ed25519.Sign(&priv, msg)[:], nil } // Sign on an UnknownPrivateKey raises an error because the client does not // know how to sign with this key type. func (k UnknownPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) { return nil, errors.New("Unknown key type, cannot sign.") } // SignatureAlgorithm returns the SigAlgorithm for a ECDSAPrivateKey func (k ECDSAPrivateKey) SignatureAlgorithm() SigAlgorithm { return ECDSASignature } // SignatureAlgorithm returns the SigAlgorithm for a RSAPrivateKey func (k RSAPrivateKey) SignatureAlgorithm() SigAlgorithm { return RSAPSSSignature } // SignatureAlgorithm returns the SigAlgorithm for a ED25519PrivateKey func (k ED25519PrivateKey) SignatureAlgorithm() SigAlgorithm { return EDDSASignature } // SignatureAlgorithm returns the SigAlgorithm for an UnknownPrivateKey func (k UnknownPrivateKey) SignatureAlgorithm() SigAlgorithm { return "" } // PublicKeyFromPrivate returns a new tufKey based on a private key, with // the private key bytes guaranteed to be nil. 
func PublicKeyFromPrivate(pk PrivateKey) PublicKey { return typedPublicKey(tufKey{ Type: pk.Algorithm(), Value: KeyPair{ Public: pk.Public(), Private: nil, }, }) } notary-0.1/tuf/data/roles.go000066400000000000000000000111231262207326400160520ustar00rootroot00000000000000package data import ( "fmt" "strings" ) // Canonical base role names const ( CanonicalRootRole = "root" CanonicalTargetsRole = "targets" CanonicalSnapshotRole = "snapshot" CanonicalTimestampRole = "timestamp" ) // ValidRoles holds an overrideable mapping of canonical role names // to any custom roles names a user wants to make use of. This allows // us to be internally consistent while using different roles in the // public TUF files. var ValidRoles = map[string]string{ CanonicalRootRole: CanonicalRootRole, CanonicalTargetsRole: CanonicalTargetsRole, CanonicalSnapshotRole: CanonicalSnapshotRole, CanonicalTimestampRole: CanonicalTimestampRole, } // ErrInvalidRole represents an error regarding a role. Typically // something like a role for which sone of the public keys were // not found in the TUF repo. 
type ErrInvalidRole struct { Role string } func (e ErrInvalidRole) Error() string { return fmt.Sprintf("tuf: invalid role %s", e.Role) } // SetValidRoles is a utility function to override some or all of the roles func SetValidRoles(rs map[string]string) { // iterate ValidRoles for k := range ValidRoles { if v, ok := rs[k]; ok { ValidRoles[k] = v } } } // RoleName returns the (possibly overridden) role name for the provided // canonical role name func RoleName(canonicalRole string) string { if r, ok := ValidRoles[canonicalRole]; ok { return r } return canonicalRole } // CanonicalRole does a reverse lookup to get the canonical role name // from the (possibly overridden) role name func CanonicalRole(role string) string { name := strings.ToLower(role) if _, ok := ValidRoles[name]; ok { // The canonical version is always lower case // se ensure we return name, not role return name } targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole]) if strings.HasPrefix(name, targetsBase) { role = strings.TrimPrefix(role, targetsBase) role = fmt.Sprintf("%s/%s", CanonicalTargetsRole, role) return role } for r, v := range ValidRoles { if role == v { return r } } return "" } // ValidRole only determines the name is semantically // correct. For target delegated roles, it does NOT check // the the appropriate parent roles exist. 
func ValidRole(name string) bool {
	// canonical role names are always lower case
	name = strings.ToLower(name)
	// a canonical name is valid only if it has not been overridden away
	if v, ok := ValidRoles[name]; ok {
		return name == v
	}
	// anything under the targets namespace is a syntactically valid delegation
	targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole])
	if strings.HasPrefix(name, targetsBase) {
		return true
	}
	// finally, accept any configured (possibly overridden) role name
	for _, v := range ValidRoles {
		if name == v {
			return true
		}
	}
	return false
}

// RootRole is a cut down role as it appears in the root.json
type RootRole struct {
	KeyIDs    []string `json:"keyids"`
	Threshold int      `json:"threshold"`
}

// Role is a more verbose role as they appear in targets delegations
type Role struct {
	RootRole
	Name             string   `json:"name"`
	Paths            []string `json:"paths,omitempty"`
	PathHashPrefixes []string `json:"path_hash_prefixes,omitempty"`
	Email            string   `json:"email,omitempty"`
}

// NewRole creates a new Role object from the given parameters
func NewRole(name string, threshold int, keyIDs, paths, pathHashPrefixes []string) (*Role, error) {
	// paths and path hash prefixes are mutually exclusive on a role
	if len(paths) > 0 && len(pathHashPrefixes) > 0 {
		return nil, ErrInvalidRole{Role: name}
	}
	// a role must require at least one signature
	if threshold < 1 {
		return nil, ErrInvalidRole{Role: name}
	}
	if !ValidRole(name) {
		return nil, ErrInvalidRole{Role: name}
	}
	return &Role{
		RootRole: RootRole{
			KeyIDs:    keyIDs,
			Threshold: threshold,
		},
		Name:             name,
		Paths:            paths,
		PathHashPrefixes: pathHashPrefixes,
	}, nil
}

// IsValid checks if the role has defined both paths and path hash prefixes,
// having both is invalid
func (r Role) IsValid() bool {
	return !(len(r.Paths) > 0 && len(r.PathHashPrefixes) > 0)
}

// ValidKey checks if the given id is a recognized signing key for the role
func (r Role) ValidKey(id string) bool {
	for _, key := range r.KeyIDs {
		if key == id {
			return true
		}
	}
	return false
}

// CheckPaths checks if a given path is valid for the role.
// Matching is by prefix: a role granted "foo/" covers "foo/bar".
func (r Role) CheckPaths(path string) bool {
	for _, p := range r.Paths {
		if strings.HasPrefix(path, p) {
			return true
		}
	}
	return false
}

// CheckPrefixes checks if a given hash matches the prefixes for the role
func (r Role) CheckPrefixes(hash string) bool { for _, p := range
r.PathHashPrefixes { if strings.HasPrefix(hash, p) { return true } } return false } // IsDelegation checks if the role is a delegation or a root role func (r Role) IsDelegation() bool { targetsBase := fmt.Sprintf("%s/", ValidRoles[CanonicalTargetsRole]) return strings.HasPrefix(r.Name, targetsBase) } notary-0.1/tuf/data/roles_test.go000066400000000000000000000036241262207326400171200ustar00rootroot00000000000000package data import ( "testing" "github.com/stretchr/testify/assert" ) func TestCanonicalRole(t *testing.T) { testRoles := map[string]string{ CanonicalRootRole: "testRoot", CanonicalTargetsRole: "testTargets", CanonicalSnapshotRole: "testSnapshot", CanonicalTimestampRole: "testTimestamp", "garbageRole": "testGarbageRole", } SetValidRoles(testRoles) // make sure roles were set correctly assert.Equal(t, "testRoot", ValidRoles[CanonicalRootRole]) assert.Equal(t, "testTargets", ValidRoles[CanonicalTargetsRole]) assert.Equal(t, "testSnapshot", ValidRoles[CanonicalSnapshotRole]) assert.Equal(t, "testTimestamp", ValidRoles[CanonicalTimestampRole]) // check SetValidRoles doesn't allow non-valid roles in assert.Equal(t, "", ValidRoles["garbageRole"]) // check when looking up CanonicalRole from configured role assert.Equal(t, CanonicalRootRole, CanonicalRole("testRoot")) assert.Equal(t, CanonicalTargetsRole, CanonicalRole("testTargets")) assert.Equal(t, CanonicalSnapshotRole, CanonicalRole("testSnapshot")) assert.Equal(t, CanonicalTimestampRole, CanonicalRole("testTimestamp")) assert.Equal(t, "", CanonicalRole("testGarbageRole")) // check when looking up CanonicalRole with canonical role assert.Equal(t, CanonicalRootRole, CanonicalRole(CanonicalRootRole)) assert.Equal(t, CanonicalTargetsRole, CanonicalRole(CanonicalTargetsRole)) assert.Equal(t, CanonicalSnapshotRole, CanonicalRole(CanonicalSnapshotRole)) assert.Equal(t, CanonicalTimestampRole, CanonicalRole(CanonicalTimestampRole)) assert.Equal(t, "", CanonicalRole("garbageRole")) assert.Equal(t, "", 
CanonicalRole("not found")) // reset ValidRoles so other tests aren't messed up ValidRoles = map[string]string{ CanonicalRootRole: CanonicalRootRole, CanonicalTargetsRole: CanonicalTargetsRole, CanonicalSnapshotRole: CanonicalSnapshotRole, CanonicalTimestampRole: CanonicalTimestampRole, } } notary-0.1/tuf/data/root.go000066400000000000000000000036401262207326400157160ustar00rootroot00000000000000package data import ( "time" "github.com/jfrazelle/go/canonical/json" ) // SignedRoot is a fully unpacked root.json type SignedRoot struct { Signatures []Signature Signed Root Dirty bool } // Root is the Signed component of a root.json type Root struct { Type string `json:"_type"` Version int `json:"version"` Expires time.Time `json:"expires"` Keys Keys `json:"keys"` Roles map[string]*RootRole `json:"roles"` ConsistentSnapshot bool `json:"consistent_snapshot"` } // NewRoot initializes a new SignedRoot with a set of keys, roles, and the consistent flag func NewRoot(keys map[string]PublicKey, roles map[string]*RootRole, consistent bool) (*SignedRoot, error) { signedRoot := &SignedRoot{ Signatures: make([]Signature, 0), Signed: Root{ Type: TUFTypes["root"], Version: 0, Expires: DefaultExpires("root"), Keys: keys, Roles: roles, ConsistentSnapshot: consistent, }, Dirty: true, } return signedRoot, nil } // ToSigned partially serializes a SignedRoot for further signing func (r SignedRoot) ToSigned() (*Signed, error) { s, err := json.MarshalCanonical(r.Signed) if err != nil { return nil, err } signed := json.RawMessage{} err = signed.UnmarshalJSON(s) if err != nil { return nil, err } sigs := make([]Signature, len(r.Signatures)) copy(sigs, r.Signatures) return &Signed{ Signatures: sigs, Signed: signed, }, nil } // RootFromSigned fully unpacks a Signed object into a SignedRoot func RootFromSigned(s *Signed) (*SignedRoot, error) { r := Root{} err := json.Unmarshal(s.Signed, &r) if err != nil { return nil, err } sigs := make([]Signature, len(s.Signatures)) copy(sigs, s.Signatures) 
return &SignedRoot{ Signatures: sigs, Signed: r, }, nil } notary-0.1/tuf/data/snapshot.go000066400000000000000000000047531262207326400166000ustar00rootroot00000000000000package data import ( "bytes" "time" "github.com/Sirupsen/logrus" "github.com/jfrazelle/go/canonical/json" ) // SignedSnapshot is a fully unpacked snapshot.json type SignedSnapshot struct { Signatures []Signature Signed Snapshot Dirty bool } // Snapshot is the Signed component of a snapshot.json type Snapshot struct { Type string `json:"_type"` Version int `json:"version"` Expires time.Time `json:"expires"` Meta Files `json:"meta"` } // NewSnapshot initilizes a SignedSnapshot with a given top level root // and targets objects func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) { logrus.Debug("generating new snapshot...") targetsJSON, err := json.Marshal(targets) if err != nil { logrus.Debug("Error Marshalling Targets") return nil, err } rootJSON, err := json.Marshal(root) if err != nil { logrus.Debug("Error Marshalling Root") return nil, err } rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), "sha256") if err != nil { return nil, err } targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), "sha256") if err != nil { return nil, err } return &SignedSnapshot{ Signatures: make([]Signature, 0), Signed: Snapshot{ Type: TUFTypes["snapshot"], Version: 0, Expires: DefaultExpires("snapshot"), Meta: Files{ ValidRoles["root"]: rootMeta, ValidRoles["targets"]: targetsMeta, }, }, }, nil } func (sp *SignedSnapshot) hashForRole(role string) []byte { return sp.Signed.Meta[role].Hashes["sha256"] } // ToSigned partially serializes a SignedSnapshot for further signing func (sp SignedSnapshot) ToSigned() (*Signed, error) { s, err := json.MarshalCanonical(sp.Signed) if err != nil { return nil, err } signed := json.RawMessage{} err = signed.UnmarshalJSON(s) if err != nil { return nil, err } sigs := make([]Signature, len(sp.Signatures)) copy(sigs, sp.Signatures) return &Signed{ Signatures: 
sigs, Signed: signed, }, nil } // AddMeta updates a role in the snapshot with new meta func (sp *SignedSnapshot) AddMeta(role string, meta FileMeta) { sp.Signed.Meta[role] = meta sp.Dirty = true } // SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) { sp := Snapshot{} err := json.Unmarshal(s.Signed, &sp) if err != nil { return nil, err } sigs := make([]Signature, len(s.Signatures)) copy(sigs, s.Signatures) return &SignedSnapshot{ Signatures: sigs, Signed: sp, }, nil } notary-0.1/tuf/data/targets.go000066400000000000000000000062641262207326400164110ustar00rootroot00000000000000package data import ( "crypto/sha256" "encoding/hex" "github.com/jfrazelle/go/canonical/json" ) // SignedTargets is a fully unpacked targets.json, or target delegation // json file type SignedTargets struct { Signatures []Signature Signed Targets Dirty bool } // Targets is the Signed components of a targets.json or delegation json file type Targets struct { SignedCommon Targets Files `json:"targets"` Delegations Delegations `json:"delegations,omitempty"` } // NewTargets intiializes a new empty SignedTargets object func NewTargets() *SignedTargets { return &SignedTargets{ Signatures: make([]Signature, 0), Signed: Targets{ SignedCommon: SignedCommon{ Type: TUFTypes["targets"], Version: 0, Expires: DefaultExpires("targets"), }, Targets: make(Files), Delegations: *NewDelegations(), }, Dirty: true, } } // GetMeta attempts to find the targets entry for the path. It // will return nil in the case of the target not being found. func (t SignedTargets) GetMeta(path string) *FileMeta { for p, meta := range t.Signed.Targets { if p == path { return &meta } } return nil } // GetDelegations filters the roles and associated keys that may be // the signers for the given target path. If no appropriate roles // can be found, it will simply return nil for the return values. 
// The returned slice of Role will have order maintained relative // to the role slice on Delegations per TUF spec proposal on using // order to determine priority. func (t SignedTargets) GetDelegations(path string) []*Role { var roles []*Role pathHashBytes := sha256.Sum256([]byte(path)) pathHash := hex.EncodeToString(pathHashBytes[:]) for _, r := range t.Signed.Delegations.Roles { if !r.IsValid() { // Role has both Paths and PathHashPrefixes. continue } if r.CheckPaths(path) { roles = append(roles, r) continue } if r.CheckPrefixes(pathHash) { roles = append(roles, r) continue } //keysDB.AddRole(r) } return roles } // AddTarget adds or updates the meta for the given path func (t *SignedTargets) AddTarget(path string, meta FileMeta) { t.Signed.Targets[path] = meta t.Dirty = true } // AddDelegation will add a new delegated role with the given keys, // ensuring the keys either already exist, or are added to the map // of delegation keys func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error { return nil } // ToSigned partially serializes a SignedTargets for further signing func (t SignedTargets) ToSigned() (*Signed, error) { s, err := json.MarshalCanonical(t.Signed) if err != nil { return nil, err } signed := json.RawMessage{} err = signed.UnmarshalJSON(s) if err != nil { return nil, err } sigs := make([]Signature, len(t.Signatures)) copy(sigs, t.Signatures) return &Signed{ Signatures: sigs, Signed: signed, }, nil } // TargetsFromSigned fully unpacks a Signed object into a SignedTargets func TargetsFromSigned(s *Signed) (*SignedTargets, error) { t := Targets{} err := json.Unmarshal(s.Signed, &t) if err != nil { return nil, err } sigs := make([]Signature, len(s.Signatures)) copy(sigs, s.Signatures) return &SignedTargets{ Signatures: sigs, Signed: t, }, nil } notary-0.1/tuf/data/timestamp.go000066400000000000000000000035441262207326400167410ustar00rootroot00000000000000package data import ( "bytes" "time" "github.com/jfrazelle/go/canonical/json" ) // 
SignedTimestamp is a fully unpacked timestamp.json type SignedTimestamp struct { Signatures []Signature Signed Timestamp Dirty bool } // Timestamp is the Signed component of a timestamp.json type Timestamp struct { Type string `json:"_type"` Version int `json:"version"` Expires time.Time `json:"expires"` Meta Files `json:"meta"` } // NewTimestamp initializes a timestamp with an existing snapshot func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) { snapshotJSON, err := json.Marshal(snapshot) if err != nil { return nil, err } snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), "sha256") if err != nil { return nil, err } return &SignedTimestamp{ Signatures: make([]Signature, 0), Signed: Timestamp{ Type: TUFTypes["timestamp"], Version: 0, Expires: DefaultExpires("timestamp"), Meta: Files{ ValidRoles["snapshot"]: snapshotMeta, }, }, }, nil } // ToSigned partially serializes a SignedTimestamp such that it can // be signed func (ts SignedTimestamp) ToSigned() (*Signed, error) { s, err := json.MarshalCanonical(ts.Signed) if err != nil { return nil, err } signed := json.RawMessage{} err = signed.UnmarshalJSON(s) if err != nil { return nil, err } sigs := make([]Signature, len(ts.Signatures)) copy(sigs, ts.Signatures) return &Signed{ Signatures: sigs, Signed: signed, }, nil } // TimestampFromSigned parsed a Signed object into a fully unpacked // SignedTimestamp func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) { ts := Timestamp{} err := json.Unmarshal(s.Signed, &ts) if err != nil { return nil, err } sigs := make([]Signature, len(s.Signatures)) copy(sigs, s.Signatures) return &SignedTimestamp{ Signatures: sigs, Signed: ts, }, nil } notary-0.1/tuf/data/types.go000066400000000000000000000130731262207326400161000ustar00rootroot00000000000000package data import ( "crypto/sha256" "crypto/sha512" "fmt" "hash" "io" "io/ioutil" "strings" "time" "github.com/Sirupsen/logrus" "github.com/jfrazelle/go/canonical/json" ) // SigAlgorithm for types of 
signatures type SigAlgorithm string func (k SigAlgorithm) String() string { return string(k) } const defaultHashAlgorithm = "sha256" // Signature types const ( EDDSASignature SigAlgorithm = "eddsa" RSAPSSSignature SigAlgorithm = "rsapss" RSAPKCS1v15Signature SigAlgorithm = "rsapkcs1v15" ECDSASignature SigAlgorithm = "ecdsa" PyCryptoSignature SigAlgorithm = "pycrypto-pkcs#1 pss" ) // Key types const ( ED25519Key = "ed25519" RSAKey = "rsa" RSAx509Key = "rsa-x509" ECDSAKey = "ecdsa" ECDSAx509Key = "ecdsa-x509" ) // TUFTypes is the set of metadata types var TUFTypes = map[string]string{ CanonicalRootRole: "Root", CanonicalTargetsRole: "Targets", CanonicalSnapshotRole: "Snapshot", CanonicalTimestampRole: "Timestamp", } // SetTUFTypes allows one to override some or all of the default // type names in TUF. func SetTUFTypes(ts map[string]string) { for k, v := range ts { TUFTypes[k] = v } } // ValidTUFType checks if the given type is valid for the role func ValidTUFType(typ, role string) bool { if ValidRole(role) { // All targets delegation roles must have // the valid type is for targets. role = CanonicalRole(role) if role == "" { // role is unknown and does not map to // a type return false } if strings.HasPrefix(role, CanonicalTargetsRole+"/") { role = CanonicalTargetsRole } } // most people will just use the defaults so have this optimal check // first. Do comparison just in case there is some unknown vulnerability // if a key and value in the map differ. 
if v, ok := TUFTypes[role]; ok { return typ == v } return false } // Signed is the high level, partially deserialized metadata object // used to verify signatures before fully unpacking, or to add signatures // before fully packing type Signed struct { Signed json.RawMessage `json:"signed"` Signatures []Signature `json:"signatures"` } // SignedCommon contains the fields common to the Signed component of all // TUF metadata files type SignedCommon struct { Type string `json:"_type"` Expires time.Time `json:"expires"` Version int `json:"version"` } // SignedMeta is used in server validation where we only need signatures // and common fields type SignedMeta struct { Signed SignedCommon `json:"signed"` Signatures []Signature `json:"signatures"` } // Signature is a signature on a piece of metadata type Signature struct { KeyID string `json:"keyid"` Method SigAlgorithm `json:"method"` Signature []byte `json:"sig"` } // Files is the map of paths to file meta container in targets and delegations // metadata files type Files map[string]FileMeta // Hashes is the map of hash type to digest created for each metadata // and target file type Hashes map[string][]byte // FileMeta contains the size and hashes for a metadata or target file. Custom // data can be optionally added. 
type FileMeta struct { Length int64 `json:"length"` Hashes Hashes `json:"hashes"` Custom json.RawMessage `json:"custom,omitempty"` } // NewFileMeta generates a FileMeta object from the reader, using the // hash algorithms provided func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) { if len(hashAlgorithms) == 0 { hashAlgorithms = []string{defaultHashAlgorithm} } hashes := make(map[string]hash.Hash, len(hashAlgorithms)) for _, hashAlgorithm := range hashAlgorithms { var h hash.Hash switch hashAlgorithm { case "sha256": h = sha256.New() case "sha512": h = sha512.New() default: return FileMeta{}, fmt.Errorf("Unknown Hash Algorithm: %s", hashAlgorithm) } hashes[hashAlgorithm] = h r = io.TeeReader(r, h) } n, err := io.Copy(ioutil.Discard, r) if err != nil { return FileMeta{}, err } m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))} for hashAlgorithm, h := range hashes { m.Hashes[hashAlgorithm] = h.Sum(nil) } return m, nil } // Delegations holds a tier of targets delegations type Delegations struct { Keys Keys `json:"keys"` Roles []*Role `json:"roles"` } // NewDelegations initializes an empty Delegations object func NewDelegations() *Delegations { return &Delegations{ Keys: make(map[string]PublicKey), Roles: make([]*Role, 0), } } // defines number of days in which something should expire var defaultExpiryTimes = map[string]int{ CanonicalRootRole: 365, CanonicalTargetsRole: 90, CanonicalSnapshotRole: 7, CanonicalTimestampRole: 1, } // SetDefaultExpiryTimes allows one to change the default expiries. 
func SetDefaultExpiryTimes(times map[string]int) { for key, value := range times { if _, ok := defaultExpiryTimes[key]; !ok { logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key) continue } defaultExpiryTimes[key] = value } } // DefaultExpires gets the default expiry time for the given role func DefaultExpires(role string) time.Time { var t time.Time if t, ok := defaultExpiryTimes[role]; ok { return time.Now().AddDate(0, 0, t) } return t.UTC().Round(time.Second) } type unmarshalledSignature Signature // UnmarshalJSON does a custom unmarshalling of the signature JSON func (s *Signature) UnmarshalJSON(data []byte) error { uSignature := unmarshalledSignature{} err := json.Unmarshal(data, &uSignature) if err != nil { return err } uSignature.Method = SigAlgorithm(strings.ToLower(string(uSignature.Method))) *s = Signature(uSignature) return nil } notary-0.1/tuf/data/types_test.go000066400000000000000000000044121262207326400171340ustar00rootroot00000000000000package data import ( "bytes" "encoding/hex" "testing" "github.com/jfrazelle/go/canonical/json" "github.com/stretchr/testify/assert" ) func TestGenerateFileMetaDefault(t *testing.T) { // default is sha512 r := bytes.NewReader([]byte("foo")) meta, err := NewFileMeta(r, "sha512") assert.NoError(t, err, "Unexpected error.") assert.Equal(t, meta.Length, int64(3), "Meta did not have expected Length field value") hashes := meta.Hashes assert.Len(t, hashes, 1, "Only expected one hash to be present") hash, ok := hashes["sha512"] if !ok { t.Fatal("missing sha512 hash") } assert.Equal(t, "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7", hex.EncodeToString(hash), "Hashes not equal") } func TestGenerateFileMetaExplicit(t *testing.T) { r := bytes.NewReader([]byte("foo")) meta, err := NewFileMeta(r, "sha256", "sha512") assert.NoError(t, err) assert.Equal(t, meta.Length, int64(3)) hashes := meta.Hashes assert.Len(t, hashes, 2) 
for name, val := range map[string]string{ "sha256": "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", "sha512": "f7fbba6e0636f890e56fbbf3283e524c6fa3204ae298382d624741d0dc6638326e282c41be5e4254d8820772c5518a2c5a8c0c7f7eda19594a7eb539453e1ed7", } { hash, ok := hashes[name] if !ok { t.Fatalf("missing %s hash", name) } assert.Equal(t, hex.EncodeToString(hash), val) } } func TestSignatureUnmarshalJSON(t *testing.T) { signatureJSON := `{"keyid":"97e8e1b51b6e7cf8720a56b5334bd8692ac5b28233c590b89fab0b0cd93eeedc","method":"RSA","sig":"2230cba525e4f5f8fc744f234221ca9a92924da4cc5faf69a778848882fcf7a20dbb57296add87f600891f2569a9c36706314c240f9361c60fd36f5a915a0e9712fc437b761e8f480868d7a4444724daa0d29a2669c0edbd4046046649a506b3d711d0aa5e70cb9d09dec7381e7de27a3168e77731e08f6ed56fcce2478855e837816fb69aff53412477748cd198dce783850080d37aeb929ad0f81460ebd31e61b772b6c7aa56977c787d4281fa45dbdefbb38d449eb5bccb2702964a52c78811545939712c8280dee0b23b2fa9fbbdd6a0c42476689ace655eba0745b4a21ba108bcd03ad00fdefff416dc74e08486a0538f8fd24989e1b9fc89e675141b7c"}` var sig Signature err := json.Unmarshal([]byte(signatureJSON), &sig) assert.NoError(t, err) // Check that the method string is lowercased assert.Equal(t, sig.Method.String(), "rsa") } notary-0.1/tuf/db/000077500000000000000000000000001262207326400140555ustar00rootroot00000000000000notary-0.1/tuf/db/files.db000066400000000000000000000700001262207326400154630ustar00rootroot00000000000000SQLite format 3@ -  6  6 "'tablekeyskeysCREATE TABLE keys ( id int auto_increment, namespace varchar(255) not null, role varchar(255) not null, key text not null, primary key (id) )';indexsqlite_autoindex_keys_1keysP!!ktablefilehashesfilehashesCREATE TABLE filehashes( namespace varchar(255) not null, path varchar(255) not null, alg varchar(10) not null, hash varchar(128) not null, primary key (namespace, path, alg))3G!indexsqlite_autoindex_filehashes_1filehashes<KtablefilemetafilemetaCREATE TABLE filemeta( namespace varchar(255) not null, 
path varchar(255) not null, size int not null, custom text default null, primary key (namespace, path))/Cindexsqlite_autoindex_filemeta_1filemeta  3 docker.io/testImage/bar.txt 3 docker.io/te   /foo.txt  3docker.io/testImage/bar.txt 3docker.io/testI   /foo.txt *3docker.io/testImage/bar.txtsha2560102*3docker.io/testImage/bar.t /bar.*3docker.io/testImage/foo.txtsha5 /foo.txtsha2560102 /foo.txtsha5120304 '3docker.io/testImage/bar.txtsha256Odocker.io/testImage/foo.txtsha256'3docker.io/testImage/foo.txtsha256 /foo.txtsha256  /foo.txtsha512    3docker.io/testImagesnapshot{"keytype":"ed25519","keyval":{"public":"dc765e788a189f2c8c694803b9ff3d26006d9f86670fbf13caa053564a368316","private":"a928f7141eebfabd24bd85e9fe39b2aa7a1db963927bdd6749abaa5e3948dddcdc765e788a189f2c8c694803b9ff3d26006d9f86670fbf13caa053564a368316"}}3docker.io/testImagetargets{"keytype":"ed25519","keyval":{"public":"d962263b798ce5bf0ad1a01f9fc7150ee04dbb84ff92a footimestamp{"keytype":"ed25519","keyval":{"public":"a1632c8fd8cde7d1a28a9958eef1969676f0f30e507a8d6d03ad041ead286093","private":"e436019f9a69998e49a4ebf6e16020f7cb2298a51e5db8589ae714ab444aef73a1632c8fd8cde7d1a28a9958eef1969676f0f30e507a8d6d03ad041ead286093"}} foosnapshot{"keytype":"ed25519","keyval":{"public":"cc06aa4112e81e5bfc6a3c6f6f5de88927ce92f420fab2da99bb943f9eb34b24","private":"21e6960e4c0c6c220813c8acb67a197b193ca2c175619f4b4825492718c4f00bcc06aa4112e81e5bfc6a3c6f6f5de88927ce92f420fab2da99bb943f9eb34b24"}} footargets{"keytype":"ed25519","keyval":{"public":"1f71073fce0b7e6d4e4eca9bc3e70c023c5ee360fda00412a5fa6cb6d7ee4c2e","private":"50c668ddde5bdff776c5722ad8f338d1588ce1521bf36f386b3a47ed823b119e1f71073fce0b7e6d4e4eca9bc3e70c023c5ee360fda00412a5fa6cb6d7ee4c2e"}}fooroot{"keytype":"ed25519","keyval":{"public": 3edocker.io/testImagesnapshot{"keytype":"ed25519","keyval":{"public":"82b48eadda9239cb6b26bca3d40429f6a311477d8db27c801861dc79c1bee8fc"}} 
3edocker.io/testImagetargets{"keytype":"ed25519","keyval":{"public":"9ae5f9b143ad91445524e1705cba79fcf9d657eabe99c40fde7ededa934a4a4b"}} 3edocker.io/testImageroot{"keytype":"ed25519","keyval":{"public":"df197bbc52f39cb7f19d9ce569eae575517dcfff25019a5e5c591ff9c393bcfe"}} 3edocker.io/testImagesnapshot{"keytype":"ed25519","keyval":{"public":"d63e7e3c8b37c01ea0695e49df0f9944fb2eb428888b64b3a0f98f2de8a16486"}} 3edocker.io/testImagetargets{"keytype":"ed25519","keyval":{"public":"f102c6942e5e352cd5e{ etimestamp{"keytype":"ed25519","keyval":{"public":"19a39d7e20b432d24cc580bfd220dd92060c79e774f1e0a85d7a411bd432477c"}}z esnapshot{"keytype":"ed25519","keyval":{"public":"dc50659e685f3476f66e054443fac9ae73933f1d55828fa66117d14321826f0d"}}y etargets{"keytype":"ed25519","keyval":{"public":"bdfce22fa4353a34715d8366029120d574d4ac75aa46f8d4954a576b7436cbfc"}}yv eroot{"keytype":"ed25519","keyval":{"public":"3934ed97c843ed22c5142bf7e9f38a6e6fbf8beb78616a86a08dfe03f5019178"}}v eroot{"keytype":"ed25519","keyval":{"public":"5a3643b6b4caeb00e9620e9099cbad3d9ed5c85de5c0f6c530b477992e59df54"}}   notary-0.1/tuf/encrypted/000077500000000000000000000000001262207326400154655ustar00rootroot00000000000000notary-0.1/tuf/encrypted/encrypted.go000066400000000000000000000123461262207326400200170ustar00rootroot00000000000000// Package encrypted provides a simple, secure system for encrypting data // symmetrically with a passphrase. // // It uses scrypt derive a key from the passphrase and the NaCl secret box // cipher for authenticated encryption. package encrypted import ( "crypto/rand" "encoding/json" "errors" "fmt" "io" "golang.org/x/crypto/nacl/secretbox" "golang.org/x/crypto/scrypt" ) const saltSize = 32 const ( boxKeySize = 32 boxNonceSize = 24 ) const ( // N parameter was chosen to be ~100ms of work using the default implementation // on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina Macbook // Pro (it takes ~113ms). 
scryptN = 32768 scryptR = 8 scryptP = 1 ) const ( nameScrypt = "scrypt" nameSecretBox = "nacl/secretbox" ) type data struct { KDF scryptKDF `json:"kdf"` Cipher secretBoxCipher `json:"cipher"` Ciphertext []byte `json:"ciphertext"` } type scryptParams struct { N int `json:"N"` R int `json:"r"` P int `json:"p"` } func newScryptKDF() (scryptKDF, error) { salt := make([]byte, saltSize) if err := fillRandom(salt); err != nil { return scryptKDF{}, err } return scryptKDF{ Name: nameScrypt, Params: scryptParams{ N: scryptN, R: scryptR, P: scryptP, }, Salt: salt, }, nil } type scryptKDF struct { Name string `json:"name"` Params scryptParams `json:"params"` Salt []byte `json:"salt"` } func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) { return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize) } // CheckParams checks that the encoded KDF parameters are what we expect them to // be. If we do not do this, an attacker could cause a DoS by tampering with // them. func (s *scryptKDF) CheckParams() error { if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP { return errors.New("encrypted: unexpected kdf parameters") } return nil } func newSecretBoxCipher() (secretBoxCipher, error) { nonce := make([]byte, boxNonceSize) if err := fillRandom(nonce); err != nil { return secretBoxCipher{}, err } return secretBoxCipher{ Name: nameSecretBox, Nonce: nonce, }, nil } type secretBoxCipher struct { Name string `json:"name"` Nonce []byte `json:"nonce"` encrypted bool } func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte { var keyBytes [boxKeySize]byte var nonceBytes [boxNonceSize]byte if len(key) != len(keyBytes) { panic("incorrect key size") } if len(s.Nonce) != len(nonceBytes) { panic("incorrect nonce size") } copy(keyBytes[:], key) copy(nonceBytes[:], s.Nonce) // ensure that we don't re-use nonces if s.encrypted { panic("Encrypt must only be called once for each cipher instance") } s.encrypted = true return 
secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes) } func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) { var keyBytes [boxKeySize]byte var nonceBytes [boxNonceSize]byte if len(key) != len(keyBytes) { panic("incorrect key size") } if len(s.Nonce) != len(nonceBytes) { // return an error instead of panicking since the nonce is user input return nil, errors.New("encrypted: incorrect nonce size") } copy(keyBytes[:], key) copy(nonceBytes[:], s.Nonce) res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes) if !ok { return nil, errors.New("encrypted: decryption failed") } return res, nil } // Encrypt takes a passphrase and plaintext, and returns a JSON object // containing ciphertext and the details necessary to decrypt it. func Encrypt(plaintext, passphrase []byte) ([]byte, error) { k, err := newScryptKDF() if err != nil { return nil, err } key, err := k.Key(passphrase) if err != nil { return nil, err } c, err := newSecretBoxCipher() if err != nil { return nil, err } data := &data{ KDF: k, Cipher: c, } data.Ciphertext = c.Encrypt(plaintext, key) return json.Marshal(data) } // Marshal encrypts the JSON encoding of v using passphrase. func Marshal(v interface{}, passphrase []byte) ([]byte, error) { data, err := json.MarshalIndent(v, "", "\t") if err != nil { return nil, err } return Encrypt(data, passphrase) } // Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and // tries to decrypt it using passphrase. If successful, it returns the // plaintext. 
func Decrypt(ciphertext, passphrase []byte) ([]byte, error) { data := &data{} if err := json.Unmarshal(ciphertext, data); err != nil { return nil, err } if data.KDF.Name != nameScrypt { return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name) } if data.Cipher.Name != nameSecretBox { return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name) } if err := data.KDF.CheckParams(); err != nil { return nil, err } key, err := data.KDF.Key(passphrase) if err != nil { return nil, err } return data.Cipher.Decrypt(data.Ciphertext, key) } // Unmarshal decrypts the data using passphrase and unmarshals the resulting // plaintext into the value pointed to by v. func Unmarshal(data []byte, v interface{}, passphrase []byte) error { decrypted, err := Decrypt(data, passphrase) if err != nil { return err } return json.Unmarshal(decrypted, v) } func fillRandom(b []byte) error { _, err := io.ReadFull(rand.Reader, b) return err } notary-0.1/tuf/encrypted/encrypted_test.go000066400000000000000000000025041262207326400210510ustar00rootroot00000000000000package encrypted import ( "encoding/json" "testing" "github.com/stretchr/testify/assert" ) var plaintext = []byte("reallyimportant") func TestRoundtrip(t *testing.T) { passphrase := []byte("supersecret") enc, err := Encrypt(plaintext, passphrase) assert.NoError(t, err) // successful decrypt dec, err := Decrypt(enc, passphrase) assert.NoError(t, err) assert.Equal(t, dec, plaintext) // wrong passphrase passphrase[0] = 0 dec, err = Decrypt(enc, passphrase) assert.Error(t, err) assert.Nil(t, dec) } func TestTamperedRoundtrip(t *testing.T) { passphrase := []byte("supersecret") enc, err := Encrypt(plaintext, passphrase) assert.NoError(t, err) data := &data{} err = json.Unmarshal(enc, data) assert.NoError(t, err) data.Ciphertext[0] = 0 data.Ciphertext[1] = 0 enc, _ = json.Marshal(data) dec, err := Decrypt(enc, passphrase) assert.Error(t, err) assert.Nil(t, dec) } func TestDecrypt(t *testing.T) { enc := 
[]byte(`{"kdf":{"name":"scrypt","params":{"N":32768,"r":8,"p":1},"salt":"N9a7x5JFGbrtB2uBR81jPwp0eiLR4A7FV3mjVAQrg1g="},"cipher":{"name":"nacl/secretbox","nonce":"2h8HxMmgRfuYdpswZBQaU3xJ1nkA/5Ik"},"ciphertext":"SEW6sUh0jf2wfdjJGPNS9+bkk2uB+Cxamf32zR8XkQ=="}`) passphrase := []byte("supersecret") dec, err := Decrypt(enc, passphrase) assert.NoError(t, err) assert.Equal(t, dec, plaintext) } notary-0.1/tuf/keys/000077500000000000000000000000001262207326400144435ustar00rootroot00000000000000notary-0.1/tuf/keys/db.go000066400000000000000000000033121262207326400153560ustar00rootroot00000000000000package keys import ( "errors" "github.com/docker/notary/tuf/data" ) // Various basic key database errors var ( ErrWrongType = errors.New("tuf: invalid key type") ErrExists = errors.New("tuf: key already in db") ErrWrongID = errors.New("tuf: key id mismatch") ErrInvalidKey = errors.New("tuf: invalid key") ErrInvalidKeyID = errors.New("tuf: invalid key id") ErrInvalidThreshold = errors.New("tuf: invalid role threshold") ) // KeyDB is an in memory database of public keys and role associations. // It is populated when parsing TUF files and used during signature // verification to look up the keys for a given role type KeyDB struct { roles map[string]*data.Role keys map[string]data.PublicKey } // NewDB initializes an empty KeyDB func NewDB() *KeyDB { return &KeyDB{ roles: make(map[string]*data.Role), keys: make(map[string]data.PublicKey), } } // AddKey adds a public key to the database func (db *KeyDB) AddKey(k data.PublicKey) { db.keys[k.ID()] = k } // AddRole adds a role to the database. Any keys associated with the // role must have already been added. 
func (db *KeyDB) AddRole(r *data.Role) error { if !data.ValidRole(r.Name) { return data.ErrInvalidRole{Role: r.Name} } if r.Threshold < 1 { return ErrInvalidThreshold } // validate all key ids are in the keys maps for _, id := range r.KeyIDs { if _, ok := db.keys[id]; !ok { return ErrInvalidKeyID } } db.roles[r.Name] = r return nil } // GetKey pulls a key out of the database by its ID func (db *KeyDB) GetKey(id string) data.PublicKey { return db.keys[id] } // GetRole retrieves a role based on its name func (db *KeyDB) GetRole(name string) *data.Role { return db.roles[name] } notary-0.1/tuf/resources/000077500000000000000000000000001262207326400155025ustar00rootroot00000000000000notary-0.1/tuf/resources/tuf_update_flow.jpg000066400000000000000000001573301262207326400214040ustar00rootroot00000000000000JFIFHHExifMM*V^(ifHH8Photoshop 3.08BIM8BIM%ُ B~ XICC_PROFILE HLinomntrRGB XYZ  1acspMSFTIEC sRGB-HP cprtP3desclwtptbkptrXYZgXYZ,bXYZ@dmndTpdmddvuedLview$lumimeas $tech0 rTRC< gTRC< bTRC< textCopyright (c) 1998 Hewlett-Packard CompanydescsRGB IEC61966-2.1sRGB IEC61966-2.1XYZ QXYZ XYZ o8XYZ bXYZ $descIEC http://www.iec.chIEC http://www.iec.chdesc.IEC 61966-2.1 Default RGB colour space - sRGB.IEC 61966-2.1 Default RGB colour space - sRGBdesc,Reference Viewing Condition in IEC61966-2.1,Reference Viewing Condition in IEC61966-2.1view_. 
\XYZ L VPWmeassig CRT curv #(-27;@EJOTY^chmrw| %+28>ELRY`gnu| &/8AKT]gqz !-8COZfr~ -;HUcq~ +:IXgw'7HYj{+=Oat 2FZn  % : O d y  ' = T j " 9 Q i  * C \ u & @ Z t .Id %A^z &Ca~1Om&Ed#Cc'Ij4Vx&IlAe@e Ek*Qw;c*R{Gp@j>i  A l !!H!u!!!"'"U"""# #8#f###$$M$|$$% %8%h%%%&'&W&&&''I'z''( (?(q(())8)k))**5*h**++6+i++,,9,n,,- -A-v--..L.../$/Z///050l0011J1112*2c223 3F3334+4e4455M555676r667$7`7788P8899B999:6:t::;-;k;;<' >`>>?!?a??@#@d@@A)AjAAB0BrBBC:C}CDDGDDEEUEEF"FgFFG5G{GHHKHHIIcIIJ7J}JK KSKKL*LrLMMJMMN%NnNOOIOOP'PqPQQPQQR1R|RSS_SSTBTTU(UuUVV\VVWDWWX/X}XYYiYZZVZZ[E[[\5\\]']x]^^l^__a_``W``aOaabIbbcCccd@dde=eef=ffg=ggh?hhiCiijHjjkOkklWlmm`mnnknooxop+ppq:qqrKrss]sttptu(uuv>vvwVwxxnxy*yyzFz{{c{|!||}A}~~b~#G k͂0WGrׇ;iΉ3dʋ0cʍ1fΏ6n֑?zM _ɖ4 uL$h՛BdҞ@iءG&vVǥ8nRĩ7u\ЭD-u`ֲK³8%yhYѹJº;.! zpg_XQKFAǿ=ȼ:ɹ8ʷ6˶5̵5͵6ζ7ϸ9к<Ѿ?DINU\dlvۀ܊ݖޢ)߯6DScs 2F[p(@Xr4Pm8Ww)Km" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyzC  C 2 ?(((((((((((((((((((((((((((((((((º%|KiÈyT xEs_q/;TִmE ^ZJA2*J:2PEi?k1վh">- =ݪ8R,@@C ǭvQEQEQEQEQEQEQEQEQEQEQEQEQEQE(((((((((((((((((((((((((((((((((>)|9<)^|q_nĖ:Hէ/*>pU+7n`ˎV>.~?[R٠Nw3zEs^z:[Oڍl|Y0Ҩ,2 ~> ՚yc[<%wd-8}$̣o"K,[',Ty'׎|㾝KEѼG{82B\yB5(>6v"VbI[3X~43.oqy_wVu,ieIx\!;ha|Sk8E1!nCb޲Hv[Lȯ'ʍ1حC~4|?>\kq_+log}h2;D>YXʷ,=$~ߵ?h_$ 2Sh: 73or$U%n/Oρ x{)dZ[l}.rW\6S?VZ{}kqcGhmƯ`xǙ>_#51x'o˝eWl6Pr\Ɗq ~sϯ~_|u.x+[mf܍J!,qckW ((((((((((((((((((((((((((((((((((((((((((((((((gşmg%̜0,wbOh~ ؙ7A@EPEPEPEPEPEPEPEPEPEPEPEPEPEPEP((((((((((((((((((((((((((((((((((~ ؙ7A^_;~wQ~ɟoF Ԅ 'BO9bK4kOMXx!/YvF6w^qmZ U4FK0sl\z!=篼]M3XYZjaL6ġČ"fF"7?Ҿ*0tKZ4}GEPգq<#*9!zh#NK<:,p ۡwG*#`*N\ (+|3m__Zi$quy 4V\vy*+1SS_˿y8=a&j>naWF2DfH q^(e4~# Kq, A\d *ot MF(3q|M~*Ӭu?:SMe.AJ[U'h-dIy\8|A|ౖ;8&>!ҟzq4p9Bp>uĚï׆.on^d 6u"1 #4$=Fx9;m!W[KY<[h^K7 a:(EEWH+7?ڔRi:Lǩ5 MFFТM!(߈/GzL 3ܼ`]6_;C%w %g+1EQEQEQEQEQEQEQEQEQE((((((((((((((((((((((((((((((((((~ ؙ7A^^'ɬ3n=(((((((((((((((((((((((((((((((((((((((((((((((((׼Uxb]*vZ6zudvY.Gtp!Ka~)u/Ꮔ.ڦ {byaي,ПN_ 
ҭ;tUn%>sly^7|$OZ|^[[K*'TvDpH,GZ֊(((((((((((((((((((((wį%}6SZp!4{(]4Xƥ\v*;^}.Mk7XFJpb܊ojZuýV~.h~(֟wiMl3MCy⸴hʅ py.ϊ<¸{xf@ZotK&֭X%Qmq w{\IQ*8Yjw:\ Tp&dH߻mʌ̟.?(~~x6Zt}DH-׈4hS-;~qa x{Ʒ)MOj6pO}~XED`7ffbIT' ~_kg4/WuRgEtBs%1˂*mWh/>xN x#ͧ]h' 2^2ZO4]v]B;nFraDI6`V~f:  ,\xڵ{GPN漣k WÒir#丌]'aϘk "x/IO.$ԣ;Mv]WFxZ-ڍx!B4^WQIm'fH'][ dS{_1f8u-)!f-?g/u ixy#ungř8-tʀ~vi{j:t\&mBݶ,pI+,e|3N|N|5..b67q-޳ɩ[l5jO.%7eޱ'4Z+_j } s7~kźAi`a$-4ȂE1ʹd,C(d[<1p$h2wDoOib٧I֯c*+?ᓿe#~ {N'~?F+/|^u~ $~m/帻<4A s@ ,gw;Y7'1_;hQ/@:Yi6NWNZ-h eRv _ ?q5w+uh{as2miɱEzO2wDoOibdg߃?ӿyNv|(o Ӭ<=}~_J䷍Ṗ !T1(hl'Ʒqwm“?@K&5h {7?dg߃?ӿ߲=wh^(w‚<9{2ksǪ>9-w$fv 4OZe<=iyv ktX!"Ide x$P'~?F(N3 ;V?XA;YTma[X @4h#ʽ=w~|pen0eK}:G٫Epv=!+~>arZMڿRErZkypvuRĀ #]RGk˻N# 粢)$ _i_u-'LJ6ZLJct^u9,;Hl5t-,md΅FbDdQk? o|ix~5Y./t;;TPyMŅ;鶝>XGh~]`NAVF FҴV;krcsX%1`&.~"jsFCxoV_\_Xz),%k{cmc @ѿ#JY> -zTJwgѕV$N @W=<7jVO 'NWVgtFݤYI&(V_0ZW0񍝧-{}4ChOzWȴU7e%T~k*tWBu["ėRYivkMʥS#n@6uhּCyOo4i(>=ȯ=>#'z+ [ƓMESG6cdv#m9uO xE\Z@SWk3VY/C&g;ij=Tc+|3DF.u]\C]E]ݵ/b' >rҍ@Q@Q@1@៌Y?Pbڍw}o A+dܤ,|uvHn'?F(7?<׿'~?F(N3 ;WɟůCWDM[>76$6\ kqn&<}9@2wDoOibdg߃?ӿ|FC 4d^״+_ A5'݀1dhˤ]~_n+mgJHX.4 ;2qn|K+ ,gw;Y7'1^Gy} i֧SG5%֯OHRhCaTXNNh[kxP|?nJ&{L-PQZIOGԯ~eb7a\g֚j<;xFMkV]ͨ$VvQ\3j6iF"S}E|6?Q׃ k`Ϫx=kap1r۫f>BQ! 
Kcm COw֧]c/owyEqUw?o}&q$Z+;m5On{o:^$շh Λ$K$ׁ FYJ*涣Icm⸼] OA4f+fmK#kw*Ғʊÿ/lCkZٵm")txaiQ[fit"[BY^ Amc{}GWOX]df9s,BXB}E|1cZ&t>&{'?<[!$CHQ P`K[&2yE՟7ͷ kWܾ'u/6ZGp]DTm >XI{-)mvMrR9d*W#n ( ( (.mf`FIDDfbxOJ|GĿi/#.2GI2\3]eQE()Cסk>#<16I($Z:,6B g,p xOw_uOhE23B̛%`ph؟ǿ|/ c>־:=ޏ}k /xfy`VBcVݞ/ ώmǟ/j%ӥdWk ĥ[aBW9-|F{_|EsVڵSeDA0$/38 3UU-| jwmض(o01~>E YAi]BDY%I%&@!~8_ k6ڥ퟇R&s08yʟ)R[5 .>W}8:|91ɚ_5/迳o^˨mGx Yo66Uv.-V@x{U.i- &m$R =CDPHnl&sf>5+b5VLJ }20aWzo*öS_g[.H2"n[j"7W*#[Pm '=*Z(ſkL_}g.[As8O7@3c7zo%~10?׿:$ȡAP}_ݿЭ֏X ׬O>~sYҏ:3x;h/ jV&H5m2KnHj:0ʺñ//#DÞEiFHm;G%pYٝىfbY$?r|F_c[ ?}E| xy euYwilv56(zG[@j?}/φ~3OWPrG46?x#ir4g ?S@j?|?x{ir_[@jSzŢM-Ω/7Q,K~hUu9VPA}x$WjWe#⏊ ~-/{-hQl<2]F#svk(Z?m|x&!F/4 朮 wY q;.v/.i7F9|vj&gUy=wr+V>ׇh |ynͯ귚vwCraXs9ąGYY" ~~v~j0x?ý[n7YmN]9o $ęߍ>?ix]񼷖x <%O տ7Pp9 m;WU{/N[3\I&k5K!0f\«y~_e,<78|I|75mN %t'PgpZ : U022 X3(?W׀xN2ӏ(u@ U -3_<[yf+Jjzw8wfocwO .`8mHi -3_[G7K95o\KL^9$Pc;# ]@+K|:GW*u@ UUo>&h?c1 I2ƃP0(ſ'|OGq Oėv*Kx d<("7?<׿_3~WQ8xTTKr~wFϻ-~;xᗄY<1qw.V]i:I}iX*Uʶ'~.h>&' :{ƨ]b\bPDrh#ĩ>U5mgu4H$%pWw ick~{9#/M;A3I0 n7ؚL$U1殏tp4%xBKsmiV3AXMwZ(F3N9Tyh~:)g5kľ}6{ǹ3ncY@Eش_p@ǟ'įGoxN,'iK Z5YudwlႯյ=_{߄v|;MNih2]\)ȍ$ĎI(?k-r N;a b!.39$f**V4 ~i9R.io_*]?!B;>'a] oR`WCeFQ e 2Ҥ%b`vp۾g\-.; Mclg Ũ%[{֑N&gʢ/[kGig+Y|=L]&ed78*Wvh-^xTjݭ׉c2vBK"G4@BFr)Nͮm v/ebD76f$WBpus-6:4~,|%=$mn $; ߽v/Ş&x K.j3/ge^C$Ida W1+8 ͯ i? ⅰ֭ecm-mOxۼs h"yB0/i}mC}ೆK> K ̲!kf0R2Oxx5(" Rb=aW_Gc:w-t)X?Yeeہ(U`e ~ C2gOJxl1t0K+0=~@:~ƈ?,2 #Q1U\W|O;5o YȝX+t-]J9DIy>|9l|[\Դ;C| r$RФcUaKaTK?Q|C=Iyz˸(l;0~N뜌^i^ uzneqt72kK! ( ڹ>?0L&u{ HN(H?jWz[V7ZxV"f4@ņOwq (o5OZ[|nFuϑ#gI"+-$Uۑ~ ^޵oy ާ%g,!Iܠbhe:&tw_ˤm;O5ֳJRvsgQPx?eڗNi3u燅nq>7\6LŽC+3'_~7{Y4zvxd X]dP(, X~<|P =_? 
Zd ^1ҋڢ6_wofeoMo]Ix}Nx!mg-泄UaѶ]>AɜoSВP'f_4-~=V孾oue(e;΀ (?((((((((((((((?f'Z~;ԫTu{Ln|{ oRlm+ NwF "kXIa" xWW~KL?FѢZE5O@<\w1€uu>" xWD'__P^ /g}G.OB|g{xؙ/8r\<-;H=1<6lɴmGuqyqw4%g{h"Q#;I |((.+jr>$} fmP~ycv]Mȳ1I.$B|ŸF;ٵ-#^4i!J4yy,;x _zͺF.#_(~Ŗn#2NH,+b6r}##L?nSSWwviwEahYha̰صr]]eP(XڴPx< _w<;4)^ۼ>kEy6zdu^,Oh0Mk@E` u2fD9F*|7TP#nmuJ {N#׉y wiwvVg _L{_/Wqwi{cԮeU2дUN`>EyG~O}ZmV_ĂX@[UHc!9Ӿ_XM_7c^'Y0FIʲda#l_%5Z@'"Fy⛋m2 0v{ƺ].'0Ć^c/k?t_@ն${ɒIiJஐ (((7?<׿ׁ;h/|;ғĭ}k:9kkIRauxdWI ,핟>" x[< _ogաүO>+S&F\ʶƲF(t*`D'__Q /g}@\4=o×>ߧxNѴz{tp COַ62EǑ䓖D'__Q /g}@o;_x{D#3ZZ~;ȭe KQq%sAp- &Vd!WyRvx`7[BbdB`u |\W2i1=+M}X|]]j_ݤbt3 " xWՅ[|LլhnGD쵛X:ƙ2YswQq,1b"G2FQ@Oy&7mZhFx؋Q7.e+w i[p48m|҅w,Arq[O^2j cD+$" H x16d𯁴=%,he)cRdF;( gJt=_}'2vZ$7Hܣ)Y[dhg 1[ÿ>xFL𷆴&FZE7?[Tl2B0_]x Vai~ӬtfHc &'UGT(((((((((((((((((((((((((6'fk c×w^Uk Rh缕6Q nS_h%U 4}A$m/ 1I"3Â+i9R. ( ( ( ( ( ( ( ( ( ( ( ( )88ff8 $z^_Q.b_[E43]7 "5QLlF=6E A4Q^'⯎ѵN.ɤhlǧnwH6#++{ko-xNı;c\ɑmR^HO/4ne-葭;黑-{bS; w~(kR7R!clsr:[ (Km#x# |,:8\($x$LZCnxjίzM_j>g`EYgWtR)m@4QEx-$Hiu#?U׿EPEPEPEPEPEPEPEPEPEPE6[jV5jj ʫ4xd`;8 *u}+C6X`"Ɔ[Ss7I+ FIG~~XI .ոH4[{X!H*L4AHt0kh I-S˽@F-a`Hx\s@~|*C×xV-:ub6Fu+ue@Dut~(KObs'&S"+G -hkVkK8R1ޠw¾tK x3H\vp=NdIO$]PEP^'ɬ3nrn#&"d;lvivQ(+#JxOyN.m"%.t0Xȭ-`c.>)U:³WX66 +NZiv/#LkYYP7@=m|o;EO |/e,)ci/~)z{kMUm MKN[+ c"L: ʯ@QEQEQEQEQEQEQEQEQEQEQEQE((((((((((SE >ZW熩KQ֥=wR6ߍ}=Iq{ T!N(fET Iay9 KPKn3;>7lOKҼ*(&Xk n o>Qa TӛH0ܛ2_2XW3孷$=iĿå:uƪ\Fѵs]O:AHCm Ƨڍj 4F018vfI݅_;|];(j>lmU/tX徎H顴푙3ˌ׬3W?k ee5Ԫ4ܱٳhu J&ݙA%UYcWT!zGj;L#7?Yؿj>Ϸ;+hُMvǓՏ7k?\2ir[3d{&p|qq8-}E~p|X-MKSS6ǫ˧@׷3Is'Mp^C$'<5\≤rEuK[jp]ؐ\NƷWegFtȣn_DhP PTzaot=Moi*աҮ|Irlqtxd#`;g Auj5?A mv]nZ9Yե7 ,~1oy*.o|orΚh6{x=K +o/_> x|D\2xC><= f_Vdko*y%?ٌ8ׇeM˻ߒЌㇷjǾ1~0[hWKiA #Y{aimhۤS^"R1E?1_|O ]BQ47/-;BqÃ@Z=|-[hUypZYAH =((((o_> : c֮cO6my2ܶ=Mz> !}~NA=n}*y>xd%=cnW/͝hz< }cidWf);@g3\&o4{?<_eޫAiOH/O%U D@6>'O.}bÖ8͜z5i5v@.$dY66 @ZOH>]6dku(BI4@(ךۇ@S0xoMma \Gmp6DGEC'ѫ|xxnn}g< t J;;2U s΅G)Z; -g`lug^a?X' 5y`_mp8Į"HcB@hi? 
44lqkvI#kyiOUXcx2ѝ-A(߇> { hz'4ScYq )K6ʻd၃_|/?_Ak:K)gG0g,>_'<_vlGIfƭh[nY5F$ire*W^/LE'ZxZ՞Y4;9((((((((((((((((((((((((((((?io'ZG~ԫKi9R. ( ( ( ( ( ( (SWҴ;'Ե4Hk(< ~YZZCj62j-y#<8Ñe\G=}_Z n|=}ַ~4Mrh㷸 P?X* o)򧌯uGG~ψ-Uݿ!KԬ~+i1BZO6P2Ǐگ]CCǩ&̓Ow5H$6mq0V.v2qv~ZmݥZM:6*q 2<3iwp:]1?ᏄS}G_k GMF ĒvܸxVQNM+| a'X5gYXxWL[M |6h%֫q)EeX= .?K{k[ b=B;iR;-6E-mDțbx+xA}6OafI4—hbd9l|Ī6Ie_kykDivwyyib:*---l-a-c("F000 EQEQEQEQEw<=+?d5&x{MPEQEQEQEQEQEQEQEQEQEQEQEQEQEQE(((((((((((((((((?io'ZG~ԫKi9R. ( ( ( ( (_O5)4 ф+H"RWd FsC1 @;ASEo>3-̋1"fv x5ڎY"ҴiA[NnmHuHZJ3C.#JE#u|7> {/x{YuRRm`^($}6EUK&JVR(UV$gx`k&=N[9"L"p? H|go X>дԴV[t?EX)u$Ktco'ʁDH|=|C;?i ݜSмE\$2\xP&Y<(4ewӾ4D+'"!no*aH׍1lI>蚞O %.; ]h8ta){ }OXuqwu,օ.K8#mOw;SiZ–qjW:uJ6FIn.>׾UrƌN\xji?ݾo=6%hI)LR1EUt~3/PzF5ahhz\2,a :,Ik[W"[F}' mDž53BէlGLt5xE{<w!*^C+ I_R5OR`fu)?/vR-a+<&쁥xza}WbskVTJ: w{,Lo@T}@6O[Z궫h,BE2I4v#*QPEPEPEPEPEPEP^'ɬ3n>9xT_tk.WF4q6˿9`J\I0'k0>7/?SvHF3zd1j񼖽2,T pT͐((((((((((((((((((((((((((((3_|AY|#G9{6.\jo}}lvܭݑH`0d/"X"3GJ?f'Z~;ԧTs#ui(s#ui+ω?^ XnbJԯ#}\km,(ic5?3GJ?3GJ^|QwzV \Z[C#!"|&hZď Cok:W3څ!qEc% *0?3GJbx;1MXҿK ?ټ7gٿ}<ڛ?ڛ?<ڛ?ڛ?I_Q?ĺ3yRË) lȍH2?3G:|g ߊvP3~`EWX/%$3x$mΑmxؙ/8r=*i=ƫEceh$Q"wbT T+:O fIV% r(P::vO>$~?!ԙ<>JQ_ж6<՛/V"\KQX" rJVxة${JDj<_uud߇gҴUMF!MCL6yegOl-*nYC1jNtmgRD񶧨%̖gK Qƪ4g2$q(Y/dUEώO .QwYfKm: I淚^=E+L˽F`# ;?Awk{mm4 ZJ-hRO$Gik;)|WCGU>adE6x$7e7G$7e7^Ex$7e7G$7e7^m_^kמm'ִ袞'})%mmpkn<?ڛ?oIO[xeIh nc5+W}8xgmwLFbؑVuίzM_j>g`EYgWtR)mZ4QEg[jMg{w#ZX<M#@4QET][\I40LIl9UXW }Uye j^ 𞝭kb,v\v¬]d`:**Kd[QH쑕z8z (hdb%.*$ hG"J,LAҟ@Q@Q@Q@Q@Q@(F|9o E#ss"XC JAW#q[tcwuOZx/CL =sTc-û'} GڻOZo.te }k,2]) ʪ@l+~~uxw0:⩮!WI123,S7v߇|;^8j$v-qt-O)KY}f6(5!YAۜs߲G_]=Om_M} ;m%^$$*]ꅙ[*@~4k}ƿ}o=r\ۈ$*]QYوR2XC>5=/loWRMjUuHaiL.C c |/W]TOOejKu̮ʧF)- a>|~u|;|t뫘8M@D*uʟ$s_̶yoⵚ-?],X|&Gnb89<濪?_h;= l*6oȡA!p1{(!QJg,4&s}s;nvImRG$\C73n>?>*O~⋏y? 
k ]j%,mʑLw X0ۀ /~_P'[I\1~!`% /Fy=>Ϣ?O?dDZO!s-G \C0[5KK'쯭x~h&0xWq%U$ݐ7q%[>͢bVCRVhlYDמ--%#iXFDhyU&,&>;.%" Dn!pA@XQ_ xG3xWwZwYcM{-%wرJD(F\~Vpj:uֽsku _xtee`AȠh~văW_sLtWfF70EUQfbxD fش_jr"1 psŵ\Ș͍o?:X&N[K0\2ꊩ!VRb`qW{m喫E9#YeYYu<A#*fݵ?$E[Pt YM;Ѭ4t[Kv/*1fgE~k?ߌx'~xO>mc/j}_NΝZOxVI:Mq0g6ee$ɯ?e5r,WiylHW~݅X߻?g /gΧx}q%ķ"7^jC$A ;$(~dnݾiu͕BP"G,.)Èo4 V1n'L 5ER` H!9Me~ Ꮒ3~ i{wj6I$趌#Ep%^&C%źm| 6[sD5.6l%"|?5 Sg?uѵkmb~޷DWaBx""ݤ r1|#~ 4/ vH 4k'VNK[,I`fS_rذe?k}4Eh-|*-[Y"R(HFb`.v_&Kƾ2ex07j1V 82g|lT8/'Mٮ@nZzƹ'u;YmH ۔Q@|Ǿa<ާh73*ªB(,B$_WWO7uM)b<:42 pЂAmgǍ_~xvM2Pʲ"p ./.eE2(Gf~XbgGM7"ѫXuXtk9TZJBs|Fy}^W z-a~MM6rFH$ ch>*|*Լ=mahwS4MB$X%cg )RMWNK_xEˈm&-Gr&<l}(_5oww^=4~4:G_(Kl܌U2g-CzGkoZ5_nw~j:cI͵w"`+Xtm1.u$C*q$`Up "?xE4 k>#l5cb+&'`d6~.&-!'W$'ωׂ:Fb-[T108pfgXy/iIqjzg?aKcru;Xn'?[,ejnm?l> ~!^𾙩CG_^Ϥ'lʭwq|QZuWxF::(i%>sg;xSR/\xڟĭ|=ᕴֵyM ט$)_>-x 37[wj!YPFѱVH&7=PuOJ/uoZmu'<sh4fOѫxF$]AtPᐲ0Znj<=_^~Kh1Ӷ%as2 :Ǐtխ|Q5υ,out_A[u*J(^}/sƿt ;zA<#iuk⫏k,wZ֋*QJ)x9nCtmme_%/[LaM=|!ṿ+ySRiij~`t;ffrY*3O_8|5[cᮩvxSڶsͥIVE$-k v"Bgmr|M?n>)|E<;x!5O/OQH1tŧ$BfϖLft {-ncF>0Ȍ2 BhW%:ڝE7XCh67$b|2dvȧ8aV Py!%ψ47b[i "kEV;`UO~|VO ~_+?POh4Ź/潻OFwHe%<\I{->ö2t+y ^_]sU @'iqAx|6i?}.i!YNc g~@ ~BЊ-#%Qm<㷵@6ٗ 8y>Q+oUC Z|=~ KƵj௏G>9xdYxT'v7>msIJNzi|#G:l-e?L"O6R@4Gh#\`V>B}Oes,A5?}|c"Vw<N:Wοf_|3X=:Vf\Z-ΡCeppW~ g{0G?5OZ>UdE!@A9Y$Gz?j.5/<HmN|r4¶>#' sa&̷-)$;ÏFo4}uF-`d`_nݎ_ŸOYaײk"K$rI0XF,Sў ڷ/#jvSMrOyx0 n'hǯ[ ᧎to`q{ ̈ߵL7W$ \ϏiO> Եo ZZO$L XDo \G+|#[ D5? Zk> O-mz®b9M||6>$>#~j>=VQu]N4rҐ@I3  vhOӟm(Aեo- ztRCnT) ,y+AɜoSВP((KƾGč O |A,|IJP[d0Ñ[Ey2[M<3ܶ"IwJG-+GE~֙"YW,ᳶi$]P!f $d]}3^s7sMK!@-p^5>|G k#hk:y6 (h# G= i:v0][ITdIU*q]PEPEP^0o:h7z5ec=>[aZ’KS3!T ˏOuh<S2:Xk.Rp1؝#Jc?3ߏ$5h]LkOZEȿE4]aoË/ ;*VաY N)^6o2D'8b'9-5̤ڪ-n ¨ᶈ?05_ H"Xo:$\Ja9o({o[᷈x=QԼ-xPIqy,-XTF9g+o?Mlo`keej3#Dɯw<#»S2E"Ѭ 7})_<ֿl]CW՝Nh}Cqms8$* s|U9\7xA|$־[YC, ? 
?f򼓷:,?C*ԭ4ynGU:4Kb W(|5&/ ikj|F/fv|ݝ/N+6mm5 ohpxjl~̙1)Kn^?''E}߈#Ol]e3YyN8pf¾7_\~[JԢ峎TU}BEXwD!f7.w.a}"X!{uud@Cdxv3.v+^Rҭ&YKKx#f dڍ9w^^<~6XT+⫋;YMRK<`K7g¶m|&xO~=#J.-7H3E 1k2KsBJ3A!F,|Ӽ!{%u'Y.uD۵&eػI]m@7E3Oo.#ݴZzlKi&6#+n, :mKG~)j"~: e}uקKJ]4Ѽ wfTM?ƟʰirkIM¨V4v %XYK(pF $~/WK5h{p$Q+a5gz-_~mmcnڌR2NSm*wgn=|ƾ5y [VOk:D1]aa-+ o)c&GQH-y+ZO{zo-5N).bZf1䬁e.{[0<uJ xlkIFN%q'',&_ %Y!4 CH4Ժml`!wScػ.FywWxK)}KIֵMmVҞ8T\!SK9}r̿ xoϞ< ^MGQӆ$$uPnJOm+i _3u֖_<7xo[ǨEv˂J$:AOomiWzl,ΝoixFv>P1W~>uf|GKXw,bI~Qp"#i c^"]h\j4RJ%[+٭͌3+,rAc/:~XkԚ2٧o$OA6< |>щ?m[[vYPKbH"|ܑ11\tQ\?ĺ_.|[Y|lLBv30'w:(<ßtLo|G^_<i?_a@&n4/F5v b`2@pN W>xZfηX~d唔KTOi54P/?e_> Ƌ./&REgѬ]N=:V ̑]J.^8C Rx^O-խ:$vNizt)l_4M}it4mι|^$J`*-j %h= ~mkYO鷺ΗW2haEf{C5HbfiR:!0#ߍ7v6G!  _ǯK[Q}S!&_;`2I\S֏_>7{=1_5),u/Fk Ω2NQSЅ (wϋ~ikk{YmKeg[{gpVE` eP^K:?s'Z]{|H[xV9j&l̓ 3Z.R"W aUI8w|Bo/4+o.45[Y?eD|4^ER|كuÍsF4P>'iQ[UdXلed_J(?٣Ὴ|=o"5Kz:GK)O;E[#Y f4E m|ޱ$RwRw69ω#f.u$"T"/~1xYWÉ}/vrXjwt7'?ø>4i+$v9V۝rkϟ x0WUo/"; NDepPk?~&@ >_jO"EZ?.6o GWĻkzf{ZgIr[1,}Eyw >5>'mqufv.dNp8݁WEQE((((((((((((((/CkĚǻ 8^q ,gw<;Y7'1G2wDoOibdg߃?ӿ߲=(?G»(|-VoR+kΙ:[#]I$moGou!5?/__6)3B|Y<=q1\Atx5\WB!]ƃ-~ )Y+yn)arm2PR?Chм6u+4ܷ+Bu>-t}2)oac8ªNZآ NfGzl%%n@34V̸+<H *AsGƋ?Eiz\\\"Tx &?2~2xKWz߄m146p- bH$M2fj vdڈ mq=,(5>w~X"FڽӿDms_X]G=ڳ+Z\A'U>Ky{e|Oм(|5wM w ˙nL̐XO~.8Ih1]%Fx>9~mV' ?m]Oe FLY4і۳p}uZ始d4Z^N[^\Aj7rhfQK'%gHba~FVIY~?i'}7\ͦ%)7gYaoݤі \+k?>x]!5y{ &auwRm 2"A෇m5yi7 ~i]6c6r=B$Rb-z_>CmG^ּx*Wsgz&}_9gDy|gKY]>}.VEk=qoR- &8M6i^82-֚UVhcEeVnjǦE ۭO*G7i+:. F~:̖>rs,BxhxtYH*[Ÿ<9⨧J\y巹}{$TVo-l[_ 5Iǃu'u׈|uf'Ou&w f倌;#'!-kIMX=oBo_BZkskJ29o-ōтWz!פ 5]fKx-3&#"^$ u%j6>.\]k}-#3G\@'iA#y\yG/߳o?k[kQxךmIk[[ E0b,)?7> Hn.?Y%mSîZ]>T4 YmmndymIsB%!$+;kJզ!$hⱼhd՚ ;'ɺ %!(p.a⏆Z =k\K/?]6ST>ף-acqe*e 1 ۣ*ox{65٭[WT71?zi^5{w }(}eW}ųvhUTF22;_Ρ%k6__}t6[-źb!>f'8rPߏ?h)YIl. 
1_ …m`:"N6b{eȳdPf|Yg?_ V7Ե K+;IfVhn_>May)emqW\>HHa$Kiszno7"b֦KH :k>ȲܤQQ 7̉Dc _<ӟ ֺ֣m촭UTas."c(m3y}/~>#5͟<@w6z; CKfۢE=(Zu}/ƚbY.m$B42mfRVe'k|gߊi]VwkqF[ D]xZa.[DxmXI ƥUTp}^'ɬ3nf_~*?/?.UJ(?~_]4:W-+iuAwZWU࿂MZojW@3EΥ=ıRʌm\qPEPEPEPEPEPEPEPEPEPEPEPEP(((((((((((((((((ڧ{f?L |L|5'Y/lH]Y(w¾(((On~(|+/gUe`L+s a䘌@9)޽B⟎m4]V]K w@ฝ-$ϱL`*"~<|(ᣠ\Zc6fSțt[:-nQ8#$?AQ@xltæ=+ ̍!k}bxmn| PH;+߀?σ-֯Qq\:};MֶDacG}E~zxQ ﴴn|PO,LmІnxb6?qw Z  jOkEg6) xI?x T-}E||q}ᴱ4KO-u[褾8RY\\bFA,I6T~|<׆53M xmJh-֒nvM˕a~ |d/4Zi6S)ַ^ uM. Ae[B "ʥeT+& _/jSYXuRY,KHCP e-\#_56 񬟳 xLm>W7<B/Eo#4HLčM k---٭HQ\I:)~ࢀ>cY;كEQi:.zve=k\2Hrc(`̱m|>xu/g|U?$k^&sinf0%Π DŇVE_P|5ώ?ωiYKw5ˢ& ŝ\L3)Q 򋖌4[KR5J{GCf>,Vy3 m9LZC-Ѥ˟txn t9/txFGkˑj~忇8'ˆ p | qN^::-x_ges=Ђ\hQECLvb ~+' }f{zj2x\c G"is"]M(CQZREQEQEW~k?L~ ؙ7A@EPEPEPEPEPEPEPEPEPEPEPEPEPEPEP(((((((((((((((((z}=~a}~e_@W:!:cf[߇QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEW_lC|,)"xCee8 .#|-k/+#@;FæA' w|?ݷإ<5!sqWg{u%șV7HP)oRSٴ/?uKmS]տ>A{;ilU]vi|I 0r4^|EI'ҭ4&+%կ{e}1;"BRL0K0k]~/]oUt9/?![Kڬf+!iܣ0HGmKeWn>ynOuM.lq5',Z"G 7x?n? Maj#'Ylⴎ6PLSʧ sd|&"Gp7Z}ϥ#wtd0)$0 S~όOy ot vi[qKdT@>10x7F5.wŬpitIfWass hWO6<~=xD-57Va&]KvӓAzq6wa~o;Mo(LJZ38ޯܺDzo5dHuج>ߦq]S~gB1*H '4 Zcxi;&$dl;#]w.8j?NMcZ} AеXugJ4K}cE2X}ˣ|)?3Gs~OݏpP~}a[FYl^]FK;km\0K+fgyX @>nJhZgdS#F)A4.#ʺ gy/|-Mkkۘ%DnRH%F[k2,V@jzfV1<\HE QH*Ib@@zvaڮqI<)2:22AGVGo<4,۹X8xJU(B.9)0è݃((((((((((((((((((((((((C;Rt\~5|~Ny4{}Qճ(((((((((((((((((g3!hj~ ؙ7A@ƫaGo x| Ck֝g\%re^Nn]^&w7/No /K[ϰ$&+ek4;Hd!I^DL+2=1p3`H?~?kw~ Đkz]SHI7?.Q+oU"04U 00= [C4<5ZhjR.7XIl3GtzJ&_QQ+ʵռcCTXaԗL{M#qnz a<ݞg@~(G4xcþ->%GjA D_)a:AR%ʉfRgf7Gmx4,/\GƗ8sβ6Sr$m$CDq’<{lp2{gJyk"V@>t?hozB㫿x*_w6Z;]kB6OlXۜn#|s4)Gi>M{j/in`pXژm꼰0Q2,X0gmR =G@϶ dm̺W7KKn.KEuFhe(ݷs_x]nuOO y][.!Z%A[bm9H'28a 71}O|FjqxKA{j'/twZMAn!]AK)'KfB/4KOxoǨɎQ=j7H *K"h):0V{JȎT(r1ƽK 4:t:3Y[æ^jw%DQ۷|q5쟮x]ouOviz޺٭xo-meM"## xAԿg>j|+%JE#f*B@>((((((((((((((((((((((((((((((((篍m׋~|MoFo^ Pf[<-q<|FN]m;=n[g#XXd{0:|g'|/pF cb`?~aUA`(((((((((((((cg{?x?~u-b#v4VɧHXe쑔O3 ;P^ ,gw;Y7'1@Ex2wDoOibdg߃?ӿu|} ᖹUּgnj-Մ.b# kbO)?ǚd[{͟ޟItmfl+hhz%ljux]3]It]ApA3 uC 
q@(((((((w2CGk>(|4r5Ox_Ǐw4W_~HԾ*RԎ&?go-n?O$h +j/jyV?_M\+X[Ҽa/x?1bioo VPsN ӏ8ѼI|H4;mN|%v]WI.!rV F yY0*'*`>L-|X B"j]!0K[w1Jξf>?xqx+\.ks65XymܣBa\?J7 8o$:F1ZR F>ln#D]$@+w^[k:TEյq]^__!ssFY\g8?aZ]=Ň].4،/}gnm":zH-1Ļ;cO>É9=ϊMcZNX[5݄6hW29x@~,4~g#&g|vwcu i/ZݥucxO1hz֙0ob[,T%9O^MkcBk:kZy\G$,.9yOG[/3S|;kIӖSiu3EHK@9N4srmxF_ =7/Ek+g-[Sy1jZ]ǖ hgX}A?l-ƺ凇ӥmE֡tW,n1 KwueFV  3\_|e*k M~7nG>:q|]HY+~z| &'Q׭tѤO˫mn j6]s^#O=b=]6RϣLZ[c##BgG? ܨq+ڝ8c1|~unீ>'M4qwZ:}n!)]2HLWOZHs}wTxF| xwY1@5=b;9Qt+gbv,J__ڝ' ENd _oV_Zoxú|T5[kZ+[ۋdq,*!i$ܤ 2\ns~6-yR+-L)$,DvQv|[=k[ \<#x]V¯^xM)'iZ|c^) )b,K|]#&> 5[[VF5%/=d-vc*}k?}|W<9u=._>VXk>LfpC6׺ <cFBhդ/&@Ğ[O7~xP GGӢ}R;GmB[:-db(]v+p’l;D]*?4#zAum0^#x/4_-u$Xb.+Yh#۞o k^, 3"u->So&_*= `dgi'_xWrFFd >t},Ah#@:(((7?<׿׀xsN ӏ(OƟ<1&k]k>:4/nk Ysa}5gD?mKwޱqc[\ivέ5;DD6z;W>1xm<' OmGO DWM ճoh)c6Y^W=x|yZ֯ iEKoe)LsuqzoF6=m SE4ڭ.8s%nWYrYTP%tMOTR|?y-BKiA4>t6! x&ķLu?[; 7-B!6vz h-?Þ3-&y`Ҍm"Q paW J]czvS:6km隅ݞ~RC Bre_,በ_ػA umJ{a-m̴k)kۈ` TdcM?RDM㽊KɄ7?lYF39f.ѼM|GW|k[o\xᮥv-].rY /A_tuӵ vYr# FQ kfjcsnM|1_|2օsj>$^]%O=قm+H` ,` 1f%i_ÖچcNl^X <>k& 9pF~jLG-^Ѽi4Akzݕ2R.Tdj0eq֋OΝ}ƾ&\x_R|-6l-7MkiЉk wlx yB|9gUInw%nSJЧӴ}!9򰋙e)&/Mg^0#--'lo-t # c M4VW˅+Me񦂺΍{Nf 2YёW|>|Og#QԣْK>Mhˆyk e,KBo>dҿe XioOj~ t_M<VesͱXh_%O>|?~.ּau-֮֟-$@tb rryo ( ( (<^߆>#G{ޔ%k6 Yv[ZM5ŝŜҕ 33"HYdFlO3<'_!>3 \<+(?r|E? 
/O3¿<'_!>3 ~2ha>->Q; lxnYe󵨤UFE9z;MgtD'__Q /g}^Ex.OB|g>" xWPD'__Vm3ViSoucsdugo ZuF14ň@\ӔPEPEPEPEPEPEPEPEPEPEPEPEP((((((((((((((((((O5kht+o[ƗEʾjd%9a&KHuX+yṍgeTĐʡu 9V(AwHQWY?$vNX>ċsPV[ --ޗ}'&Fsf1+312-zO(Qtk-YD7]pi\GI$^0#K xWPk׿Ѭ[Rmђ77+NG&#l|= x=xGH @P:*tb}'ÝY.]+Os%e~C4ePlhHЌGeV PxG¯jv8Kqaa.P^$V*3 Ryo_ _3|B M⋔&l7Y昮9+>^IGc kX4WK{+kBKlȌǒkߓ`D,Umofԡ # IKA'sg=i>m6YXƐEQp*QEQEQEQEQEQEQEQEW~k?L~ ؙ7A@EPEPEPEPEPEPEPEPEPEPEPEPEPEPEP((((((((((((((((((((((((((((((((((~ ؙ7A^^'ɬ3n=(((((((((((((((((((((((((((((((((((((((((((((((((~ ؙ7A^^'ɬ3n=(((((((((((((((((((((((((((((((((((((((((((((((((~ ؙ7AZ_|9᫋7vͽ0fhBoihArnQ^7.MѴM#IE aҬGWnXBM}EPEPEPEPEPEPEPEPEPEPEPEPEPEPEP(((((((((((((((((((((((((((((((((/w׈>'VFݤw0G y@Hd~_L'h:g,YwᏃ~/K[~=I.n#ICFUKBi~ W,J]eYX`ֲ= Z<1apDE( /ߴ_g5=SM6ZA-5H4iX 1|*esWO|Lgᗊ~skBouF*X.+H@?F]5/#Qԓ(GIĢ(((((((((((((((((((((((((((((((((((((((((((((((of¿ gYY.,""|ƈ&Ow dataSize { return errors.New("Wrong length of data for Ed25519 Key Import") } public := data.NewED25519PublicKey(raw[:ed25519.PublicKeySize]) private, err := data.NewED25519PrivateKey(*public, raw[ed25519.PublicKeySize:]) e.keys[private.ID()] = edCryptoKey{ role: "root", privKey: private, } return nil } notary-0.1/tuf/signed/errors.go000066400000000000000000000036521262207326400166120ustar00rootroot00000000000000package signed import ( "fmt" "strings" ) // ErrInsufficientSignatures - do not have enough signatures on a piece of // metadata type ErrInsufficientSignatures struct { Name string } func (e ErrInsufficientSignatures) Error() string { return fmt.Sprintf("tuf: insufficient signatures: %s", e.Name) } // ErrExpired indicates a piece of metadata has expired type ErrExpired struct { Role string Expired string } func (e ErrExpired) Error() string { return fmt.Sprintf("%s expired at %v", e.Role, e.Expired) } // ErrLowVersion indicates the piece of metadata has a version number lower than // a version number we're already seen for this role type ErrLowVersion struct { Actual int Current int } func (e ErrLowVersion) 
Error() string { return fmt.Sprintf("version %d is lower than current version %d", e.Actual, e.Current) } // ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold type ErrRoleThreshold struct{} func (e ErrRoleThreshold) Error() string { return "valid signatures did not meet threshold" } // ErrInvalidKeyType indicates the types for the key and signature it's associated with are // mismatched. Probably a sign of malicious behaviour type ErrInvalidKeyType struct{} func (e ErrInvalidKeyType) Error() string { return "key type is not valid for signature" } // ErrInvalidKeyLength indicates that while we may support the cipher, the provided // key length is not specifically supported, i.e. we support RSA, but not 1024 bit keys type ErrInvalidKeyLength struct { msg string } func (e ErrInvalidKeyLength) Error() string { return fmt.Sprintf("key length is not supported: %s", e.msg) } // ErrNoKeys indicates no signing keys were found when trying to sign type ErrNoKeys struct { keyIDs []string } func (e ErrNoKeys) Error() string { return fmt.Sprintf("could not find necessary signing keys, at least one of these keys must be available: %s", strings.Join(e.keyIDs, ", ")) } notary-0.1/tuf/signed/interface.go000066400000000000000000000037241262207326400172360ustar00rootroot00000000000000package signed import ( "github.com/docker/notary/tuf/data" "io" ) // SigningService defines the necessary functions to determine // if a user is able to sign with a key, and to perform signing. type SigningService interface { // Sign takes a slice of keyIDs and a piece of data to sign // and returns a slice of signatures and an error Sign(keyIDs []string, data []byte) ([]data.Signature, error) } // KeyService provides management of keys locally. It will never // accept or provide private keys. Communication between the KeyService // and a SigningService happen behind the Create function. 
type KeyService interface { // Create issues a new key pair and is responsible for loading // the private key into the appropriate signing service. // The role isn't currently used for anything, but it's here to support // future features Create(role, algorithm string) (data.PublicKey, error) // GetKey retrieves the public key if present, otherwise it returns nil GetKey(keyID string) data.PublicKey // GetPrivateKey retrieves the private key and role if present, otherwise // it returns nil GetPrivateKey(keyID string) (data.PrivateKey, string, error) // RemoveKey deletes the specified key RemoveKey(keyID string) error // ListKeys returns a list of key IDs for the role ListKeys(role string) []string // ListAllKeys returns a map of all available signing key IDs to role ListAllKeys() map[string]string // ImportRootKey imports a root key to the highest priority keystore associated with // the cryptoservice ImportRootKey(source io.Reader) error } // CryptoService defines a unified Signing and Key Service as this // will be most useful for most applications. type CryptoService interface { SigningService KeyService } // Verifier defines an interface for verfying signatures. An implementer // of this interface should verify signatures for one and only one // signing scheme. type Verifier interface { Verify(key data.PublicKey, sig []byte, msg []byte) error } notary-0.1/tuf/signed/sign.go000066400000000000000000000050311262207326400162270ustar00rootroot00000000000000package signed // The Sign function is a choke point for all code paths that do signing. // We use this fact to do key ID translation. There are 2 types of key ID: // - Scoped: the key ID based purely on the data that appears in the TUF // files. This may be wrapped by a certificate that scopes the // key to be used in a specific context. // - Canonical: the key ID based purely on the public key bytes. This is // used by keystores to easily identify keys that may be reused // in many scoped locations. 
// Currently these types only differ in the context of Root Keys in Notary // for which the root key is wrapped using an x509 certificate. import ( "crypto/rand" "fmt" "github.com/Sirupsen/logrus" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" ) // Sign takes a data.Signed and a key, calculated and adds the signature // to the data.Signed func Sign(service CryptoService, s *data.Signed, keys ...data.PublicKey) error { logrus.Debugf("sign called with %d keys", len(keys)) signatures := make([]data.Signature, 0, len(s.Signatures)+1) signingKeyIDs := make(map[string]struct{}) ids := make([]string, 0, len(keys)) privKeys := make(map[string]data.PrivateKey) // Get all the private key objects related to the public keys for _, key := range keys { canonicalID, err := utils.CanonicalKeyID(key) ids = append(ids, canonicalID) if err != nil { continue } k, _, err := service.GetPrivateKey(canonicalID) if err != nil { continue } privKeys[key.ID()] = k } // Check to ensure we have at least one signing key if len(privKeys) == 0 { return ErrNoKeys{keyIDs: ids} } // Do signing and generate list of signatures for keyID, pk := range privKeys { sig, err := pk.Sign(rand.Reader, s.Signed, nil) if err != nil { logrus.Debugf("Failed to sign with key: %s. 
Reason: %v", keyID, err) continue } signingKeyIDs[keyID] = struct{}{} signatures = append(signatures, data.Signature{ KeyID: keyID, Method: pk.SignatureAlgorithm(), Signature: sig[:], }) } // Check we produced at least on signature if len(signatures) < 1 { return ErrInsufficientSignatures{ Name: fmt.Sprintf( "cryptoservice failed to produce any signatures for keys with IDs: %v", ids), } } for _, sig := range s.Signatures { if _, ok := signingKeyIDs[sig.KeyID]; ok { // key is in the set of key IDs for which a signature has been created continue } signatures = append(signatures, sig) } s.Signatures = signatures return nil } notary-0.1/tuf/signed/sign_test.go000066400000000000000000000172621262207326400172770ustar00rootroot00000000000000package signed import ( "crypto/rand" "crypto/x509" "encoding/pem" "io" "testing" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) const ( testKeyPEM1 = "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAnKuXZeefa2LmgxaL5NsM\nzKOHNe+x/nL6ik+lDBCTV6OdcwAhHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5\nVSCuRJ53UronENl6lsa5mFKP8StYLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDf\nBEPIRp28ev/NViwGOEkBu2UAbwCIdnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK\n6pdzJXlhr9yap3UpgQ/iO9JtoEYB2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq\n3xmN4p+R4VGzfdQN+8Kl/IPjqWB535twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrM\nBI8ztvPiogz+MvXb8WvarZ6TMTh8ifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X\n7sXoaqszEtXdq5ef5zKVxkiyIQZcbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj\n1ANMFPxDQpHudCLxwCzjCb+sVa20HBRPTnzo8LSZkI6jAgMBAAE=\n-----END PUBLIC KEY-----" testKeyID1 = "51324b59d4888faa91219ebbe5a3876bb4efb21f0602ddf363cd4c3996ded3d4" ) type FailingCryptoService struct { testKey data.PrivateKey } func (mts *FailingCryptoService) Sign(keyIDs []string, _ []byte) ([]data.Signature, error) { sigs := make([]data.Signature, 0, len(keyIDs)) return sigs, nil } func (mts *FailingCryptoService) Create(_, _ string) (data.PublicKey, error) { return 
mts.testKey, nil } func (mts *FailingCryptoService) ListKeys(role string) []string { return []string{mts.testKey.ID()} } func (mts *FailingCryptoService) ListAllKeys() map[string]string { return map[string]string{ mts.testKey.ID(): "root", mts.testKey.ID(): "targets", mts.testKey.ID(): "snapshot", mts.testKey.ID(): "timestamp", } } func (mts *FailingCryptoService) GetKey(keyID string) data.PublicKey { if keyID == "testID" { return data.PublicKeyFromPrivate(mts.testKey) } return nil } func (mts *FailingCryptoService) GetPrivateKey(keyID string) (data.PrivateKey, string, error) { if mts.testKey != nil { return mts.testKey, "testRole", nil } return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID} } func (mts *FailingCryptoService) RemoveKey(keyID string) error { return nil } func (mts *FailingCryptoService) ImportRootKey(r io.Reader) error { return nil } type MockCryptoService struct { testKey data.PrivateKey } func (mts *MockCryptoService) Sign(keyIDs []string, _ []byte) ([]data.Signature, error) { sigs := make([]data.Signature, 0, len(keyIDs)) for _, keyID := range keyIDs { sigs = append(sigs, data.Signature{KeyID: keyID}) } return sigs, nil } func (mts *MockCryptoService) Create(_ string, _ string) (data.PublicKey, error) { return mts.testKey, nil } func (mts *MockCryptoService) GetKey(keyID string) data.PublicKey { if keyID == "testID" { return data.PublicKeyFromPrivate(mts.testKey) } return nil } func (mts *MockCryptoService) ListKeys(role string) []string { return []string{mts.testKey.ID()} } func (mts *MockCryptoService) ListAllKeys() map[string]string { return map[string]string{ mts.testKey.ID(): "root", mts.testKey.ID(): "targets", mts.testKey.ID(): "snapshot", mts.testKey.ID(): "timestamp", } } func (mts *MockCryptoService) GetPrivateKey(keyID string) (data.PrivateKey, string, error) { return mts.testKey, "testRole", nil } func (mts *MockCryptoService) RemoveKey(keyID string) error { return nil } func (mts *MockCryptoService) ImportRootKey(r io.Reader) 
error { return nil } var _ CryptoService = &MockCryptoService{} type StrictMockCryptoService struct { MockCryptoService } func (mts *StrictMockCryptoService) Sign(keyIDs []string, _ []byte) ([]data.Signature, error) { sigs := make([]data.Signature, 0, len(keyIDs)) for _, keyID := range keyIDs { if keyID == mts.testKey.ID() { sigs = append(sigs, data.Signature{KeyID: keyID}) } } return sigs, nil } func (mts *StrictMockCryptoService) GetKey(keyID string) data.PublicKey { if keyID == mts.testKey.ID() { return data.PublicKeyFromPrivate(mts.testKey) } return nil } func (mts *StrictMockCryptoService) ListKeys(role string) []string { return []string{mts.testKey.ID()} } func (mts *StrictMockCryptoService) ListAllKeys() map[string]string { return map[string]string{ mts.testKey.ID(): "root", mts.testKey.ID(): "targets", mts.testKey.ID(): "snapshot", mts.testKey.ID(): "timestamp", } } func (mts *StrictMockCryptoService) ImportRootKey(r io.Reader) error { return nil } // Test signing and ensure the expected signature is added func TestBasicSign(t *testing.T) { cs := NewEd25519() key, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) testData := data.Signed{} err = Sign(cs, &testData, key) assert.NoError(t, err) if len(testData.Signatures) != 1 { t.Fatalf("Incorrect number of signatures: %d", len(testData.Signatures)) } if testData.Signatures[0].KeyID != key.ID() { t.Fatalf("Wrong signature ID returned: %s", testData.Signatures[0].KeyID) } } // Signing with the same key multiple times should not produce multiple sigs // with the same key ID func TestReSign(t *testing.T) { cs := NewEd25519() key, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) testData := data.Signed{} Sign(cs, &testData, key) Sign(cs, &testData, key) if len(testData.Signatures) != 1 { t.Fatalf("Incorrect number of signatures: %d", len(testData.Signatures)) } if testData.Signatures[0].KeyID != key.ID() { t.Fatalf("Wrong signature ID returned: %s", testData.Signatures[0].KeyID) } } 
// Should not remove signatures for valid keys that were not resigned with func TestMultiSign(t *testing.T) { cs := NewEd25519() testData := data.Signed{} key1, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) Sign(cs, &testData, key1) // reinitializing cs means it won't know about key1. We want // to attempt to sign passing both key1 and key2, while expecting // that the signature for key1 is left intact and the signature // for key2 is added cs = NewEd25519() key2, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) Sign( cs, &testData, key1, key2, ) if len(testData.Signatures) != 2 { t.Fatalf("Incorrect number of signatures: %d", len(testData.Signatures)) } keyIDs := map[string]struct{}{key1.ID(): {}, key2.ID(): {}} count := 0 for _, sig := range testData.Signatures { count++ if _, ok := keyIDs[sig.KeyID]; !ok { t.Fatalf("Got a signature we didn't expect: %s", sig.KeyID) } } assert.Equal(t, 2, count) } func TestSignReturnsNoSigs(t *testing.T) { failingCryptoService := &FailingCryptoService{} testData := data.Signed{} testKey, _ := pem.Decode([]byte(testKeyPEM1)) key := data.NewPublicKey(data.RSAKey, testKey.Bytes) err := Sign(failingCryptoService, &testData, key) if err == nil { t.Fatalf("Expected failure due to no signature being returned by the crypto service") } if len(testData.Signatures) != 0 { t.Fatalf("Incorrect number of signatures, expected 0: %d", len(testData.Signatures)) } } func TestSignWithX509(t *testing.T) { // generate a key becase we need a cert privKey, err := trustmanager.GenerateRSAKey(rand.Reader, 1024) assert.NoError(t, err) // make a RSA x509 key template, err := trustmanager.NewCertificate("test") assert.NoError(t, err) signer := privKey.CryptoSigner() derBytes, err := x509.CreateCertificate( rand.Reader, template, template, signer.Public(), signer) assert.NoError(t, err) cert, err := x509.ParseCertificate(derBytes) assert.NoError(t, err) tufRSAx509Key := trustmanager.CertToKey(cert) assert.NoError(t, err) // 
test signing against a service that only recognizes a RSAKey (not // RSAx509 key) mockCryptoService := &StrictMockCryptoService{MockCryptoService{privKey}} testData := data.Signed{} err = Sign(mockCryptoService, &testData, tufRSAx509Key) assert.NoError(t, err) assert.Len(t, testData.Signatures, 1) assert.Equal(t, tufRSAx509Key.ID(), testData.Signatures[0].KeyID) } notary-0.1/tuf/signed/verifiers.go000066400000000000000000000201431262207326400172660ustar00rootroot00000000000000package signed import ( "crypto" "crypto/ecdsa" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/pem" "fmt" "math/big" "reflect" "github.com/Sirupsen/logrus" "github.com/agl/ed25519" "github.com/docker/notary/tuf/data" ) const ( minRSAKeySizeBit = 2048 // 2048 bits = 256 bytes minRSAKeySizeByte = minRSAKeySizeBit / 8 ) // Verifiers serves as a map of all verifiers available on the system and // can be injected into a verificationService. For testing and configuration // purposes, it will not be used by default. var Verifiers = map[data.SigAlgorithm]Verifier{ data.RSAPSSSignature: RSAPSSVerifier{}, data.RSAPKCS1v15Signature: RSAPKCS1v15Verifier{}, data.PyCryptoSignature: RSAPyCryptoVerifier{}, data.ECDSASignature: ECDSAVerifier{}, data.EDDSASignature: Ed25519Verifier{}, } // RegisterVerifier provides a convenience function for init() functions // to register additional verifiers or replace existing ones. 
func RegisterVerifier(algorithm data.SigAlgorithm, v Verifier) { curr, ok := Verifiers[algorithm] if ok { typOld := reflect.TypeOf(curr) typNew := reflect.TypeOf(v) logrus.Debugf( "replacing already loaded verifier %s:%s with %s:%s", typOld.PkgPath(), typOld.Name(), typNew.PkgPath(), typNew.Name(), ) } else { logrus.Debug("adding verifier for: ", algorithm) } Verifiers[algorithm] = v } // Ed25519Verifier used to verify Ed25519 signatures type Ed25519Verifier struct{} // Verify checks that an ed25519 signature is valid func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { if key.Algorithm() != data.ED25519Key { return ErrInvalidKeyType{} } var sigBytes [ed25519.SignatureSize]byte if len(sig) != ed25519.SignatureSize { logrus.Infof("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig)) return ErrInvalid } copy(sigBytes[:], sig) var keyBytes [ed25519.PublicKeySize]byte pub := key.Public() if len(pub) != ed25519.PublicKeySize { logrus.Errorf("public key is incorrect size, must be %d, was %d.", ed25519.PublicKeySize, len(pub)) return ErrInvalidKeyLength{msg: fmt.Sprintf("ed25519 public key must be %d bytes.", ed25519.PublicKeySize)} } n := copy(keyBytes[:], key.Public()) if n < ed25519.PublicKeySize { logrus.Errorf("failed to copy the key, must have %d bytes, copied %d bytes.", ed25519.PublicKeySize, n) return ErrInvalid } if !ed25519.Verify(&keyBytes, msg, &sigBytes) { logrus.Infof("failed ed25519 verification") return ErrInvalid } return nil } func verifyPSS(key interface{}, digest, sig []byte) error { rsaPub, ok := key.(*rsa.PublicKey) if !ok { logrus.Infof("value was not an RSA public key") return ErrInvalid } if rsaPub.N.BitLen() < minRSAKeySizeBit { logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)} } if len(sig) < minRSAKeySizeByte { 
logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) return ErrInvalid } opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256} if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil { logrus.Infof("failed RSAPSS verification: %s", err) return ErrInvalid } return nil } func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) { algorithm := key.Algorithm() var pubKey crypto.PublicKey switch algorithm { case data.RSAx509Key: pemCert, _ := pem.Decode([]byte(key.Public())) if pemCert == nil { logrus.Infof("failed to decode PEM-encoded x509 certificate") return nil, ErrInvalid } cert, err := x509.ParseCertificate(pemCert.Bytes) if err != nil { logrus.Infof("failed to parse x509 certificate: %s\n", err) return nil, ErrInvalid } pubKey = cert.PublicKey case data.RSAKey: var err error pubKey, err = x509.ParsePKIXPublicKey(key.Public()) if err != nil { logrus.Infof("failed to parse public key: %s\n", err) return nil, ErrInvalid } default: // only accept RSA keys logrus.Infof("invalid key type for RSAPSS verifier: %s", algorithm) return nil, ErrInvalidKeyType{} } return pubKey, nil } // RSAPSSVerifier checks RSASSA-PSS signatures type RSAPSSVerifier struct{} // Verify does the actual check. 
func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { // will return err if keytype is not a recognized RSA type pubKey, err := getRSAPubKey(key) if err != nil { return err } digest := sha256.Sum256(msg) return verifyPSS(pubKey, digest[:], sig) } // RSAPKCS1v15Verifier checks RSA PKCS1v15 signatures type RSAPKCS1v15Verifier struct{} // Verify does the actual verification func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error { // will return err if keytype is not a recognized RSA type pubKey, err := getRSAPubKey(key) if err != nil { return err } digest := sha256.Sum256(msg) rsaPub, ok := pubKey.(*rsa.PublicKey) if !ok { logrus.Infof("value was not an RSA public key") return ErrInvalid } if rsaPub.N.BitLen() < minRSAKeySizeBit { logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen()) return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)} } if len(sig) < minRSAKeySizeByte { logrus.Infof("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig)) return ErrInvalid } if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil { logrus.Errorf("Failed verification: %s", err.Error()) return ErrInvalid } return nil } // RSAPyCryptoVerifier checks RSASSA-PSS signatures type RSAPyCryptoVerifier struct{} // Verify does the actual check. // N.B. We have not been able to make this work in a way that is compatible // with PyCrypto. 
func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
	digest := sha256.Sum256(msg)
	// Only raw "rsa" keys are accepted here (no x509 certificate wrapping).
	if key.Algorithm() != data.RSAKey {
		return ErrInvalidKeyType{}
	}

	// The key material is PEM-wrapped DER; decode the PEM block, then parse
	// the PKIX public key inside it.
	k, _ := pem.Decode([]byte(key.Public()))
	if k == nil {
		logrus.Infof("failed to decode PEM-encoded x509 certificate")
		return ErrInvalid
	}

	pub, err := x509.ParsePKIXPublicKey(k.Bytes)
	if err != nil {
		logrus.Infof("failed to parse public key: %s\n", err)
		return ErrInvalid
	}

	return verifyPSS(pub, digest[:], sig)
}

// ECDSAVerifier checks ECDSA signatures, decoding the keyType appropriately
type ECDSAVerifier struct{}

// Verify does the actual check.
func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
	algorithm := key.Algorithm()
	var pubKey crypto.PublicKey

	switch algorithm {
	case data.ECDSAx509Key:
		// Certificate-wrapped key: decode the PEM, parse the certificate,
		// and use the public key embedded in it.
		pemCert, _ := pem.Decode([]byte(key.Public()))
		if pemCert == nil {
			logrus.Infof("failed to decode PEM-encoded x509 certificate for keyID: %s", key.ID())
			logrus.Debugf("certificate bytes: %s", string(key.Public()))
			return ErrInvalid
		}
		cert, err := x509.ParseCertificate(pemCert.Bytes)
		if err != nil {
			logrus.Infof("failed to parse x509 certificate: %s\n", err)
			return ErrInvalid
		}
		pubKey = cert.PublicKey
	case data.ECDSAKey:
		// Raw key: DER-encoded PKIX public key.
		var err error
		pubKey, err = x509.ParsePKIXPublicKey(key.Public())
		if err != nil {
			logrus.Infof("Failed to parse private key for keyID: %s, %s\n", key.ID(), err)
			return ErrInvalid
		}
	default: // only accept ECDSA keys.
logrus.Infof("invalid key type for ECDSA verifier: %s", algorithm)
		return ErrInvalidKeyType{}
	}

	ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)
	if !ok {
		logrus.Infof("value isn't an ECDSA public key")
		return ErrInvalid
	}

	// The signature is the raw concatenation r || s, each half padded to the
	// curve's octet length, so it must be exactly twice that length.
	sigLength := len(sig)
	expectedOctetLength := 2 * ((ecdsaPubKey.Params().BitSize + 7) >> 3)
	if sigLength != expectedOctetLength {
		logrus.Infof("signature had an unexpected length")
		return ErrInvalid
	}

	// Split the signature into its r and s halves.
	rBytes, sBytes := sig[:sigLength/2], sig[sigLength/2:]
	r := new(big.Int).SetBytes(rBytes)
	s := new(big.Int).SetBytes(sBytes)

	digest := sha256.Sum256(msg)
	if !ecdsa.Verify(ecdsaPubKey, digest[:], r, s) {
		logrus.Infof("failed ECDSA signature validation")
		return ErrInvalid
	}

	return nil
}
notary-0.1/tuf/signed/verifiers_test.go000066400000000000000000000711621262207326400203340ustar00rootroot00000000000000package signed

import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"testing"
	"text/template"

	"github.com/docker/notary/tuf/data"
	"github.com/stretchr/testify/assert"
)

// KeyTemplate parameterizes the JSON key fixtures below with a key type.
type KeyTemplate struct {
	KeyType string
}

const baseRSAKey =
`{"keytype":"{{.KeyType}}","keyval":{"public":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyyvBtTg2xzYS+MTTIBqSpI4V78tt8Yzqi7Jki/Z6NqjiDvcnbgcTqNR2t6B2W5NjGdp/hSaT2jyHM+kdmEGaPxg/zIuHbL3NIp4e0qwovWiEgACPIaELdn8O/kt5swsSKl1KMvLCH1sM86qMibNMAZ/hXOwd90TcHXCgZ91wHEAmsdjDC3dB0TT+FBgOac8RM01Y196QrZoOaDMTWh0EQfw7YbXAElhFVDFxBzDdYWbcIHSIogXQmq0CP+zaL/1WgcZZIClt2M6WCaxxF1S34wNn45gCvVZiZQ/iKWHerSr/2dGQeGo+7ezMSutRzvJ+01fInD86RS/CEtBCFZ1VyQIDAQAB","private":"MIIEpAIBAAKCAQEAyyvBtTg2xzYS+MTTIBqSpI4V78tt8Yzqi7Jki/Z6NqjiDvcnbgcTqNR2t6B2W5NjGdp/hSaT2jyHM+kdmEGaPxg/zIuHbL3NIp4e0qwovWiEgACPIaELdn8O/kt5swsSKl1KMvLCH1sM86qMibNMAZ/hXOwd90TcHXCgZ91wHEAmsdjDC3dB0TT+FBgOac8RM01Y196QrZoOaDMTWh0EQfw7YbXAElhFVDFxBzDdYWbcIHSIogXQmq0CP+zaL/1WgcZZIClt2M6WCaxxF1S34wNn45gCvVZiZQ/iKWHerSr/2dGQeGo+7ezMSutRzvJ+01fInD86RS/CEtBCFZ1VyQIDAQABAoIBAHar8FFxrE1gAGTeUpOF8fG8LIQMRwO4U6eVY7V9GpWiv6gOJTHXYFxU/aL0Ty3eQRxwy9tyVRo8EJz5pRex+e6ws1M+jLOviYqW4VocxQ8dZYd+zBvQfWmRfah7XXJ/HPUx2I05zrmR7VbGX6Bu4g5w3KnyIO61gfyQNKF2bm2Q3yblfupx3URvX0bl180R/+QN2Aslr4zxULFE6b+qJqBydrztq+AAP3WmskRxGa6irFnKxkspJqUpQN1mFselj6iQrzAcwkRPoCw0RwCCMq1/OOYvQtgxTJcO4zDVlbw54PvnxPZtcCWw7fO8oZ2Fvo2SDo75CDOATOGaT4Y9iqECgYEAzWZSpFbN9ZHmvq1lJQg//jFAyjsXRNn/nSvyLQILXltz6EHatImnXo3v+SivG91tfzBI1GfDvGUGaJpvKHoomB+qmhd8KIQhO5MBdAKZMf9fZqZofOPTD9xRXECCwdi+XqHBmL+l1OWz+O9Bh+Qobs2as/hQVgHaoXhQpE0NkTcCgYEA/Tjf6JBGl1+WxQDoGZDJrXoejzG9OFW19RjMdmPrg3t4fnbDtqTpZtCzXxPTCSeMrvplKbqAqZglWyq227ksKw4p7O6YfyhdtvC58oJmivlLr6sFaTsER7mDcYce8sQpqm+XQ8IPbnOk0Z1l6g56euTwTnew49uy25M6U1xL0P8CgYEAxEXv2Kw+OVhHV5PX4BBHHj6we88FiDyMfwM8cvfOJ0datekf9X7ImZkmZEAVPJpWBMD+B0J0jzU2b4SLjfFVkzBHVOH2Ob0xCH2MWPAWtekin7OKizUlPbW5ZV8b0+Kq30DQ/4a7D3rEhK8UPqeuX1tHZox1MAqrgbq3zJj4yvcCgYEAktYPKPm4pYCdmgFrlZ+bA0iEPf7Wvbsd91F5BtHsOOM5PQQ7e0bnvWIaEXEad/2CG9lBHlBy2WVLjDEZthILpa/h6e11ao8KwNGY0iKBuebT17rxOVMqqTjPGt8CuD2994IcEgOPFTpkAdUmyvG4XlkxbB8F6St17NPUB5DGuhsCgYA//Lfytk0FflXEeRQ16LT1YXgV7pcR2jsha4+4O5pxSFw/kTsOfJaYHg8StmROoyFnyE3sg76dCgLn0LENRCe5BvDhJnp5bMpQldG3XwcAxH8FGFNY4LtV/2ZKnJhxcONk
fmzQPOmTyedOzrKQ+bNURsqLukCypP7/by6afBY4dA=="}}` const baseRSAx509Key = `{"keytype":"{{.KeyType}}","keyval":{"public":"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZLekNDQXhXZ0F3SUJBZ0lRVERENFNHZS8yaUVJQjAxZFhzTHE5akFMQmdrcWhraUc5dzBCQVFzd09ERWEKTUJnR0ExVUVDaE1SWkc5amEyVnlMbU52YlM5dWIzUmhjbmt4R2pBWUJnTlZCQU1URVdSdlkydGxjaTVqYjIwdgpibTkwWVhKNU1CNFhEVEUxTURjeE56QXdNemt6TVZvWERURTNNRGN4TmpBd016a3pNVm93T0RFYU1CZ0dBMVVFCkNoTVJaRzlqYTJWeUxtTnZiUzl1YjNSaGNua3hHakFZQmdOVkJBTVRFV1J2WTJ0bGNpNWpiMjB2Ym05MFlYSjUKTUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUFzYnY1R01oS21DK0J6V3hhZTZSTQpyaHVxU082VkNpb1dhMkZmdmtzYlhtaDhNaktTeUFHQUJLSnVoVksyTHI3NWsrZktTeVVOSFpEWUYxVXFhMnljCnlDQndVVVFXYTNqVDdPaFI3T2FzRDBXYVJEL2MvODhkVlNRejVsdEV0Y3IvVGpRSHpqcjVXL2dWWXJIVkV2UXkKWUJGS3BkSHdRRGpLNnZ6Njc1WnRxMjBKUStzcXRNNFlUeis5dXg5Y25LQTFuc0JDM1YvVk1ybTRWZ3pqL0lOawppL0ZNMVh0Yjd1UFFTK0hRSElYc2R5SktsTHdsTXg2RjhTeFpYZHROUjh4bE8wdkI3Qm5KK0hKZlBWTEpYZDVmCjRld1lUZkE3WmtRNWJESzAzeDRkSzloR2VFUjlEVURja2tNM3RVZHhleTFQZXBkK1BSWWRsL0k5MG5UYXN0L2EKdmpQdUxkYjR2KzFuVnZQVzhjMHNvaGhQK1VkUUU1UHA0dlhDUmMvbVZ1S1NNbEU5bFRDYzY2TFoxcm5tQzJ4agpzKzNpcWZWeWFIcjFmbnUrZjdhTk9jSmFSTlpRN1ErWEdBbXljMXJ6MmJzTmc0S1pIVGdHQnBzNHMwQWV6MUhSCll6NW94QmVUVEZUaUNtZFBJS2lhbGhrTmJQWS9FUXQzNzBXYjIzMTVUQm12QkI0NitXbWoycG9ka2FJeGhJLzMKblRwT25mZlFub25rTmRwdTlKeFJWNFlWQTh1b0hvNWc1WjMreEF1S1A1TnlRSHdvQkwwbElKbTdBK2lHUDF5cwpuNnFVVk5ab0dENFR4ZHduYllmMFBKa3ZhL0FjLzkvaktyajVyQ2NrL1NDOVllbGNxaCt1dklENDA5YU9veVAzCk83SDF0bmQ4M1hXZm5nczRuaFNmQmdzQ0F3RUFBYU0xTURNd0RnWURWUjBQQVFIL0JBUURBZ0NnTUJNR0ExVWQKSlFRTU1Bb0dDQ3NHQVFVRkJ3TURNQXdHQTFVZEV3RUIvd1FDTUFBd0N3WUpLb1pJaHZjTkFRRUxBNElDQVFCawpnRGExQ0F2NWY4QWFaN1YvN2JoMzBtUnZEajVwNGtPWkpSclc3UDdsTTJZU2tNVWFnQzB1QTQySUU1TjI4R2VWCkp4VEh6V0FCQThwNE1ZREllTVZJdGJndHBUWVorZTByL3RPL3pnRHVxNEZ5UFZmZllRYlh1L2k5N29TbVNQYy8KRXpZQktrNHl1RHZ3ZjZtNjJPSGxNalZNcitzM3pQUHB4dFFTaFRndkJ4QWp2ekdmVFBRSEZSdm5jWFZPd2dyRAp0ampsS3RzMGx0azI4eWJ3dyt3SVVCdWg0dzNrZFVBR2RYME9sY3NIdnM3TFhoc01XcmdxMUs4ZVNJZlR6YUdGClMwcE5MNEZObUV4VDVKaFk1S
nZ4cWRxclB2RFJEU2FOUXV0OHc2K2FpeXVPVFpiZDRQeTlLZHd2bUNrNk5GdHoKd3lpWUwzT2hZa201Ui9iUm93YVY1dWwrY1BETmV0cGV3WnZJQTUzUkJYZlZCejl0TXI0M2ZaaW9YRFltNTkyVQpKTE1GaGRWMm1zYk9McWFIcGRoN0JhWFFITGxEdHZpaUVLdVRqalJKWEZWTk9seTA1UHBxeFhjWnRSbHhpRjhhCkoveWJ5a1Y0aWc0K3U1aVNGK2dFZjkyaWpaRTNjNnlsYkZjSDhoRVV0bTRqSElHZ1JsWGJ3NmZvV3llb2Z5VUIKTk5COTZyVG5UdkxmdDlReGprUjdlNGgycU41MnFIOVY5L3NLSjlSVFFqU1RERXM3MDF2Z1ZVd0tpVC9VZ3hLTAp3UzJ5dnZJeTN5TFpFUGltQnF6emFSeStCZ3Q4anNrNnQvNEdIT2Y0Rzk0a3paMkIyNUJnYjV5MTl2WVdDQSswCitXdlRCeGdxb0o1Y2lCdXMxYWJiUjZORU1RbXQyeUtneTZEejNJVXgxZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K","private":"MIIJKAIBAAKCAgEAsbv5GMhKmC+BzWxae6RMrhuqSO6VCioWa2FfvksbXmh8MjKSyAGABKJuhVK2Lr75k+fKSyUNHZDYF1Uqa2ycyCBwUUQWa3jT7OhR7OasD0WaRD/c/88dVSQz5ltEtcr/TjQHzjr5W/gVYrHVEvQyYBFKpdHwQDjK6vz675Ztq20JQ+sqtM4YTz+9ux9cnKA1nsBC3V/VMrm4Vgzj/INki/FM1Xtb7uPQS+HQHIXsdyJKlLwlMx6F8SxZXdtNR8xlO0vB7BnJ+HJfPVLJXd5f4ewYTfA7ZkQ5bDK03x4dK9hGeER9DUDckkM3tUdxey1Pepd+PRYdl/I90nTast/avjPuLdb4v+1nVvPW8c0sohhP+UdQE5Pp4vXCRc/mVuKSMlE9lTCc66LZ1rnmC2xjs+3iqfVyaHr1fnu+f7aNOcJaRNZQ7Q+XGAmyc1rz2bsNg4KZHTgGBps4s0Aez1HRYz5oxBeTTFTiCmdPIKialhkNbPY/EQt370Wb2315TBmvBB46+Wmj2podkaIxhI/3nTpOnffQnonkNdpu9JxRV4YVA8uoHo5g5Z3+xAuKP5NyQHwoBL0lIJm7A+iGP1ysn6qUVNZoGD4TxdwnbYf0PJkva/Ac/9/jKrj5rCck/SC9Yelcqh+uvID409aOoyP3O7H1tnd83XWfngs4nhSfBgsCAwEAAQKCAgEAjVZB3GdKioMc4dLMkY4yPDJb0+uGMbMOaQ3iKV1owkasnO6CsvIeb5EL+pGvtrS/m9Kzl9Y6+8v3S3a6aPrSIoNJTharDYPkY3zLyWwWX36mEqgGgpadaNuFOiZSGY74P6Q4oNNdALnjp7xrCMuQU7zsc7jjKO8AzqWml2g0hiILQCt+ppFN25eAtZFXAGaWvUt+4LQYwmHWKPfPRTrndjHJO+sBTJN1TSKhcE0/oe1vCaAkpOYc9ZCi8HQ4nGP6DJFOAQbxCdVJz2ZKI493CB3Lpg7n7YdLcrNQCi3UXM18HJ+6IhP2U4mIf2v03lNF5OMbzFAN8Ir+hqHOWHiTZWESvILzzcc4UPKNaxkO+YSLbKOoQNQR/1OblBwsqM3sHwmUalxjyn0U2yCOiw51Q/jIls7kGUdW48YLXdiQ0o+HlB98Ly78Mr3JNx3dky0sBBZG+U9MqroKb6+tbGCz0Y11prEzLIHWlDGHkixWfNYEqvpetKxQ8fYo06HHsoq7PeYa7bbxQZL+HDEml0528SfcNYmdzv/+NhgQxHmsJ4kX4Umeo28ENAURMIPSrsOSxbOOYhFGBptRzR9UZmkt1CzTs0aoHkwjo61FZadYxUbqZnfoAvkaqs5crLmQz0MTEglZK7wohfym91xiTkcx/7WnOZlbfMsLWxM7HDEU2WEC
ggEBAMKww5agT3Yl1cxQHg1pGnMDX9SFm4Q0U4keSEY6SjkLBfTuOusZ5+OivIofrRYudCz6x6Epm6ID26s2exfILoQ/aOGNxsGuZQc4pXYCTZ9rZUG/9PIpAS9JUwZ3MHfFVomzST3EcVzq6qYkb9l6X+QD5sOlHnrTlga2tvTgA7HVKoeVmtnMeKuFNNRUKf7PF3xdrEtusU0xsAndnDKcSY8TU793h8O51aLJpvdf+etRnRRMWI2i0YsBdFjFNi96HMDjeP6TqA+Ev6KzmwbcLHoHcKp2bt2lz7J5CcArXR05PTGnaiwK7KWpCZTz1GcqHMg7EpiPorh03ZgZh7+lqm8CggEBAOm0Qsn2O46qpIb3o/25kziUXpYJLz4V3EL9vvpuTyPV0kia8Mtn05+fq6MphEDeQNgCeHI24UPUrbH7bwljjW6CHRhsOzbiThXZctkIfdlsAAXPKIRlDqmqNGsawqQNVdnUK4kaQgAQoy7EYevAGvPG+E0USJxJHAuKOGy4ir8j8Pap/Nc/u6pWgTxuwBDcwoA8/xWVbB48e+ucEh5LFZociRPLS8P+WH9geFJCHNX1uELM97JE6G1KfFwDGulPhojnL7Dyz2CiFZC+zl/bRHyG/qjxHkabukayVHIbtgpNmANHqjlK31V7MYgnekLmly7bjhPpzNAbfn8nvEMq3CUCggEAaRjm3H75pjvSaBKvxmmAX6nop17gjsN4fMKeHVsGCjkLJCceIx++8EE/KgjjdN/q0wUlkrhVTWZrxMcKN9JWWgmo4mmYa6Fq5DUODOA9atucs5ud7MN54j7g1NKulVkv1/GyjednktM1jC6LOok3Dm2UuvR9uaxShplHtnTfSbZa2QpHp18bnOuxkxVD/kto0Df49Fdy2ssBzrGUyjVX+CZkxS0PWvcMfm4A9fUXgpJyCy0TeJH2L+W/GtSK5aIzt2SUQkkPJiFxGbF+9HsSf2VYyoxYWMpTjnKMcvJ1t3rYr99CDzhuexb/Fytw86fmFajd5wFSw+RCYwMVJr2VfQKCAQAU+aLM8Zai1Vny6yMC0LcP6vEaUjS1Q80DDjcnzuK3eqdm8NEP0H/D4dbLzBwcnlX/jSk2RwqsxdfZE5IBq7ez5WWrHXurD2CmwV93bzWsX+8YlmEykMdiHu6Zdktl4fSEmnBV2890pgmfVuza9eD1ZDRA5sMlk8I6nus1htKdGSK1YMhaoVO8lAsBW4dNfCLQ06ipTUHo7NDKcrWFloOX01vSNPrV2mwi8ouaBmkEIwuoozDQBTM/K+JBd93gdszCWM2E+iX2rFV3KkjnfYyGCK+uhgWLnMp5MeQ2YZpTDmfIU5RJlBi7WVU2vSRSANQs1nPIAcHqI62UyAIznRMpAoIBABka5m4uC6HOeDNuZNYKF8HnTTGxyKUqiDLe6mCWTw4+aQjT3YyZeKDldBl9ICfw/5Igljc5+uFG8I1etEGYJzuvLbd7tj/pJLveUB6UonkrIo1yBWWINdOgU/Iwxn2K662wiUzODy/RLXUzZ7ZppsGf32YgPGLUEpLvd6gsa2ZIcRIebzX8FK2h/gwVq11IijVFlodWqn5ttrmmYI4YVotQf8I15Xi8NvziLVvKWWWaf15GjO/ZW0OzjucQhg/2Jk8brXayuzYxTBT8LN6lxb4CdHcxFPDF6s7ongzOz6TbKYW4XzcQAKHWQSeErKjwXLooWUoqS3o2Y4Rp/lV4Alo="}}` const baseECDSAKey = ` 
{"keytype":"{{.KeyType}}","keyval":{"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw==","private":"MHcCAQEEIDqtcdzU7H3AbIPSQaxHl9+xYECt7NpK7B1+6ep5cv9CoAoGCCqGSM49AwEHoUQDQgAEgl3rzMPMEKhS1k/AX16MM4PdidpjJr+z4pj0Td+30QnpbOIARgpyR1PiFztU8BZlqG3cUazvFclr2q/xHvfrqw=="}}` const baseECDSAx509Key = `{"keytype":"ecdsa-x509","keyval":{"public":"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJwRENDQVVtZ0F3SUJBZ0lRQlBWc1NoUmRMdG45WEtRZ29JaDIvREFLQmdncWhrak9QUVFEQWpBNE1Sb3cKR0FZRFZRUUtFeEZrYjJOclpYSXVZMjl0TDI1dmRHRnllVEVhTUJnR0ExVUVBeE1SWkc5amEyVnlMbU52YlM5dQpiM1JoY25rd0hoY05NVFV3TnpFek1EVXdORFF4V2hjTk1UY3dOekV5TURVd05EUXhXakE0TVJvd0dBWURWUVFLCkV4RmtiMk5yWlhJdVkyOXRMMjV2ZEdGeWVURWFNQmdHQTFVRUF4TVJaRzlqYTJWeUxtTnZiUzl1YjNSaGNua3cKV1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVI3SjNSOGpWODV5Rnp0dGFTV3FMRDFHa042UHlhWAowUUdmOHh2Rzd6MUYwUG5DQUdSWk9QQ01aWWpZSGVkdzNXY0FmQWVVcDY5OVExSjNEYW9kbzNBcm96VXdNekFPCkJnTlZIUThCQWY4RUJBTUNBS0F3RXdZRFZSMGxCQXd3Q2dZSUt3WUJCUVVIQXdNd0RBWURWUjBUQVFIL0JBSXcKQURBS0JnZ3Foa2pPUFFRREFnTkpBREJHQWlFQWppVkJjaTBDRTBaazgwZ2ZqbytYdE9xM3NURGJkSWJRRTZBTQpoL29mN1RFQ0lRRGxlbXB5MDRhY0RKODNnVHBvaFNtcFJYdjdJbnRLc0lRTU1oLy9VZzliU2c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==","private":null}}` func TestRSAPSSVerifier(t *testing.T) { // Unmarshal our private RSA Key var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.RSAKey}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Sign some data using RSAPSS message := []byte("test data for signing") hash := crypto.SHA256 hashed := sha256.Sum256(message) signedData, err := rsaPSSSign(testRSAKey, hash, hashed[:]) assert.NoError(t, err) // Create and call Verify on the verifier rsaVerifier := RSAPSSVerifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) 
assert.NoError(t, err, "expecting success but got error while verifying data using RSA PSS") } func TestRSAPSSx509Verifier(t *testing.T) { // Unmarshal our public RSA Key var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAx509Key) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.RSAx509Key}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signed message signedData, _ := hex.DecodeString("3de02fa54cdba45c67860f058b7cff1ba264610dc3c5b466b7df027bc52068bdf2956fe438dba08b0b71daa0780a3037bf8f50a09d91ca81fa872bbdbbbff6ef17e04df8741ad5c2f2c3ea5de97d6ffaf4999c83fdfba4b6cb2443da11c7b7eea84123c2fdaf3319fa6342cbbdbd1aa25d1ac20aeee687e48cbf191cc8f68049230261469eeada33dec0af74287766bd984dd01820a7edfb8b0d030e2fcf00886c578b07eb905a2eebc81fd982a578e717c7ac773cab345950c71e1eaf81b70401e5bf3c67cdcb9068bf4b50ff0456b530b3cec5586827eb39b123f9d666a65f4b418a355438ed1753da8a27577ab9cd791d7b840c7e34ecc1290c46d98aa0dd73c0427f6ef8f63e36af42e9657520b8f56c9231ba7e0172dfc3456c63c54e9eae95d06bafe571e91afa1e42d4010e60dd5c441df112cc8474253eee7f1d6c5350039ffcd1f8b0bb013e4403c16fc5b40d6bd56b742ea1ed82c87880147db194b33b022077cc2e8d31ef3eada3e46683ad437ad8ef7ecbe03c29d7a53a9771e42cc4f9d782813c491186fde2cd1dfa408c4e21dd4c3ca1664e901772ffe1713e37b07c9287572114865a05e17cbe29d8622c6b033dcb43c9721d0943c58098607cc28bd58b3caf3dfc1f66d01ebfaf1aa5c2c5945c23af83fe114e587fa7bcbaea6bdccff3c0ad03ce3328f67af30168e225e5827ad9e94b4702de984e6dd775") message := []byte("test data for signing") // Create and call Verify on the verifier rsaVerifier := RSAPSSVerifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.NoError(t, err, "expecting success but got error while verifying data using RSAPSS and an X509 encoded Key") } func TestRSAPSSVerifierWithInvalidKeyType(t *testing.T) { var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := 
template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: "rsa-invalid"}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signed data with invalidRsaKeyJSON signedData, _ := hex.DecodeString("2741a57a5ef89f841b4e0a6afbcd7940bc982cd919fbd11dfc21b5ccfe13855b9c401e3df22da5480cef2fa585d0f6dfc6c35592ed92a2a18001362c3a17f74da3906684f9d81c5846bf6a09e2ede6c009ae164f504e6184e666adb14eadf5f6e12e07ff9af9ad49bf1ea9bcfa3bebb2e33be7d4c0fabfe39534f98f1e3c4bff44f637cff3dae8288aea54d86476a3f1320adc39008eae24b991c1de20744a7967d2e685ac0bcc0bc725947f01c9192ffd3e9300eba4b7faa826e84478493fdf97c705dd331dd46072050d6c5e317c2d63df21694dbaf909ebf46ce0ff04f3979fe13723ae1a823c65f27e56efa19e88f9e7b8ee56eac34353b944067deded3a") message := []byte("test data for signing") // Create and call Verify on the verifier rsaVerifier := RSAPSSVerifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.Error(t, err, "invalid key type for RSAPSS verifier: rsa-invalid") } func TestRSAPSSVerifierWithInvalidKeyLength(t *testing.T) { key, err := rsa.GenerateKey(rand.Reader, 512) assert.NoError(t, err) err = verifyPSS(key.Public(), nil, nil) assert.Error(t, err) assert.IsType(t, ErrInvalidKeyLength{}, err) } func TestRSAPSSVerifierWithInvalidKey(t *testing.T) { var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseECDSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: "ecdsa"}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signed data with invalidRsaKeyJSON signedData, _ := 
hex.DecodeString("2741a57a5ef89f841b4e0a6afbcd7940bc982cd919fbd11dfc21b5ccfe13855b9c401e3df22da5480cef2fa585d0f6dfc6c35592ed92a2a18001362c3a17f74da3906684f9d81c5846bf6a09e2ede6c009ae164f504e6184e666adb14eadf5f6e12e07ff9af9ad49bf1ea9bcfa3bebb2e33be7d4c0fabfe39534f98f1e3c4bff44f637cff3dae8288aea54d86476a3f1320adc39008eae24b991c1de20744a7967d2e685ac0bcc0bc725947f01c9192ffd3e9300eba4b7faa826e84478493fdf97c705dd331dd46072050d6c5e317c2d63df21694dbaf909ebf46ce0ff04f3979fe13723ae1a823c65f27e56efa19e88f9e7b8ee56eac34353b944067deded3a") message := []byte("test data for signing") // Create and call Verify on the verifier rsaVerifier := RSAPSSVerifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.Error(t, err, "invalid key type for RSAPSS verifier: ecdsa") } func TestRSAPSSVerifierWithInvalidSignature(t *testing.T) { var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.RSAKey}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Sign some data using RSAPSS message := []byte("test data for signing") hash := crypto.SHA256 hashed := sha256.Sum256(message) signedData, err := rsaPSSSign(testRSAKey, hash, hashed[:]) assert.NoError(t, err) // Modify the signature signedData[0]++ // Create and call Verify on the verifier rsaVerifier := RSAPSSVerifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.Error(t, err, "signature verification failed") } func TestRSAPKCS1v15Verifier(t *testing.T) { // Unmarshal our private RSA Key var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.RSAKey}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Sign some data using RSAPKCS1v15 message := []byte("test data for signing") 
hash := crypto.SHA256 hashed := sha256.Sum256(message) signedData, err := rsaPKCS1v15Sign(testRSAKey, hash, hashed[:]) assert.NoError(t, err) // Create and call Verify on the verifier rsaVerifier := RSAPKCS1v15Verifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.NoError(t, err, "expecting success but got error while verifying data using RSAPKCS1v15") } func TestRSAPKCS1v15x509Verifier(t *testing.T) { // Unmarshal our public RSA Key var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAx509Key) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.RSAx509Key}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signed message signedData, _ := hex.DecodeString("a19602f609646d57f3d0db930bbe491a997baf33f13191916713734ae778ddb4898ece2078741bb0c24d726514c6b4538c3665c374b0b8ec9ff234b45459633268224c9962756ad3684aca5f13a286657375e798ddcb857ed2707c900f097666b958df56b43b790357430c2e7a5c379ba9972c8b008363c144aac5c7e0fbfad83cf6855cf73baf8e3ad774e910ba6ac8dc4cce58fe19cffb7b0a1feaa73d23ebd2d59de2d7d9e98a809d73a310c5396df64ff7a22d735e661e39d37a6c4a013caa6005e91f597ea35db24e6c750d704d292a180128dcf72a818c53a96b0a83ba0414a3611097905262eb79a6ced1484af27c7da6809aa21ae7c6f05ae6568d5e5d9c170470213a30caf2340c3d52e7bd4056d22074daffee6e29d0a6fd3ca6dbd001831fb1e48573f3663b63e110cde19efaf56e49a835aeda82e4d7286de591376ecd03de36d402ec703f39f79b2f764f991d8950a119f2618f6d4e4618114900597a1e89ced609949410623a17b97095afe08babc4c295ade954f055ca01b7909f5585e98eb99bd916583476aa877d20da8f4fe35c0867e934f41c935d469664b80904a93f9f4d9432cabd9383e08559d6452f8e12b2d861412c450709ff874ad63c25a640605a41c4073f0eb4e16e1965abf8e088e210cbf9d3ca884ec2c13fc8a288cfcef2425d9607fcab01dab45c5c346671a9ae1d0e52c81379fa212c") message := []byte("test data for signing") // Create and call Verify on the verifier rsaVerifier := RSAPKCS1v15Verifier{} err = rsaVerifier.Verify(testRSAKey, 
signedData, message) assert.NoError(t, err, "expecting success but got error while verifying data using RSAPKCS1v15 and an X509 encoded Key") } func TestRSAPKCS1v15VerifierWithInvalidKeyType(t *testing.T) { var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: "rsa-invalid"}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signed data with invalidRsaKeyJSON signedData, _ := hex.DecodeString("2741a57a5ef89f841b4e0a6afbcd7940bc982cd919fbd11dfc21b5ccfe13855b9c401e3df22da5480cef2fa585d0f6dfc6c35592ed92a2a18001362c3a17f74da3906684f9d81c5846bf6a09e2ede6c009ae164f504e6184e666adb14eadf5f6e12e07ff9af9ad49bf1ea9bcfa3bebb2e33be7d4c0fabfe39534f98f1e3c4bff44f637cff3dae8288aea54d86476a3f1320adc39008eae24b991c1de20744a7967d2e685ac0bcc0bc725947f01c9192ffd3e9300eba4b7faa826e84478493fdf97c705dd331dd46072050d6c5e317c2d63df21694dbaf909ebf46ce0ff04f3979fe13723ae1a823c65f27e56efa19e88f9e7b8ee56eac34353b944067deded3a") message := []byte("test data for signing") // Create and call Verify on the verifier rsaVerifier := RSAPKCS1v15Verifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.Error(t, err, "invalid key type for RSAPKCS1v15 verifier: rsa-invalid") } func TestRSAPKCS1v15VerifierWithInvalidKey(t *testing.T) { var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseECDSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: "ecdsa"}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signed data with invalidRsaKeyJSON signedData, _ := 
hex.DecodeString("2741a57a5ef89f841b4e0a6afbcd7940bc982cd919fbd11dfc21b5ccfe13855b9c401e3df22da5480cef2fa585d0f6dfc6c35592ed92a2a18001362c3a17f74da3906684f9d81c5846bf6a09e2ede6c009ae164f504e6184e666adb14eadf5f6e12e07ff9af9ad49bf1ea9bcfa3bebb2e33be7d4c0fabfe39534f98f1e3c4bff44f637cff3dae8288aea54d86476a3f1320adc39008eae24b991c1de20744a7967d2e685ac0bcc0bc725947f01c9192ffd3e9300eba4b7faa826e84478493fdf97c705dd331dd46072050d6c5e317c2d63df21694dbaf909ebf46ce0ff04f3979fe13723ae1a823c65f27e56efa19e88f9e7b8ee56eac34353b944067deded3a") message := []byte("test data for signing") // Create and call Verify on the verifier rsaVerifier := RSAPKCS1v15Verifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.Error(t, err, "invalid key type for RSAPKCS1v15 verifier: ecdsa") } func TestRSAPKCS1v15VerifierWithInvalidSignature(t *testing.T) { var testRSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.RSAKey}) testRSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Sign some data using RSAPKCS1v15 message := []byte("test data for signing") hash := crypto.SHA256 hashed := sha256.Sum256(message) signedData, err := rsaPKCS1v15Sign(testRSAKey, hash, hashed[:]) assert.NoError(t, err) // Modify the signature signedData[0]++ // Create and call Verify on the verifier rsaVerifier := RSAPKCS1v15Verifier{} err = rsaVerifier.Verify(testRSAKey, signedData, message) assert.Error(t, err, "signature verification failed") } func TestECDSAVerifier(t *testing.T) { var testECDSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseECDSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.ECDSAKey}) testECDSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Sign some data using ECDSA message := []byte("test data for signing") hashed := 
sha256.Sum256(message) signedData, err := ecdsaSign(testECDSAKey, hashed[:]) assert.NoError(t, err) // Create and call Verify on the verifier ecdsaVerifier := ECDSAVerifier{} err = ecdsaVerifier.Verify(testECDSAKey, signedData, message) assert.NoError(t, err, "expecting success but got error while verifying data using ECDSA") } func TestECDSAVerifierOtherCurves(t *testing.T) { curves := []elliptic.Curve{elliptic.P256(), elliptic.P384(), elliptic.P521()} for _, curve := range curves { ecdsaPrivKey, err := ecdsa.GenerateKey(curve, rand.Reader) // Get a DER-encoded representation of the PublicKey ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey) assert.NoError(t, err, "failed to marshal public key") // Get a DER-encoded representation of the PrivateKey ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey) assert.NoError(t, err, "failed to marshal private key") testECDSAPubKey := data.NewECDSAPublicKey(ecdsaPubBytes) testECDSAKey, err := data.NewECDSAPrivateKey(testECDSAPubKey, ecdsaPrivKeyBytes) assert.NoError(t, err, "failed to read private key") // Sign some data using ECDSA message := []byte("test data for signing") hashed := sha256.Sum256(message) signedData, err := ecdsaSign(testECDSAKey, hashed[:]) assert.NoError(t, err) // Create and call Verify on the verifier ecdsaVerifier := ECDSAVerifier{} err = ecdsaVerifier.Verify(testECDSAKey, signedData, message) assert.NoError(t, err, "expecting success but got error while verifying data using ECDSA") // Make sure an invalid signature fails verification signedData[0]++ err = ecdsaVerifier.Verify(testECDSAKey, signedData, message) assert.Error(t, err, "expecting error but got success while verifying data using ECDSA") } } func TestECDSAx509Verifier(t *testing.T) { var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseECDSAx509Key) templ.Execute(&jsonKey, KeyTemplate{KeyType: data.ECDSAx509Key}) testECDSAKey, err := 
data.UnmarshalPublicKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signature for message signedData, _ := hex.DecodeString("b82e0ed5c5dddd74c8d3602bfd900c423511697c3cfe54e1d56b9c1df599695c53aa0caafcdc40df3ef496d78ccf67750ba9413f1ccbd8b0ef137f0da1ee9889") message := []byte("test data for signing") // Create and call Verify on the verifier ecdsaVerifier := ECDSAVerifier{} err = ecdsaVerifier.Verify(testECDSAKey, signedData, message) assert.NoError(t, err, "expecting success but got error while verifying data using ECDSA and an x509 encoded key") } func TestECDSAVerifierWithInvalidKeyType(t *testing.T) { var testECDSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseECDSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: "ecdsa-invalid"}) testECDSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signature using invalidECDSAx509Key signedData, _ := hex.DecodeString("7b1c45a4dd488a087db46ee459192d890d4f52352620cb84c2c10e0ce8a67fd6826936463a91ffdffab8e6f962da6fc3d3e5735412f7cd161a9fcf97ba1a7033") message := []byte("test data for signing") // Create and call Verify on the verifier ecdsaVerifier := ECDSAVerifier{} err = ecdsaVerifier.Verify(testECDSAKey, signedData, message) assert.Error(t, err, "invalid key type for ECDSA verifier: ecdsa-invalid") } func TestECDSAVerifierWithInvalidKey(t *testing.T) { var testECDSAKey data.PrivateKey var jsonKey bytes.Buffer // Execute our template templ, _ := template.New("KeyTemplate").Parse(baseRSAKey) templ.Execute(&jsonKey, KeyTemplate{KeyType: "rsa"}) testECDSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes()) assert.NoError(t, err) // Valid signature using invalidECDSAx509Key signedData, _ := hex.DecodeString("7b1c45a4dd488a087db46ee459192d890d4f52352620cb84c2c10e0ce8a67fd6826936463a91ffdffab8e6f962da6fc3d3e5735412f7cd161a9fcf97ba1a7033") message := []byte("test data for signing") // Create and call Verify on the 
verifier
	ecdsaVerifier := ECDSAVerifier{}
	err = ecdsaVerifier.Verify(testECDSAKey, signedData, message)
	assert.Error(t, err, "invalid key type for ECDSA verifier: rsa")
}

// TestECDSAVerifierWithInvalidSignature checks that a corrupted ECDSA
// signature over otherwise-valid data is rejected.
func TestECDSAVerifierWithInvalidSignature(t *testing.T) {
	var testECDSAKey data.PrivateKey
	var jsonKey bytes.Buffer

	// Execute our template
	templ, _ := template.New("KeyTemplate").Parse(baseECDSAKey)
	templ.Execute(&jsonKey, KeyTemplate{KeyType: data.ECDSAKey})

	testECDSAKey, err := data.UnmarshalPrivateKey(jsonKey.Bytes())
	assert.NoError(t, err)

	// Sign some data using ECDSA
	message := []byte("test data for signing")
	hashed := sha256.Sum256(message)
	signedData, err := ecdsaSign(testECDSAKey, hashed[:])
	assert.NoError(t, err)

	// Modify the signature
	signedData[0]++

	// Create and call Verify on the verifier
	ecdsaVerifier := ECDSAVerifier{}
	err = ecdsaVerifier.Verify(testECDSAKey, signedData, message)
	assert.Error(t, err, "signature verification failed")
}

// TestED25519VerifierInvalidKeyType: an unrecognized key algorithm must fail
// with ErrInvalidKeyType.
func TestED25519VerifierInvalidKeyType(t *testing.T) {
	key := data.NewPublicKey("bad_type", nil)
	v := Ed25519Verifier{}
	err := v.Verify(key, nil, nil)
	assert.Error(t, err)
	assert.IsType(t, ErrInvalidKeyType{}, err)
}

// TestRSAPyCryptoVerifierInvalidKeyType: same invalid-algorithm check for the
// PyCrypto-compatible RSA verifier.
func TestRSAPyCryptoVerifierInvalidKeyType(t *testing.T) {
	key := data.NewPublicKey("bad_type", nil)
	v := RSAPyCryptoVerifier{}
	err := v.Verify(key, nil, nil)
	assert.Error(t, err)
	assert.IsType(t, ErrInvalidKeyType{}, err)
}

// rsaPSSSign is a test helper that RSASSA-PSS signs `hashed` with the given
// RSA private key (salt length equal to the hash length).
func rsaPSSSign(privKey data.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
	if privKey, ok := privKey.(*data.RSAPrivateKey); !ok {
		return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm())
	}

	// Create an rsa.PrivateKey out of the private key bytes
	rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKey.Private())
	if err != nil {
		return nil, err
	}

	// Use the RSA key to RSASSA-PSS sign the data
	sig, err := rsa.SignPSS(rand.Reader, rsaPrivKey, hash, hashed[:], &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
	if err != nil {
		return nil, err
	}

	return sig, nil
}

func
rsaPKCS1v15Sign(privKey data.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) {
	if privKey, ok := privKey.(*data.RSAPrivateKey); !ok {
		return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm())
	}

	// Create an rsa.PrivateKey out of the private key bytes
	rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKey.Private())
	if err != nil {
		return nil, err
	}

	// Use the RSA key to RSAPKCS1v15 sign the data
	sig, err := rsa.SignPKCS1v15(rand.Reader, rsaPrivKey, hash, hashed[:])
	if err != nil {
		return nil, err
	}

	return sig, nil
}

// ecdsaSign is a test helper producing the raw r||s signature format the
// ECDSAVerifier expects: both halves left-padded to the curve octet length.
func ecdsaSign(privKey data.PrivateKey, hashed []byte) ([]byte, error) {
	if _, ok := privKey.(*data.ECDSAPrivateKey); !ok {
		return nil, fmt.Errorf("private key type not supported: %s", privKey.Algorithm())
	}

	// Create an ecdsa.PrivateKey out of the private key bytes
	ecdsaPrivKey, err := x509.ParseECPrivateKey(privKey.Private())
	if err != nil {
		return nil, err
	}

	// Use the ECDSA key to sign the data
	r, s, err := ecdsa.Sign(rand.Reader, ecdsaPrivKey, hashed[:])
	if err != nil {
		return nil, err
	}

	rBytes, sBytes := r.Bytes(), s.Bytes()
	octetLength := (ecdsaPrivKey.Params().BitSize + 7) >> 3

	// MUST include leading zeros in the output
	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
	sBuf := make([]byte, octetLength-len(sBytes), octetLength)

	rBuf = append(rBuf, rBytes...)
	sBuf = append(sBuf, sBytes...)
return append(rBuf, sBuf...), nil
}
notary-0.1/tuf/signed/verify.go000066400000000000000000000116141262207326400165770ustar00rootroot00000000000000package signed

import (
	"errors"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/keys"
	"github.com/jfrazelle/go/canonical/json"
)

// Various basic signing errors
var (
	ErrMissingKey   = errors.New("tuf: missing key")
	ErrNoSignatures = errors.New("tuf: data has no signatures")
	ErrInvalid      = errors.New("tuf: signature verification failed")
	ErrWrongMethod  = errors.New("tuf: invalid signature type")
	ErrUnknownRole  = errors.New("tuf: unknown role")
	ErrWrongType    = errors.New("tuf: meta file has wrong type")
)

// VerifyRoot checks if a given root file is valid against a known set of keys.
// Threshold is always assumed to be 1
func VerifyRoot(s *data.Signed, minVersion int, keys map[string]data.PublicKey) error {
	if len(s.Signatures) == 0 {
		return ErrNoSignatures
	}

	// Re-serialize the signed payload into canonical JSON so that the bytes
	// we verify match the bytes that were originally signed.
	var decoded map[string]interface{}
	if err := json.Unmarshal(s.Signed, &decoded); err != nil {
		return err
	}
	msg, err := json.MarshalCanonical(decoded)
	if err != nil {
		return err
	}

	for _, sig := range s.Signatures {
		// method lookup is consistent due to Unmarshal JSON doing lower case for us.
method := sig.Method verifier, ok := Verifiers[method] if !ok { logrus.Debugf("continuing b/c signing method is not supported for verify root: %s\n", sig.Method) continue } key, ok := keys[sig.KeyID] if !ok { logrus.Debugf("continuing b/c signing key isn't present in keys: %s\n", sig.KeyID) continue } if err := verifier.Verify(key, sig.Signature, msg); err != nil { logrus.Debugf("continuing b/c signature was invalid\n") continue } // threshold of 1 so return on first success return verifyMeta(s, "root", minVersion) } return ErrRoleThreshold{} } // Verify checks the signatures and metadata (expiry, version) for the signed role // data func Verify(s *data.Signed, role string, minVersion int, db *keys.KeyDB) error { if err := verifyMeta(s, role, minVersion); err != nil { return err } return VerifySignatures(s, role, db) } func verifyMeta(s *data.Signed, role string, minVersion int) error { sm := &data.SignedCommon{} if err := json.Unmarshal(s.Signed, sm); err != nil { return err } if !data.ValidTUFType(sm.Type, role) { return ErrWrongType } if IsExpired(sm.Expires) { logrus.Errorf("Metadata for %s expired", role) return ErrExpired{Role: role, Expired: sm.Expires.Format("Mon Jan 2 15:04:05 MST 2006")} } if sm.Version < minVersion { return ErrLowVersion{sm.Version, minVersion} } return nil } // IsExpired checks if the given time passed before the present time func IsExpired(t time.Time) bool { return t.Before(time.Now()) } // VerifySignatures checks the we have sufficient valid signatures for the given role func VerifySignatures(s *data.Signed, role string, db *keys.KeyDB) error { if len(s.Signatures) == 0 { return ErrNoSignatures } roleData := db.GetRole(role) if roleData == nil { return ErrUnknownRole } if roleData.Threshold < 1 { return ErrRoleThreshold{} } logrus.Debugf("%s role has key IDs: %s", role, strings.Join(roleData.KeyIDs, ",")) var decoded map[string]interface{} if err := json.Unmarshal(s.Signed, &decoded); err != nil { return err } msg, err := 
json.MarshalCanonical(decoded) if err != nil { return err } valid := make(map[string]struct{}) for _, sig := range s.Signatures { logrus.Debug("verifying signature for key ID: ", sig.KeyID) if !roleData.ValidKey(sig.KeyID) { logrus.Debugf("continuing b/c keyid was invalid: %s for roledata %s\n", sig.KeyID, roleData) continue } key := db.GetKey(sig.KeyID) if key == nil { logrus.Debugf("continuing b/c keyid lookup was nil: %s\n", sig.KeyID) continue } // method lookup is consistent due to Unmarshal JSON doing lower case for us. method := sig.Method verifier, ok := Verifiers[method] if !ok { logrus.Debugf("continuing b/c signing method is not supported: %s\n", sig.Method) continue } if err := verifier.Verify(key, sig.Signature, msg); err != nil { logrus.Debugf("continuing b/c signature was invalid\n") continue } valid[sig.KeyID] = struct{}{} } if len(valid) < roleData.Threshold { return ErrRoleThreshold{} } return nil } // Unmarshal unmarshals and verifys the raw bytes for a given role's metadata func Unmarshal(b []byte, v interface{}, role string, minVersion int, db *keys.KeyDB) error { s := &data.Signed{} if err := json.Unmarshal(b, s); err != nil { return err } if err := Verify(s, role, minVersion, db); err != nil { return err } return json.Unmarshal(s.Signed, v) } // UnmarshalTrusted unmarshals and verifies signatures only, not metadata, for a // given role's metadata func UnmarshalTrusted(b []byte, v interface{}, role string, db *keys.KeyDB) error { s := &data.Signed{} if err := json.Unmarshal(b, s); err != nil { return err } if err := VerifySignatures(s, role, db); err != nil { return err } return json.Unmarshal(s.Signed, v) } notary-0.1/tuf/signed/verify_test.go000066400000000000000000000131551262207326400176400ustar00rootroot00000000000000package signed import ( "errors" "testing" "time" "github.com/jfrazelle/go/canonical/json" "github.com/stretchr/testify/assert" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" ) func TestRoleNoKeys(t 
*testing.T) { cs := NewEd25519() k, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) r, err := data.NewRole( "root", 1, []string{}, nil, nil, ) assert.NoError(t, err) db := keys.NewDB() assert.NoError(t, err) err = db.AddRole(r) assert.NoError(t, err) meta := &data.SignedCommon{Type: "Root", Version: 1, Expires: data.DefaultExpires("root")} b, err := json.MarshalCanonical(meta) assert.NoError(t, err) s := &data.Signed{Signed: b} Sign(cs, s, k) err = Verify(s, "root", 1, db) assert.IsType(t, ErrRoleThreshold{}, err) } func TestNotEnoughSigs(t *testing.T) { cs := NewEd25519() k, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) r, err := data.NewRole( "root", 2, []string{k.ID()}, nil, nil, ) assert.NoError(t, err) db := keys.NewDB() assert.NoError(t, err) db.AddKey(k) err = db.AddRole(r) assert.NoError(t, err) meta := &data.SignedCommon{Type: "Root", Version: 1, Expires: data.DefaultExpires("root")} b, err := json.MarshalCanonical(meta) assert.NoError(t, err) s := &data.Signed{Signed: b} Sign(cs, s, k) err = Verify(s, "root", 1, db) assert.IsType(t, ErrRoleThreshold{}, err) } func TestMoreThanEnoughSigs(t *testing.T) { cs := NewEd25519() k1, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) k2, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) r, err := data.NewRole( "root", 1, []string{k1.ID(), k2.ID()}, nil, nil, ) assert.NoError(t, err) db := keys.NewDB() assert.NoError(t, err) db.AddKey(k1) db.AddKey(k2) err = db.AddRole(r) assert.NoError(t, err) meta := &data.SignedCommon{Type: "Root", Version: 1, Expires: data.DefaultExpires("root")} b, err := json.MarshalCanonical(meta) assert.NoError(t, err) s := &data.Signed{Signed: b} Sign(cs, s, k1, k2) assert.Equal(t, 2, len(s.Signatures)) err = Verify(s, "root", 1, db) assert.NoError(t, err) } func TestDuplicateSigs(t *testing.T) { cs := NewEd25519() k, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) r, err := data.NewRole( "root", 2, 
[]string{k.ID()}, nil, nil, ) assert.NoError(t, err) db := keys.NewDB() assert.NoError(t, err) db.AddKey(k) err = db.AddRole(r) assert.NoError(t, err) meta := &data.SignedCommon{Type: "Root", Version: 1, Expires: data.DefaultExpires("root")} b, err := json.MarshalCanonical(meta) assert.NoError(t, err) s := &data.Signed{Signed: b} Sign(cs, s, k) s.Signatures = append(s.Signatures, s.Signatures[0]) err = Verify(s, "root", 1, db) assert.IsType(t, ErrRoleThreshold{}, err) } func TestUnknownKeyBelowThreshold(t *testing.T) { cs := NewEd25519() k, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) unknown, err := cs.Create("root", data.ED25519Key) assert.NoError(t, err) r, err := data.NewRole( "root", 2, []string{k.ID()}, nil, nil, ) assert.NoError(t, err) db := keys.NewDB() assert.NoError(t, err) db.AddKey(k) db.AddKey(unknown) err = db.AddRole(r) assert.NoError(t, err) meta := &data.SignedCommon{Type: "Root", Version: 1, Expires: data.DefaultExpires("root")} b, err := json.MarshalCanonical(meta) assert.NoError(t, err) s := &data.Signed{Signed: b} Sign(cs, s, k, unknown) s.Signatures = append(s.Signatures) err = Verify(s, "root", 1, db) assert.IsType(t, ErrRoleThreshold{}, err) } func Test(t *testing.T) { cryptoService := NewEd25519() type test struct { name string keys []data.PublicKey roles map[string]*data.Role s *data.Signed ver int exp *time.Time typ string role string err error mut func(*test) } expiredTime := time.Now().Add(-time.Hour) minVer := 10 tests := []test{ { name: "no signatures", mut: func(t *test) { t.s.Signatures = []data.Signature{} }, err: ErrNoSignatures, }, { name: "unknown role", role: "foo", err: errors.New("tuf: meta file has wrong type"), }, { name: "exactly enough signatures", }, { name: "wrong type", typ: "bar", err: ErrWrongType, }, { name: "low version", ver: minVer - 1, err: ErrLowVersion{minVer - 1, minVer}, }, { role: "root", name: "expired", exp: &expiredTime, err: ErrExpired{"root", expiredTime.Format("Mon Jan 2 15:04:05 
MST 2006")}, }, } for _, run := range tests { db := keys.NewDB() if run.role == "" { run.role = "root" } if run.ver == 0 { run.ver = minVer } if run.exp == nil { expires := time.Now().Add(time.Hour) run.exp = &expires } if run.typ == "" { run.typ = data.TUFTypes[run.role] } if run.keys == nil && run.s == nil { k, _ := cryptoService.Create("root", data.ED25519Key) db.AddKey(k) r, err := data.NewRole( "root", 1, []string{k.ID()}, nil, nil, ) assert.NoError(t, err) db.AddRole(r) meta := &data.SignedCommon{Type: run.typ, Version: run.ver, Expires: *run.exp} b, err := json.MarshalCanonical(meta) assert.NoError(t, err) s := &data.Signed{Signed: b} Sign(cryptoService, s, k) run.s = s } if run.mut != nil { run.mut(&run) } err := Verify(run.s, run.role, minVer, db) if e, ok := run.err.(ErrExpired); ok { assertErrExpired(t, err, e) } else { assert.Equal(t, run.err, err) } } } func assertErrExpired(t *testing.T, err error, expected ErrExpired) { actual, ok := err.(ErrExpired) if !ok { t.Fatalf("expected err to have type ErrExpired, got %T", err) } assert.Equal(t, actual.Expired, expected.Expired) } notary-0.1/tuf/store/000077500000000000000000000000001262207326400146245ustar00rootroot00000000000000notary-0.1/tuf/store/errors.go000066400000000000000000000003321262207326400164650ustar00rootroot00000000000000package store // ErrMetaNotFound indicates we did not find a particular piece // of metadata in the store type ErrMetaNotFound struct{} func (err ErrMetaNotFound) Error() string { return "no trust data available" } notary-0.1/tuf/store/filestore.go000066400000000000000000000033731262207326400171550ustar00rootroot00000000000000package store import ( "fmt" "io/ioutil" "os" "path" "path/filepath" ) // NewFilesystemStore creates a new store in a directory tree func NewFilesystemStore(baseDir, metaSubDir, metaExtension, targetsSubDir string) (*FilesystemStore, error) { metaDir := path.Join(baseDir, metaSubDir) targetsDir := path.Join(baseDir, targetsSubDir) // Make sure we can 
create the necessary dirs and they are writable err := os.MkdirAll(metaDir, 0700) if err != nil { return nil, err } err = os.MkdirAll(targetsDir, 0700) if err != nil { return nil, err } return &FilesystemStore{ baseDir: baseDir, metaDir: metaDir, metaExtension: metaExtension, targetsDir: targetsDir, }, nil } // FilesystemStore is a store in a locally accessible directory type FilesystemStore struct { baseDir string metaDir string metaExtension string targetsDir string } // GetMeta returns the meta for the given name (a role) func (f *FilesystemStore) GetMeta(name string, size int64) ([]byte, error) { fileName := fmt.Sprintf("%s.%s", name, f.metaExtension) path := filepath.Join(f.metaDir, fileName) meta, err := ioutil.ReadFile(path) if err != nil { return nil, err } return meta, nil } // SetMultiMeta sets the metadata for multiple roles in one operation func (f *FilesystemStore) SetMultiMeta(metas map[string][]byte) error { for role, blob := range metas { err := f.SetMeta(role, blob) if err != nil { return err } } return nil } // SetMeta sets the meta for a single role func (f *FilesystemStore) SetMeta(name string, meta []byte) error { fileName := fmt.Sprintf("%s.%s", name, f.metaExtension) path := filepath.Join(f.metaDir, fileName) if err := ioutil.WriteFile(path, meta, 0600); err != nil { return err } return nil } notary-0.1/tuf/store/filestore_test.go000066400000000000000000000040051262207326400202050ustar00rootroot00000000000000package store import ( "io/ioutil" "os" "path" "testing" "github.com/stretchr/testify/assert" ) const testDir = "/tmp/testFilesystemStore/" func TestNewFilesystemStore(t *testing.T) { _, err := NewFilesystemStore(testDir, "metadata", "json", "targets") assert.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) defer os.RemoveAll(testDir) info, err := os.Stat(path.Join(testDir, "metadata")) assert.Nil(t, err, "Error attempting to stat metadata dir: %v", err) assert.NotNil(t, info, "Nil FileInfo from stat on 
metadata dir") assert.True(t, 0700&info.Mode() != 0, "Metadata directory is not writable") info, err = os.Stat(path.Join(testDir, "targets")) assert.Nil(t, err, "Error attempting to stat targets dir: %v", err) assert.NotNil(t, info, "Nil FileInfo from stat on targets dir") assert.True(t, 0700&info.Mode() != 0, "Targets directory is not writable") } func TestSetMeta(t *testing.T) { s, err := NewFilesystemStore(testDir, "metadata", "json", "targets") assert.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) defer os.RemoveAll(testDir) testContent := []byte("test data") err = s.SetMeta("testMeta", testContent) assert.Nil(t, err, "SetMeta returned unexpected error: %v", err) content, err := ioutil.ReadFile(path.Join(testDir, "metadata", "testMeta.json")) assert.Nil(t, err, "Error reading file: %v", err) assert.Equal(t, testContent, content, "Content written to file was corrupted.") } func TestGetMeta(t *testing.T) { s, err := NewFilesystemStore(testDir, "metadata", "json", "targets") assert.Nil(t, err, "Initializing FilesystemStore returned unexpected error: %v", err) defer os.RemoveAll(testDir) testContent := []byte("test data") ioutil.WriteFile(path.Join(testDir, "metadata", "testMeta.json"), testContent, 0600) content, err := s.GetMeta("testMeta", int64(len(testContent))) assert.Nil(t, err, "GetMeta returned unexpected error: %v", err) assert.Equal(t, testContent, content, "Content read from file was corrupted.") } notary-0.1/tuf/store/httpstore.go000066400000000000000000000153411262207326400172130ustar00rootroot00000000000000package store import ( "bytes" "errors" "fmt" "io" "io/ioutil" "mime/multipart" "net/http" "net/url" "path" "github.com/Sirupsen/logrus" ) // ErrServerUnavailable indicates an error from the server. 
code allows us to // populate the http error we received type ErrServerUnavailable struct { code int } func (err ErrServerUnavailable) Error() string { return fmt.Sprintf("Unable to reach trust server at this time: %d.", err.code) } // ErrMaliciousServer indicates the server returned a response that is highly suspected // of being malicious. i.e. it attempted to send us more data than the known size of a // particular role metadata. type ErrMaliciousServer struct{} func (err ErrMaliciousServer) Error() string { return "Trust server returned a bad response." } // HTTPStore manages pulling and pushing metadata from and to a remote // service over HTTP. It assumes the URL structure of the remote service // maps identically to the structure of the TUF repo: // //(root|targets|snapshot|timestamp).json // //foo.sh // // If consistent snapshots are disabled, it is advised that caching is not // enabled. Simple set a cachePath (and ensure it's writeable) to enable // caching. type HTTPStore struct { baseURL url.URL metaPrefix string metaExtension string targetsPrefix string keyExtension string roundTrip http.RoundTripper } // NewHTTPStore initializes a new store against a URL and a number of configuration options func NewHTTPStore(baseURL, metaPrefix, metaExtension, targetsPrefix, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) { base, err := url.Parse(baseURL) if err != nil { return nil, err } if !base.IsAbs() { return nil, errors.New("HTTPStore requires an absolute baseURL") } return &HTTPStore{ baseURL: *base, metaPrefix: metaPrefix, metaExtension: metaExtension, targetsPrefix: targetsPrefix, keyExtension: keyExtension, roundTrip: roundTrip, }, nil } // GetMeta downloads the named meta file with the given size. A short body // is acceptable because in the case of timestamp.json, the size is a cap, // not an exact length. 
func (s HTTPStore) GetMeta(name string, size int64) ([]byte, error) { url, err := s.buildMetaURL(name) if err != nil { return nil, err } req, err := http.NewRequest("GET", url.String(), nil) if err != nil { return nil, err } resp, err := s.roundTrip.RoundTrip(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { return nil, ErrMetaNotFound{} } else if resp.StatusCode != http.StatusOK { logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name) return nil, ErrServerUnavailable{code: resp.StatusCode} } if resp.ContentLength > size { return nil, ErrMaliciousServer{} } logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name) b := io.LimitReader(resp.Body, size) body, err := ioutil.ReadAll(b) if err != nil { return nil, err } return body, nil } // SetMeta uploads a piece of TUF metadata to the server func (s HTTPStore) SetMeta(name string, blob []byte) error { url, err := s.buildMetaURL("") if err != nil { return err } req, err := http.NewRequest("POST", url.String(), bytes.NewReader(blob)) if err != nil { return err } resp, err := s.roundTrip.RoundTrip(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { return ErrMetaNotFound{} } else if resp.StatusCode != http.StatusOK { return ErrServerUnavailable{code: resp.StatusCode} } return nil } // SetMultiMeta does a single batch upload of multiple pieces of TUF metadata. // This should be preferred for updating a remote server as it enable the server // to remain consistent, either accepting or rejecting the complete update. 
func (s HTTPStore) SetMultiMeta(metas map[string][]byte) error { url, err := s.buildMetaURL("") if err != nil { return err } body := &bytes.Buffer{} writer := multipart.NewWriter(body) for role, blob := range metas { part, err := writer.CreateFormFile("files", role) _, err = io.Copy(part, bytes.NewBuffer(blob)) if err != nil { return err } } err = writer.Close() if err != nil { return err } req, err := http.NewRequest("POST", url.String(), body) req.Header.Set("Content-Type", writer.FormDataContentType()) if err != nil { return err } resp, err := s.roundTrip.RoundTrip(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { return ErrMetaNotFound{} } else if resp.StatusCode != http.StatusOK { return ErrServerUnavailable{code: resp.StatusCode} } return nil } func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) { var filename string if name != "" { filename = fmt.Sprintf("%s.%s", name, s.metaExtension) } uri := path.Join(s.metaPrefix, filename) return s.buildURL(uri) } func (s HTTPStore) buildTargetsURL(name string) (*url.URL, error) { uri := path.Join(s.targetsPrefix, name) return s.buildURL(uri) } func (s HTTPStore) buildKeyURL(name string) (*url.URL, error) { filename := fmt.Sprintf("%s.%s", name, s.keyExtension) uri := path.Join(s.metaPrefix, filename) return s.buildURL(uri) } func (s HTTPStore) buildURL(uri string) (*url.URL, error) { sub, err := url.Parse(uri) if err != nil { return nil, err } return s.baseURL.ResolveReference(sub), nil } // GetTarget returns a reader for the desired target or an error. // N.B. The caller is responsible for closing the reader. 
func (s HTTPStore) GetTarget(path string) (io.ReadCloser, error) { url, err := s.buildTargetsURL(path) if err != nil { return nil, err } logrus.Debug("Attempting to download target: ", url.String()) req, err := http.NewRequest("GET", url.String(), nil) if err != nil { return nil, err } resp, err := s.roundTrip.RoundTrip(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { return nil, ErrMetaNotFound{} } else if resp.StatusCode != http.StatusOK { return nil, ErrServerUnavailable{code: resp.StatusCode} } return resp.Body, nil } // GetKey retrieves a public key from the remote server func (s HTTPStore) GetKey(role string) ([]byte, error) { url, err := s.buildKeyURL(role) if err != nil { return nil, err } req, err := http.NewRequest("GET", url.String(), nil) if err != nil { return nil, err } resp, err := s.roundTrip.RoundTrip(req) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { return nil, ErrMetaNotFound{} } else if resp.StatusCode != http.StatusOK { return nil, ErrServerUnavailable{code: resp.StatusCode} } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } return body, nil } notary-0.1/tuf/store/httpstore_test.go000066400000000000000000000261741262207326400202600ustar00rootroot00000000000000package store import ( "encoding/base64" "encoding/hex" "fmt" "io" "io/ioutil" "net/http" "net/http/httptest" "strings" "testing" "github.com/stretchr/testify/assert" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/signed" "github.com/jfrazelle/go/canonical/json" ) const testRoot = 
`{"signed":{"_type":"Root","consistent_snapshot":false,"expires":"2025-07-17T16:19:21.101698314-07:00","keys":{"1ca15c7f4b2b0c6efce202a545e7267152da28ab7c91590b3b60bdb4da723aad":{"keytype":"ecdsa","keyval":{"private":null,"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEb0720c99Cj6ZmuDlznEZ52NA6YpeY9Sj45z51XvPnG63Bi2RSBezMJlPzbSfP39mXKXqOJyT+z9BZhi3FVWczg=="}},"b1d6813b55442ecbfb1f4b40eb1fcdb4290e53434cfc9ba2da24c26c9143873b":{"keytype":"ecdsa-x509","keyval":{"private":null,"public":"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJVekNCKzZBREFnRUNBaEFCWDNKLzkzaW8zbHcrZUsvNFhvSHhNQW9HQ0NxR1NNNDlCQU1DTUJFeER6QU4KQmdOVkJBTVRCbVY0Y0dseVpUQWVGdzB4TlRBM01qQXlNekU1TVRkYUZ3MHlOVEEzTVRjeU16RTVNVGRhTUJFeApEekFOQmdOVkJBTVRCbVY0Y0dseVpUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJFTDhOTFhQCitreUJZYzhYY0FTMXB2S2l5MXRQUDlCZHJ1dEdrWlR3Z0dEYTM1THMzSUFXaWlrUmlPbGRuWmxVVEE5cG5JekoKOFlRQThhTjQ1TDQvUlplak5UQXpNQTRHQTFVZER3RUIvd1FFQXdJQW9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRgpCUWNEQXpBTUJnTlZIUk1CQWY4RUFqQUFNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRVJ1ZUVURG5xMlRqRFBmClhGRStqUFJqMEtqdXdEOG9HSmtoVGpMUDAycjhBaUI5cUNyL2ZqSXpJZ1NQcTJVSXZqR0hlYmZOYXh1QlpZZUUKYW8xNjd6dHNYZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"}},"fbddae7f25a6c23ca735b017206a849d4c89304a4d8de4dcc4b3d6f3eb22ce3b":{"keytype":"ecdsa","keyval":{"private":null,"public":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE/xS5fBHK2HKmlGcvAr06vwPITvmxWP4P3CMDCgY25iSaIiM21OiXA1/Uvo3Pa3xh5G3cwCtDvi+4FpflW2iB/w=="}},"fd75751f010c3442e23b3e3e99a1442a112f2f21038603cb8609d8b17c9e912a":{"keytype":"ed25519","keyval":{"private":null,"public":"rc+glN01m+q8jmX8SolGsjTfk6NMhUQTWyj10hjmne0="}}},"roles":{"root":{"keyids":["b1d6813b55442ecbfb1f4b40eb1fcdb4290e53434cfc9ba2da24c26c9143873b"],"threshold":1},"snapshot":{"keyids":["1ca15c7f4b2b0c6efce202a545e7267152da28ab7c91590b3b60bdb4da723aad"],"threshold":1},"targets":{"keyids":["fbddae7f25a6c23ca735b017206a849d4c89304a4d8de4dcc4b3d6f3eb22ce3b"],"threshold":1},"timestamp":{"keyids":["fd75751f010c3442e23b3e3e99a1442a112f2f21038603cb86
09d8b17c9e912a"],"threshold":1}},"version":2},"signatures":[{"keyid":"b1d6813b55442ecbfb1f4b40eb1fcdb4290e53434cfc9ba2da24c26c9143873b","method":"ecdsa","sig":"A2lNVwxHBnD9ViFtRre8r5oG6VvcvJnC6gdvvxv/Jyag40q/fNMjllCqyHrb+6z8XDZcrTTDsFU1R3/e+92d1A=="}]}` const testRootKey = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJVekNCKzZBREFnRUNBaEFCWDNKLzkzaW8zbHcrZUsvNFhvSHhNQW9HQ0NxR1NNNDlCQU1DTUJFeER6QU4KQmdOVkJBTVRCbVY0Y0dseVpUQWVGdzB4TlRBM01qQXlNekU1TVRkYUZ3MHlOVEEzTVRjeU16RTVNVGRhTUJFeApEekFOQmdOVkJBTVRCbVY0Y0dseVpUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJFTDhOTFhQCitreUJZYzhYY0FTMXB2S2l5MXRQUDlCZHJ1dEdrWlR3Z0dEYTM1THMzSUFXaWlrUmlPbGRuWmxVVEE5cG5JekoKOFlRQThhTjQ1TDQvUlplak5UQXpNQTRHQTFVZER3RUIvd1FFQXdJQW9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRgpCUWNEQXpBTUJnTlZIUk1CQWY4RUFqQUFNQW9HQ0NxR1NNNDlCQU1DQTBjQU1FUUNJRVJ1ZUVURG5xMlRqRFBmClhGRStqUFJqMEtqdXdEOG9HSmtoVGpMUDAycjhBaUI5cUNyL2ZqSXpJZ1NQcTJVSXZqR0hlYmZOYXh1QlpZZUUKYW8xNjd6dHNYZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" type TestRoundTripper struct{} func (rt *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { return http.DefaultClient.Do(req) } func TestHTTPStoreGetMeta(t *testing.T) { handler := func(w http.ResponseWriter, r *http.Request) { w.Write([]byte(testRoot)) } server := httptest.NewServer(http.HandlerFunc(handler)) defer server.Close() store, err := NewHTTPStore( server.URL, "metadata", "txt", "targets", "key", &http.Transport{}, ) if err != nil { t.Fatal(err) } j, err := store.GetMeta("root", 4801) if err != nil { t.Fatal(err) } p := &data.Signed{} err = json.Unmarshal(j, p) if err != nil { t.Fatal(err) } rootKey, err := base64.StdEncoding.DecodeString(testRootKey) assert.NoError(t, err) k := data.NewPublicKey("ecdsa-x509", rootKey) sigBytes := p.Signatures[0].Signature if err != nil { t.Fatal(err) } var decoded map[string]interface{} if err := json.Unmarshal(p.Signed, &decoded); err != nil { t.Fatal(err) } msg, err := json.MarshalCanonical(decoded) if err != nil { t.Fatal(err) } method := 
p.Signatures[0].Method err = signed.Verifiers[method].Verify(k, sigBytes, msg) if err != nil { t.Fatal(err) } } func TestSetMultiMeta(t *testing.T) { metas := map[string][]byte{ "root": []byte("root data"), "targets": []byte("targets data"), } handler := func(w http.ResponseWriter, r *http.Request) { reader, err := r.MultipartReader() if err != nil { t.Fatal(err) } updates := make(map[string][]byte) for { part, err := reader.NextPart() if err == io.EOF { break } role := strings.TrimSuffix(part.FileName(), ".json") updates[role], err = ioutil.ReadAll(part) if err != nil { t.Fatal(err) } } rd, rok := updates["root"] assert.True(t, rok) assert.Equal(t, rd, metas["root"]) td, tok := updates["targets"] assert.True(t, tok) assert.Equal(t, td, metas["targets"]) } server := httptest.NewServer(http.HandlerFunc(handler)) defer server.Close() store, err := NewHTTPStore(server.URL, "metadata", "json", "targets", "key", http.DefaultTransport) if err != nil { t.Fatal(err) } store.SetMultiMeta(metas) } func TestPyCryptoRSAPSSCompat(t *testing.T) { pubPem := "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAnKuXZeefa2LmgxaL5NsM\nzKOHNe+x/nL6ik+lDBCTV6OdcwAhHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5\nVSCuRJ53UronENl6lsa5mFKP8StYLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDf\nBEPIRp28ev/NViwGOEkBu2UAbwCIdnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK\n6pdzJXlhr9yap3UpgQ/iO9JtoEYB2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq\n3xmN4p+R4VGzfdQN+8Kl/IPjqWB535twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrM\nBI8ztvPiogz+MvXb8WvarZ6TMTh8ifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X\n7sXoaqszEtXdq5ef5zKVxkiyIQZcbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj\n1ANMFPxDQpHudCLxwCzjCb+sVa20HBRPTnzo8LSZkI6jAgMBAAE=\n-----END PUBLIC KEY-----" //privPem := "-----BEGIN RSA PRIVATE 
KEY-----\nMIIG4wIBAAKCAYEAnKuXZeefa2LmgxaL5NsMzKOHNe+x/nL6ik+lDBCTV6OdcwAh\nHQS+PONGhrChIUVR6Vth3hUCrreLzPO73Oo5VSCuRJ53UronENl6lsa5mFKP8StY\nLvIDITNvkoT3j52BJIjyNUK9UKY9As2TNqDfBEPIRp28ev/NViwGOEkBu2UAbwCI\ndnDXm8JQErCZA0Ydm7PKGgjLbFsFGrVzqXHK6pdzJXlhr9yap3UpgQ/iO9JtoEYB\n2EXsnSrPc9JRjR30bNHHtnVql3fvinXrAEwq3xmN4p+R4VGzfdQN+8Kl/IPjqWB5\n35twhFYEG/B7Ze8IwbygBjK3co/KnOPqMUrMBI8ztvPiogz+MvXb8WvarZ6TMTh8\nifZI96r7zzqyzjR1hJulEy3IsMGvz8XS2J0X7sXoaqszEtXdq5ef5zKVxkiyIQZc\nbPgmpHLq4MgfdryuVVc/RPASoRIXG4lKaTJj1ANMFPxDQpHudCLxwCzjCb+sVa20\nHBRPTnzo8LSZkI6jAgMBAAECggGAdzyI7z/HLt2IfoAsXDLynNRgVYZluzgawiU3\ngeUjnnGhpSKWERXJC2IWDPBk0YOGgcnQxErNTdfXiFZ/xfRlSgqjVwob2lRe4w4B\npLr+CZXcgznv1VrPUvdolOSp3R2Mahfn7u0qVDUQ/g8jWVI6KW7FACmQhzQkPM8o\ntLGrpcmK+PA465uaHKtYccEB02ILqrK8v++tknv7eIZczrsSKlS1h/HHjSaidYxP\n2DAUiF7wnChrwwQEvuEUHhwVgQcoDMBoow0zwHdbFiFO2ZT54H2oiJWLhpR/x6RK\ngM1seqoPH2sYErPJACMcYsMtF4Tx7b5c4WSj3vDCGb+jeqnNS6nFC3aMnv75mUS2\nYDPU1heJFd8pNHVf0RDejLZZUiJSnXf3vpOxt9Xv2+4He0jeMfLV7zX0mO2Ni3MJ\nx6PiVy4xerHImOuuHzSla5crOq2ECiAxd1wEOFDRD2LRHzfhpk1ghiA5xA1qwc7Z\neRnkVfoy6PPZ4lZakZTm0p8YCQURAoHBAMUIC/7vnayLae7POmgy+np/ty7iMfyd\nV1eO6LTO21KAaGGlhaY26WD/5LcG2FUgc5jKKahprGrmiNLzLUeQPckJmuijSEVM\nl/4DlRvCo867l7fLaVqYzsQBBdeGIFNiT+FBOd8atff87ZBEfH/rXbDi7METD/VR\n4TdblnCsKYAXEJUdkw3IK7SUGERiQZIwKXrH/Map4ibDrljJ71iCgEureU0DBwcg\nwLftmjGMISoLscdRxeubX5uf/yxtHBJeRwKBwQDLjzHhb4gNGdBHUl4hZPAGCq1V\nLX/GpfoOVObW64Lud+tI6N9GNua5/vWduL7MWWOzDTMZysganhKwsJCY5SqAA9p0\nb6ohusf9i1nUnOa2F2j+weuYPXrTYm+ZrESBBdaEJPuj3R5YHVujrBA9Xe0kVOe3\nne151A+0xJOI3tX9CttIaQAsXR7cMDinkDITw6i7X4olRMPCSixHLW97cDsVDRGt\necO1d4dP3OGscN+vKCoL6tDKDotzWHYPwjH47sUCgcEAoVI8WCiipbKkMnaTsNsE\ngKXvO0DSgq3k5HjLCbdQldUzIbgfnH7bSKNcBYtiNxjR7OihgRW8qO5GWsnmafCs\n1dy6a/2835id3cnbHRaZflvUFhVDFn2E1bCsstFLyFn3Y0w/cO9yzC/X5sZcVXRF\nit3R0Selakv3JZckru4XMJwx5JWJYMBjIIAc+miknWg3niL+UT6pPun65xG3mXWI\nS+yC7c4rw+dKQ44UMLs2MDHRBoxqi8T0W/x9NkfDszpjAoHAclH7S4ZdvC3RIR0L\nLGoJuvroGbwx1JiGdOINuooNwGuswge2zTIsJi0gN/H3hcB2E6rIFiYid4BrMrwW\nmSeq1LZVS6
siu0qw4p4OVy+/CmjfWKQD8j4k6u6PipiK6IMk1JYIlSCr2AS04JjT\njgNgGVVtxVt2cUM9huIXkXjEaRZdzK7boA60NCkIyGJdHWh3LLQdW4zg/A64C0lj\nIMoJBGuQkAKgfRuh7KI6Q6Qom7BM3OCFXdUJUEBQHc2MTyeZAoHAJdBQGBn1RFZ+\nn75AnbTMZJ6Twp2fVjzWUz/+rnXFlo87ynA18MR2BzaDST4Bvda29UBFGb32Mux9\nOHukqLgIE5jDuqWjy4B5eCoxZf/OvwlgXkX9+gprGR3axn/PZBFPbFB4ZmjbWLzn\nbocn7FJCXf+Cm0cMmv1jIIxej19MUU/duq9iq4RkHY2LG+KrSEQIUVmImCftXdN3\n/qNP5JetY0eH6C+KRc8JqDB0nvbqZNOgYXOfYXo/5Gk8XIHTFihm\n-----END RSA PRIVATE KEY-----" testStr := "The quick brown fox jumps over the lazy dog." sigHex := "4e05ee9e435653549ac4eddbc43e1a6868636e8ea6dbec2564435afcb0de47e0824cddbd88776ddb20728c53ecc90b5d543d5c37575fda8bd0317025fc07de62ee8084b1a75203b1a23d1ef4ac285da3d1fc63317d5b2cf1aafa3e522acedd366ccd5fe4a7f02a42922237426ca3dc154c57408638b9bfaf0d0213855d4e9ee621db204151bcb13d4dbb18f930ec601469c992c84b14e9e0b6f91ac9517bb3b749dd117e1cbac2e4acb0e549f44558a2005898a226d5b6c8b9291d7abae0d9e0a16858b89662a085f74a202deb867acab792bdbd2c36731217caea8b17bd210c29b890472f11e5afdd1dd7b69004db070e04201778f2c49f5758643881403d45a58d08f51b5c63910c6185892f0b590f191d760b669eff2464456f130239bba94acf54a0cb98f6939ff84ae26a37f9b890be259d9b5d636f6eb367b53e895227d7d79a3a88afd6d28c198ee80f6527437c5fbf63accb81709925c4e03d1c9eaee86f58e4bd1c669d6af042dbd412de0d13b98b1111e2fadbe34b45de52125e9a" k := data.NewPublicKey(data.RSAKey, []byte(pubPem)) sigBytes, err := hex.DecodeString(sigHex) if err != nil { t.Fatal(err) } v := signed.RSAPyCryptoVerifier{} err = v.Verify(k, sigBytes, []byte(testStr)) if err != nil { t.Fatal(err) } } func TestPyNaCled25519Compat(t *testing.T) { pubHex := "846612b43cef909a0e4ea9c818379bca4723a2020619f95e7a0ccc6f0850b7dc" //privHex := "bf3cdb9b2a664b0460e6755cb689ffca15b6e294f79f9f1fcf90b52e5b063a76" testStr := "The quick brown fox jumps over the lazy dog." 
sigHex := "166e7013e48f26dccb4e68fe4cf558d1cd3af902f8395534336a7f8b4c56588694aa3ac671767246298a59d5ef4224f02c854f41bfcfe70241db4be1546d6a00" pub, _ := hex.DecodeString(pubHex) k := data.NewPublicKey(data.ED25519Key, pub) sigBytes, _ := hex.DecodeString(sigHex) err := signed.Verifiers[data.EDDSASignature].Verify(k, sigBytes, []byte(testStr)) if err != nil { t.Fatal(err) } } func testErrorCode(t *testing.T, errorCode int, errType error) { handler := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(errorCode) } server := httptest.NewServer(http.HandlerFunc(handler)) defer server.Close() store, err := NewHTTPStore( server.URL, "metadata", "txt", "targets", "key", &http.Transport{}, ) assert.NoError(t, err) _, err = store.GetMeta("root", 4801) assert.Error(t, err) assert.IsType(t, errType, err, fmt.Sprintf("%d should translate to %v", errorCode, errType)) } func TestErrMetadataNotFound(t *testing.T) { testErrorCode(t, http.StatusNotFound, ErrMetaNotFound{}) } func Test500Errors(t *testing.T) { fiveHundreds := []int{ http.StatusInternalServerError, http.StatusNotImplemented, http.StatusBadGateway, http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusHTTPVersionNotSupported, } for _, code := range fiveHundreds { testErrorCode(t, code, ErrServerUnavailable{}) } } notary-0.1/tuf/store/interfaces.go000066400000000000000000000021761262207326400173040ustar00rootroot00000000000000package store import ( "io" "github.com/docker/notary/tuf/data" ) type targetsWalkFunc func(path string, meta data.FileMeta) error // MetadataStore must be implemented by anything that intends to interact // with a store of TUF files type MetadataStore interface { GetMeta(name string, size int64) ([]byte, error) SetMeta(name string, blob []byte) error SetMultiMeta(map[string][]byte) error } // PublicKeyStore must be implemented by a key service type PublicKeyStore interface { GetKey(role string) ([]byte, error) } // TargetStore represents a collection of targets that can be 
walked similarly // to walking a directory, passing a callback that receives the path and meta // for each target type TargetStore interface { WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error } // LocalStore represents a local TUF sture type LocalStore interface { MetadataStore TargetStore } // RemoteStore is similar to LocalStore with the added expectation that it should // provide a way to download targets once located type RemoteStore interface { MetadataStore PublicKeyStore GetTarget(path string) (io.ReadCloser, error) } notary-0.1/tuf/store/memorystore.go000066400000000000000000000040651262207326400175450ustar00rootroot00000000000000package store import ( "bytes" "fmt" "io" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" ) // NewMemoryStore returns a MetadataStore that operates entirely in memory. // Very useful for testing func NewMemoryStore(meta map[string][]byte, files map[string][]byte) RemoteStore { if meta == nil { meta = make(map[string][]byte) } if files == nil { files = make(map[string][]byte) } return &memoryStore{ meta: meta, files: files, keys: make(map[string][]data.PrivateKey), } } type memoryStore struct { meta map[string][]byte files map[string][]byte keys map[string][]data.PrivateKey } func (m *memoryStore) GetMeta(name string, size int64) ([]byte, error) { d, ok := m.meta[name] if ok { if int64(len(d)) < size { return d, nil } return d[:size], nil } return nil, ErrMetaNotFound{} } func (m *memoryStore) SetMeta(name string, meta []byte) error { m.meta[name] = meta return nil } func (m *memoryStore) SetMultiMeta(metas map[string][]byte) error { for role, blob := range metas { m.SetMeta(role, blob) } return nil } func (m *memoryStore) GetTarget(path string) (io.ReadCloser, error) { return &utils.NoopCloser{Reader: bytes.NewReader(m.files[path])}, nil } func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn targetsWalkFunc) error { if len(paths) == 0 { for path, dat := range m.files { meta, 
err := data.NewFileMeta(bytes.NewReader(dat), "sha256") if err != nil { return err } if err = targetsFn(path, meta); err != nil { return err } } return nil } for _, path := range paths { dat, ok := m.files[path] if !ok { return ErrMetaNotFound{} } meta, err := data.NewFileMeta(bytes.NewReader(dat), "sha256") if err != nil { return err } if err = targetsFn(path, meta); err != nil { return err } } return nil } func (m *memoryStore) Commit(map[string][]byte, bool, map[string]data.Hashes) error { return nil } func (m *memoryStore) GetKey(role string) ([]byte, error) { return nil, fmt.Errorf("GetKey is not implemented for the memoryStore") } notary-0.1/tuf/testutils/000077500000000000000000000000001262207326400155305ustar00rootroot00000000000000notary-0.1/tuf/testutils/repo.go000066400000000000000000000055321262207326400170310ustar00rootroot00000000000000package testutils import ( "encoding/json" "math/rand" "time" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/utils" fuzz "github.com/google/gofuzz" tuf "github.com/docker/notary/tuf" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/signed" ) // EmptyRepo creates an in memory key database, crypto service // and initializes a repo with no targets or delegations. func EmptyRepo() (*keys.KeyDB, *tuf.Repo, signed.CryptoService) { c := signed.NewEd25519() kdb := keys.NewDB() r := tuf.NewRepo(kdb, c) for _, role := range []string{"root", "targets", "snapshot", "timestamp"} { key, _ := c.Create(role, data.ED25519Key) role, _ := data.NewRole(role, 1, []string{key.ID()}, nil, nil) kdb.AddKey(key) kdb.AddRole(role) } r.InitRepo(false) return kdb, r, c } // AddTarget generates a fake target and adds it to a repo. 
func AddTarget(role string, r *tuf.Repo) (name string, meta data.FileMeta, content []byte, err error) { randness := fuzz.Continue{} content = RandomByteSlice(1024) name = randness.RandString() t := data.FileMeta{ Length: int64(len(content)), Hashes: data.Hashes{ "sha256": utils.DoHash("sha256", content), "sha512": utils.DoHash("sha512", content), }, } files := data.Files{name: t} _, err = r.AddTargets(role, files) return } // RandomByteSlice generates some random data to be used for testing only func RandomByteSlice(maxSize int) []byte { r := rand.New(rand.NewSource(time.Now().UnixNano())) contentSize := r.Intn(maxSize) content := make([]byte, contentSize) for i := range content { content[i] = byte(r.Int63() & 0xff) } return content } // Sign signs all top level roles in a repo in the appropriate order func Sign(repo *tuf.Repo) (root, targets, snapshot, timestamp *data.Signed, err error) { root, err = repo.SignRoot(data.DefaultExpires("root")) if err != nil { return nil, nil, nil, nil, err } targets, err = repo.SignTargets("targets", data.DefaultExpires("targets")) if err != nil { return nil, nil, nil, nil, err } snapshot, err = repo.SignSnapshot(data.DefaultExpires("snapshot")) if err != nil { return nil, nil, nil, nil, err } timestamp, err = repo.SignTimestamp(data.DefaultExpires("timestamp")) if err != nil { return nil, nil, nil, nil, err } return } // Serialize takes the Signed objects for the 4 top level roles and serializes them all to JSON func Serialize(sRoot, sTargets, sSnapshot, sTimestamp *data.Signed) (root, targets, snapshot, timestamp []byte, err error) { root, err = json.Marshal(sRoot) if err != nil { return nil, nil, nil, nil, err } targets, err = json.Marshal(sTargets) if err != nil { return nil, nil, nil, nil, err } snapshot, err = json.Marshal(sSnapshot) if err != nil { return nil, nil, nil, nil, err } timestamp, err = json.Marshal(sTimestamp) if err != nil { return nil, nil, nil, nil, err } return } 
notary-0.1/tuf/testutils/utils.go000066400000000000000000000030041262207326400172140ustar00rootroot00000000000000package testutils

import (
	"database/sql"
	"fmt"
	"os"

	"github.com/docker/notary/tuf/data"
	// need to initialize sqlite for tests
	_ "github.com/mattn/go-sqlite3"
)

// counter gives each database created by GetSqliteDB a unique filename.
var counter = 1

// SampleMeta returns a static, fake (and invalid) FileMeta object
func SampleMeta() data.FileMeta {
	meta := data.FileMeta{
		Length: 1,
		Hashes: data.Hashes{
			"sha256": []byte{0x01, 0x02},
			"sha512": []byte{0x03, 0x04},
		},
	}
	return meta
}

// GetSqliteDB creates and initializes a sqlite db.
// Fatal setup failures (open, begin) panic; errors from Mkdir and the
// schema Execs are deliberately ignored as best-effort test setup.
func GetSqliteDB() *sql.DB {
	os.Mkdir("/tmp/sqlite", 0755)
	conn, err := sql.Open("sqlite3", fmt.Sprintf("/tmp/sqlite/file%d.db", counter))
	if err != nil {
		panic("can't connect to db")
	}
	counter++
	tx, err := conn.Begin()
	if err != nil {
		panic("can't begin db transaction")
	}
	tx.Exec("CREATE TABLE keys (id int auto_increment, namespace varchar(255) not null, role varchar(255) not null, key text not null, primary key (id));")
	tx.Exec("CREATE TABLE filehashes(namespace varchar(255) not null, path varchar(255) not null, alg varchar(10) not null, hash varchar(128) not null, primary key (namespace, path, alg));")
	tx.Exec("CREATE TABLE filemeta(namespace varchar(255) not null, path varchar(255) not null, size int not null, custom text default null, primary key (namespace, path));")
	tx.Commit()
	return conn
}

// FlushDB deletes the contents of a sqliteDB created by GetSqliteDB.
func FlushDB(db *sql.DB) {
	tx, _ := db.Begin()
	tx.Exec("DELETE FROM `filemeta`")
	tx.Exec("DELETE FROM `filehashes`")
	tx.Exec("DELETE FROM `keys`")
	tx.Commit()
	// NOTE(review): this removes /tmp/tuf, while the databases are created
	// under /tmp/sqlite — confirm which directory was meant to be cleaned.
	os.RemoveAll("/tmp/tuf")
}
notary-0.1/tuf/tuf.go000066400000000000000000000401001262207326400146100ustar00rootroot00000000000000// Package tuf defines the core TUF logic around manipulating a repo.
package tuf

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/notary/tuf/data"
	"github.com/docker/notary/tuf/keys"
	"github.com/docker/notary/tuf/signed"
	"github.com/docker/notary/tuf/utils"
)

// ErrSigVerifyFail - signature verification failed
type ErrSigVerifyFail struct{}

// Error implements the error interface
func (e ErrSigVerifyFail) Error() string {
	return "Error: Signature verification failed"
}

// ErrMetaExpired - metadata file has expired
type ErrMetaExpired struct{}

// Error implements the error interface
func (e ErrMetaExpired) Error() string {
	return "Error: Metadata has expired"
}

// ErrLocalRootExpired - the local root file is out of date
type ErrLocalRootExpired struct{}

// Error implements the error interface
func (e ErrLocalRootExpired) Error() string {
	return "Error: Local Root Has Expired"
}

// ErrNotLoaded - attempted to access data that has not been loaded into
// the repo
type ErrNotLoaded struct {
	role string // name of the role whose metadata is missing
}

// Error implements the error interface
func (err ErrNotLoaded) Error() string {
	return fmt.Sprintf("%s role has not been loaded", err.role)
}

// Repo is an in memory representation of the TUF Repo.
// It operates at the data.Signed level, accepting and producing
// data.Signed objects. Users of a Repo are responsible for
// fetching raw JSON and using the Set* functions to populate
// the Repo instance.
type Repo struct {
	Root      *data.SignedRoot
	Targets   map[string]*data.SignedTargets // keyed by role name, incl. delegations
	Snapshot  *data.SignedSnapshot
	Timestamp *data.SignedTimestamp

	keysDB        *keys.KeyDB          // key/role lookups for signing and validation
	cryptoService signed.CryptoService // performs the actual signing; may be nil for read-only use
}

// NewRepo initializes a Repo instance with a keysDB and a signer.
// If the Repo will only be used for reading, the signer should be nil.
func NewRepo(keysDB *keys.KeyDB, cryptoService signed.CryptoService) *Repo { repo := &Repo{ Targets: make(map[string]*data.SignedTargets), keysDB: keysDB, cryptoService: cryptoService, } return repo } // AddBaseKeys is used to add keys to the role in root.json func (tr *Repo) AddBaseKeys(role string, keys ...data.PublicKey) error { if tr.Root == nil { return ErrNotLoaded{role: "root"} } ids := []string{} for _, k := range keys { // Store only the public portion tr.Root.Signed.Keys[k.ID()] = k tr.keysDB.AddKey(k) tr.Root.Signed.Roles[role].KeyIDs = append(tr.Root.Signed.Roles[role].KeyIDs, k.ID()) ids = append(ids, k.ID()) } r, err := data.NewRole( role, tr.Root.Signed.Roles[role].Threshold, ids, nil, nil, ) if err != nil { return err } tr.keysDB.AddRole(r) tr.Root.Dirty = true return nil } // ReplaceBaseKeys is used to replace all keys for the given role with the new keys func (tr *Repo) ReplaceBaseKeys(role string, keys ...data.PublicKey) error { r := tr.keysDB.GetRole(role) err := tr.RemoveBaseKeys(role, r.KeyIDs...) if err != nil { return err } return tr.AddBaseKeys(role, keys...) } // RemoveBaseKeys is used to remove keys from the roles in root.json func (tr *Repo) RemoveBaseKeys(role string, keyIDs ...string) error { if tr.Root == nil { return ErrNotLoaded{role: "root"} } var keep []string toDelete := make(map[string]struct{}) // remove keys from specified role for _, k := range keyIDs { toDelete[k] = struct{}{} for _, rk := range tr.Root.Signed.Roles[role].KeyIDs { if k != rk { keep = append(keep, rk) } } } tr.Root.Signed.Roles[role].KeyIDs = keep // determine which keys are no longer in use by any roles for roleName, r := range tr.Root.Signed.Roles { if roleName == role { continue } for _, rk := range r.KeyIDs { if _, ok := toDelete[rk]; ok { delete(toDelete, rk) } } } // remove keys no longer in use by any roles for k := range toDelete { delete(tr.Root.Signed.Keys, k) // remove the signing key from the cryptoservice if it // isn't a root key. 
Root keys must be kept for rotation // signing if role != data.CanonicalRootRole { tr.cryptoService.RemoveKey(k) } } tr.Root.Dirty = true return nil } // UpdateDelegations updates the appropriate delegations, either adding // a new delegation or updating an existing one. If keys are // provided, the IDs will be added to the role (if they do not exist // there already), and the keys will be added to the targets file. // The "before" argument specifies another role which this new role // will be added in front of (i.e. higher priority) in the delegation list. // An empty before string indicates to add the role to the end of the // delegation list. // A new, empty, targets file will be created for the new role. func (tr *Repo) UpdateDelegations(role *data.Role, keys []data.PublicKey, before string) error { if !role.IsDelegation() || !role.IsValid() { return data.ErrInvalidRole{Role: role.Name} } parent := filepath.Dir(role.Name) p, ok := tr.Targets[parent] if !ok { return data.ErrInvalidRole{Role: role.Name} } for _, k := range keys { if !utils.StrSliceContains(role.KeyIDs, k.ID()) { role.KeyIDs = append(role.KeyIDs, k.ID()) } p.Signed.Delegations.Keys[k.ID()] = k tr.keysDB.AddKey(k) } i := -1 var r *data.Role for i, r = range p.Signed.Delegations.Roles { if r.Name == role.Name { break } } if i >= 0 { p.Signed.Delegations.Roles[i] = role } else { p.Signed.Delegations.Roles = append(p.Signed.Delegations.Roles, role) } p.Dirty = true roleTargets := data.NewTargets() // NewTargets always marked Dirty tr.Targets[role.Name] = roleTargets tr.keysDB.AddRole(role) return nil } // InitRepo creates the base files for a repo. It inspects data.ValidRoles and // data.ValidTypes to determine what the role names and filename should be. It // also relies on the keysDB having already been populated with the keys and // roles. 
func (tr *Repo) InitRepo(consistent bool) error {
	if err := tr.InitRoot(consistent); err != nil {
		return err
	}
	if err := tr.InitTargets(); err != nil {
		return err
	}
	if err := tr.InitSnapshot(); err != nil {
		return err
	}
	return tr.InitTimestamp()
}

// InitRoot initializes an empty root file with the 4 core roles based
// on the current content of the key db
func (tr *Repo) InitRoot(consistent bool) error {
	rootRoles := make(map[string]*data.RootRole)
	rootKeys := make(map[string]data.PublicKey)
	for _, r := range data.ValidRoles {
		role := tr.keysDB.GetRole(r)
		if role == nil {
			// NOTE(review): this always reports the root role, even when a
			// different core role is the one missing from the key DB —
			// confirm whether `r` was intended here.
			return data.ErrInvalidRole{Role: data.CanonicalRootRole}
		}
		rootRoles[r] = &role.RootRole
		for _, kid := range role.KeyIDs {
			// don't need to check if GetKey returns nil, Key presence was
			// checked by KeyDB when role was added.
			key := tr.keysDB.GetKey(kid)
			rootKeys[kid] = key
		}
	}
	root, err := data.NewRoot(rootKeys, rootRoles, consistent)
	if err != nil {
		return err
	}
	tr.Root = root
	return nil
}

// InitTargets initializes an empty top-level targets file
func (tr *Repo) InitTargets() error {
	targets := data.NewTargets()
	tr.Targets[data.ValidRoles["targets"]] = targets
	return nil
}

// InitSnapshot initializes a snapshot based on the current root and targets
func (tr *Repo) InitSnapshot() error {
	root, err := tr.Root.ToSigned()
	if err != nil {
		return err
	}
	targets, err := tr.Targets[data.ValidRoles["targets"]].ToSigned()
	if err != nil {
		return err
	}
	snapshot, err := data.NewSnapshot(root, targets)
	if err != nil {
		return err
	}
	tr.Snapshot = snapshot
	return nil
}

// InitTimestamp initializes a timestamp based on the current snapshot
func (tr *Repo) InitTimestamp() error {
	snap, err := tr.Snapshot.ToSigned()
	if err != nil {
		return err
	}
	timestamp, err := data.NewTimestamp(snap)
	if err != nil {
		return err
	}
	tr.Timestamp = timestamp
	return nil
}

// SetRoot parses the Signed object into a SignedRoot object, sets
// the keys and roles in the KeyDB, and sets the Repo.Root field
// to the SignedRoot object.
func (tr *Repo) SetRoot(s *data.SignedRoot) error {
	// Register every key from the root metadata in the key DB.
	for _, key := range s.Signed.Keys {
		logrus.Debug("Adding key ", key.ID())
		tr.keysDB.AddKey(key)
	}
	// Register each base role (threshold + key IDs) in the key DB.
	for roleName, role := range s.Signed.Roles {
		logrus.Debugf("Adding role %s with keys %s", roleName, strings.Join(role.KeyIDs, ","))
		baseRole, err := data.NewRole(
			roleName,
			role.Threshold,
			role.KeyIDs,
			nil,
			nil,
		)
		if err != nil {
			return err
		}
		err = tr.keysDB.AddRole(baseRole)
		if err != nil {
			return err
		}
	}
	tr.Root = s
	return nil
}

// SetTimestamp parses the Signed object into a SignedTimestamp object
// and sets the Repo.Timestamp field.
func (tr *Repo) SetTimestamp(s *data.SignedTimestamp) error {
	tr.Timestamp = s
	return nil
}

// SetSnapshot parses the Signed object into a SignedSnapshots object
// and sets the Repo.Snapshot field.
func (tr *Repo) SetSnapshot(s *data.SignedSnapshot) error {
	tr.Snapshot = s
	return nil
}

// SetTargets parses the Signed object into a SignedTargets object,
// reads the delegated roles and keys into the KeyDB, and sets the
// SignedTargets object against the role in the Repo.Targets map.
func (tr *Repo) SetTargets(role string, s *data.SignedTargets) error {
	for _, k := range s.Signed.Delegations.Keys {
		tr.keysDB.AddKey(k)
	}
	for _, r := range s.Signed.Delegations.Roles {
		tr.keysDB.AddRole(r)
	}
	tr.Targets[role] = s
	return nil
}

// TargetMeta returns the FileMeta entry for the given path in the
// targets file associated with the given role. This may be nil if
// the target isn't found in the targets file.
func (tr Repo) TargetMeta(role, path string) *data.FileMeta {
	if t, ok := tr.Targets[role]; ok {
		if m, ok := t.Signed.Targets[path]; ok {
			// m is a loop-local copy of the map entry, so the returned
			// pointer does not alias the map.
			return &m
		}
	}
	return nil
}

// TargetDelegations returns a slice of Roles that are valid publishers
// for the target path provided.
func (tr Repo) TargetDelegations(role, path, pathHex string) []*data.Role {
	// Compute the hex-encoded SHA256 of the path if the caller did not
	// supply one; it is matched against delegation path-hash prefixes.
	if pathHex == "" {
		pathDigest := sha256.Sum256([]byte(path))
		pathHex = hex.EncodeToString(pathDigest[:])
	}
	var roles []*data.Role
	if t, ok := tr.Targets[role]; ok {
		// A delegation qualifies if either its hash prefixes match the
		// hashed path or its path patterns match the literal path.
		for _, r := range t.Signed.Delegations.Roles {
			if r.CheckPrefixes(pathHex) || r.CheckPaths(path) {
				roles = append(roles, r)
			}
		}
	}
	return roles
}

// FindTarget attempts to find the target represented by the given
// path by starting at the top targets file and traversing
// appropriate delegations until the first entry is found or it
// runs out of locations to search.
// N.B. Multiple entries may exist in different delegated roles
// for the same target. Only the first one encountered is returned.
func (tr Repo) FindTarget(path string) *data.FileMeta {
	pathDigest := sha256.Sum256([]byte(path))
	pathHex := hex.EncodeToString(pathDigest[:])

	var walkTargets func(role string) *data.FileMeta
	walkTargets = func(role string) *data.FileMeta {
		// The current role's own entry wins over anything delegated.
		if m := tr.TargetMeta(role, path); m != nil {
			return m
		}
		// Depth first search of delegations based on order
		// as presented in current targets file for role:
		for _, r := range tr.TargetDelegations(role, path, pathHex) {
			if m := walkTargets(r.Name); m != nil {
				return m
			}
		}
		return nil
	}

	return walkTargets("targets")
}

// AddTargets will attempt to add the given targets specifically to
// the directed role. If the user does not have the signing keys for the role
// the function will return an error and the full slice of targets.
func (tr *Repo) AddTargets(role string, targets data.Files) (data.Files, error) { t, ok := tr.Targets[role] if !ok { return targets, data.ErrInvalidRole{Role: role} } invalid := make(data.Files) for path, target := range targets { pathDigest := sha256.Sum256([]byte(path)) pathHex := hex.EncodeToString(pathDigest[:]) r := tr.keysDB.GetRole(role) if role == data.ValidRoles["targets"] || (r.CheckPaths(path) || r.CheckPrefixes(pathHex)) { t.Signed.Targets[path] = target } else { invalid[path] = target } } t.Dirty = true if len(invalid) > 0 { return invalid, fmt.Errorf("Could not add all targets") } return nil, nil } // RemoveTargets removes the given target (paths) from the given target role (delegation) func (tr *Repo) RemoveTargets(role string, targets ...string) error { t, ok := tr.Targets[role] if !ok { return data.ErrInvalidRole{Role: role} } for _, path := range targets { delete(t.Signed.Targets, path) } t.Dirty = true return nil } // UpdateSnapshot updates the FileMeta for the given role based on the Signed object func (tr *Repo) UpdateSnapshot(role string, s *data.Signed) error { jsonData, err := json.Marshal(s) if err != nil { return err } meta, err := data.NewFileMeta(bytes.NewReader(jsonData), "sha256") if err != nil { return err } tr.Snapshot.Signed.Meta[role] = meta tr.Snapshot.Dirty = true return nil } // UpdateTimestamp updates the snapshot meta in the timestamp based on the Signed object func (tr *Repo) UpdateTimestamp(s *data.Signed) error { jsonData, err := json.Marshal(s) if err != nil { return err } meta, err := data.NewFileMeta(bytes.NewReader(jsonData), "sha256") if err != nil { return err } tr.Timestamp.Signed.Meta["snapshot"] = meta tr.Timestamp.Dirty = true return nil } // SignRoot signs the root func (tr *Repo) SignRoot(expires time.Time) (*data.Signed, error) { logrus.Debug("signing root...") tr.Root.Signed.Expires = expires tr.Root.Signed.Version++ root := tr.keysDB.GetRole(data.ValidRoles["root"]) signed, err := tr.Root.ToSigned() if err != 
nil { return nil, err } signed, err = tr.sign(signed, *root) if err != nil { return nil, err } tr.Root.Signatures = signed.Signatures return signed, nil } // SignTargets signs the targets file for the given top level or delegated targets role func (tr *Repo) SignTargets(role string, expires time.Time) (*data.Signed, error) { logrus.Debugf("sign targets called for role %s", role) tr.Targets[role].Signed.Expires = expires tr.Targets[role].Signed.Version++ signed, err := tr.Targets[role].ToSigned() if err != nil { logrus.Debug("errored getting targets data.Signed object") return nil, err } targets := tr.keysDB.GetRole(role) signed, err = tr.sign(signed, *targets) if err != nil { logrus.Debug("errored signing ", role) return nil, err } tr.Targets[role].Signatures = signed.Signatures return signed, nil } // SignSnapshot updates the snapshot based on the current targets and root then signs it func (tr *Repo) SignSnapshot(expires time.Time) (*data.Signed, error) { logrus.Debug("signing snapshot...") signedRoot, err := tr.Root.ToSigned() if err != nil { return nil, err } err = tr.UpdateSnapshot("root", signedRoot) if err != nil { return nil, err } tr.Root.Dirty = false // root dirty until changes captures in snapshot for role, targets := range tr.Targets { signedTargets, err := targets.ToSigned() if err != nil { return nil, err } err = tr.UpdateSnapshot(role, signedTargets) if err != nil { return nil, err } } tr.Snapshot.Signed.Expires = expires tr.Snapshot.Signed.Version++ signed, err := tr.Snapshot.ToSigned() if err != nil { return nil, err } snapshot := tr.keysDB.GetRole(data.ValidRoles["snapshot"]) signed, err = tr.sign(signed, *snapshot) if err != nil { return nil, err } tr.Snapshot.Signatures = signed.Signatures return signed, nil } // SignTimestamp updates the timestamp based on the current snapshot then signs it func (tr *Repo) SignTimestamp(expires time.Time) (*data.Signed, error) { logrus.Debug("SignTimestamp") signedSnapshot, err := tr.Snapshot.ToSigned() if err 
!= nil { return nil, err } err = tr.UpdateTimestamp(signedSnapshot) if err != nil { return nil, err } tr.Timestamp.Signed.Expires = expires tr.Timestamp.Signed.Version++ signed, err := tr.Timestamp.ToSigned() if err != nil { return nil, err } timestamp := tr.keysDB.GetRole(data.ValidRoles["timestamp"]) signed, err = tr.sign(signed, *timestamp) if err != nil { return nil, err } tr.Timestamp.Signatures = signed.Signatures tr.Snapshot.Dirty = false // snapshot is dirty until changes have been captured in timestamp return signed, nil } func (tr Repo) sign(signedData *data.Signed, role data.Role) (*data.Signed, error) { ks := make([]data.PublicKey, 0, len(role.KeyIDs)) for _, kid := range role.KeyIDs { k := tr.keysDB.GetKey(kid) if k == nil { continue } ks = append(ks, k) } if len(ks) < 1 { return nil, keys.ErrInvalidKey } err := signed.Sign(tr.cryptoService, signedData, ks...) if err != nil { return nil, err } return signedData, nil } notary-0.1/tuf/tuf_test.go000066400000000000000000000073631262207326400156650ustar00rootroot00000000000000package tuf import ( "encoding/json" "io/ioutil" "os" "path" "path/filepath" "testing" "github.com/docker/notary/tuf/data" "github.com/docker/notary/tuf/keys" "github.com/docker/notary/tuf/signed" ) func initRepo(t *testing.T, cryptoService signed.CryptoService, keyDB *keys.KeyDB) *Repo { rootKey, err := cryptoService.Create("root", data.ED25519Key) if err != nil { t.Fatal(err) } targetsKey, err := cryptoService.Create("targets", data.ED25519Key) if err != nil { t.Fatal(err) } snapshotKey, err := cryptoService.Create("snapshot", data.ED25519Key) if err != nil { t.Fatal(err) } timestampKey, err := cryptoService.Create("timestamp", data.ED25519Key) if err != nil { t.Fatal(err) } keyDB.AddKey(rootKey) keyDB.AddKey(targetsKey) keyDB.AddKey(snapshotKey) keyDB.AddKey(timestampKey) rootRole := &data.Role{ Name: "root", RootRole: data.RootRole{ KeyIDs: []string{rootKey.ID()}, Threshold: 1, }, } targetsRole := &data.Role{ Name: "targets", 
RootRole: data.RootRole{ KeyIDs: []string{targetsKey.ID()}, Threshold: 1, }, } snapshotRole := &data.Role{ Name: "snapshot", RootRole: data.RootRole{ KeyIDs: []string{snapshotKey.ID()}, Threshold: 1, }, } timestampRole := &data.Role{ Name: "timestamp", RootRole: data.RootRole{ KeyIDs: []string{timestampKey.ID()}, Threshold: 1, }, } keyDB.AddRole(rootRole) keyDB.AddRole(targetsRole) keyDB.AddRole(snapshotRole) keyDB.AddRole(timestampRole) repo := NewRepo(keyDB, cryptoService) err = repo.InitRepo(false) if err != nil { t.Fatal(err) } return repo } func writeRepo(t *testing.T, dir string, repo *Repo) { //err := os.Remove(dir) //if err != nil { // t.Fatal(err) //} err := os.MkdirAll(dir, 0755) if err != nil { t.Fatal(err) } signedRoot, err := repo.SignRoot(data.DefaultExpires("root")) if err != nil { t.Fatal(err) } rootJSON, _ := json.Marshal(signedRoot) ioutil.WriteFile(dir+"/root.json", rootJSON, 0755) for r := range repo.Targets { signedTargets, err := repo.SignTargets(r, data.DefaultExpires("targets")) if err != nil { t.Fatal(err) } targetsJSON, _ := json.Marshal(signedTargets) p := path.Join(dir, r+".json") parentDir := filepath.Dir(p) os.MkdirAll(parentDir, 0755) ioutil.WriteFile(p, targetsJSON, 0755) } signedSnapshot, err := repo.SignSnapshot(data.DefaultExpires("snapshot")) if err != nil { t.Fatal(err) } snapshotJSON, _ := json.Marshal(signedSnapshot) ioutil.WriteFile(dir+"/snapshot.json", snapshotJSON, 0755) signedTimestamp, err := repo.SignTimestamp(data.DefaultExpires("timestamp")) if err != nil { t.Fatal(err) } timestampJSON, _ := json.Marshal(signedTimestamp) ioutil.WriteFile(dir+"/timestamp.json", timestampJSON, 0755) } func TestInitRepo(t *testing.T) { ed25519 := signed.NewEd25519() keyDB := keys.NewDB() repo := initRepo(t, ed25519, keyDB) writeRepo(t, "/tmp/tufrepo", repo) } func TestUpdateDelegations(t *testing.T) { ed25519 := signed.NewEd25519() keyDB := keys.NewDB() repo := initRepo(t, ed25519, keyDB) testKey, err := ed25519.Create("targets/test", 
data.ED25519Key) if err != nil { t.Fatal(err) } role, err := data.NewRole("targets/test", 1, []string{testKey.ID()}, []string{"test"}, []string{}) if err != nil { t.Fatal(err) } err = repo.UpdateDelegations(role, data.KeyList{testKey}, "") if err != nil { t.Fatal(err) } testDeepKey, err := ed25519.Create("targets/test/deep", data.ED25519Key) if err != nil { t.Fatal(err) } roleDeep, err := data.NewRole("targets/test/deep", 1, []string{testDeepKey.ID()}, []string{"test/deep"}, []string{}) if err != nil { t.Fatal(err) } err = repo.UpdateDelegations(roleDeep, data.KeyList{testDeepKey}, "") if err != nil { t.Fatal(err) } writeRepo(t, "/tmp/tufdelegation", repo) } notary-0.1/tuf/utils/000077500000000000000000000000001262207326400146305ustar00rootroot00000000000000notary-0.1/tuf/utils/util.go000066400000000000000000000057471262207326400161510ustar00rootroot00000000000000package utils import ( "crypto/hmac" "encoding/hex" "errors" "fmt" gopath "path" "path/filepath" "github.com/docker/notary/trustmanager" "github.com/docker/notary/tuf/data" ) // ErrWrongLength indicates the length was different to that expected var ErrWrongLength = errors.New("wrong length") // ErrWrongHash indicates the hash was different to that expected type ErrWrongHash struct { Type string Expected []byte Actual []byte } // Error implements error interface func (e ErrWrongHash) Error() string { return fmt.Sprintf("wrong %s hash, expected %#x got %#x", e.Type, e.Expected, e.Actual) } // ErrNoCommonHash indicates the metadata did not provide any hashes this // client recognizes type ErrNoCommonHash struct { Expected data.Hashes Actual data.Hashes } // Error implements error interface func (e ErrNoCommonHash) Error() string { types := func(a data.Hashes) []string { t := make([]string, 0, len(a)) for typ := range a { t = append(t, typ) } return t } return fmt.Sprintf("no common hash function, expected one of %s, got %s", types(e.Expected), types(e.Actual)) } // ErrUnknownHashAlgorithm - client was ashed 
to use a hash algorithm // it is not familiar with type ErrUnknownHashAlgorithm struct { Name string } // Error implements error interface func (e ErrUnknownHashAlgorithm) Error() string { return fmt.Sprintf("unknown hash algorithm: %s", e.Name) } // PassphraseFunc type for func that request a passphrase type PassphraseFunc func(role string, confirm bool) ([]byte, error) // FileMetaEqual checks whether 2 FileMeta objects are consistent with eachother func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error { if actual.Length != expected.Length { return ErrWrongLength } hashChecked := false for typ, hash := range expected.Hashes { if h, ok := actual.Hashes[typ]; ok { hashChecked = true if !hmac.Equal(h, hash) { return ErrWrongHash{typ, hash, h} } } } if !hashChecked { return ErrNoCommonHash{expected.Hashes, actual.Hashes} } return nil } // NormalizeTarget adds a slash, if required, to the front of a target path func NormalizeTarget(path string) string { return gopath.Join("/", path) } // HashedPaths prefixes the filename with the known hashes for the file, // returning a list of possible consistent paths. func HashedPaths(path string, hashes data.Hashes) []string { paths := make([]string, 0, len(hashes)) for _, hash := range hashes { hashedPath := filepath.Join(filepath.Dir(path), hex.EncodeToString(hash)+"."+filepath.Base(path)) paths = append(paths, hashedPath) } return paths } // CanonicalKeyID returns the ID of the public bytes version of a TUF key. // On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA // TUF keys, this is the key ID of the public key part of the key. 
func CanonicalKeyID(k data.PublicKey) (string, error) { switch k.Algorithm() { case data.ECDSAx509Key, data.RSAx509Key: return trustmanager.X509PublicKeyID(k) default: return k.ID(), nil } } notary-0.1/tuf/utils/util_test.go000066400000000000000000000052341262207326400171770ustar00rootroot00000000000000package utils import ( "encoding/hex" "testing" "github.com/docker/notary/tuf/data" "github.com/stretchr/testify/assert" ) func TestFileMetaEqual(t *testing.T) { type test struct { name string b data.FileMeta a data.FileMeta err func(test) error } fileMeta := func(length int64, hashes map[string]string) data.FileMeta { m := data.FileMeta{Length: length, Hashes: make(map[string][]byte, len(hashes))} for typ, hash := range hashes { v, err := hex.DecodeString(hash) assert.NoError(t, err, "hash not in hex") m.Hashes[typ] = v } return m } tests := []test{ { name: "wrong length", a: data.FileMeta{Length: 1}, b: data.FileMeta{Length: 2}, err: func(test) error { return ErrWrongLength }, }, { name: "wrong sha512 hash", a: fileMeta(10, map[string]string{"sha512": "111111"}), b: fileMeta(10, map[string]string{"sha512": "222222"}), err: func(t test) error { return ErrWrongHash{"sha512", t.b.Hashes["sha512"], t.a.Hashes["sha512"]} }, }, { name: "intersecting hashes", a: fileMeta(10, map[string]string{"sha512": "111111", "md5": "222222"}), b: fileMeta(10, map[string]string{"sha512": "111111", "sha256": "333333"}), err: func(test) error { return nil }, }, { name: "no common hashes", a: fileMeta(10, map[string]string{"sha512": "111111"}), b: fileMeta(10, map[string]string{"sha256": "222222", "md5": "333333"}), err: func(t test) error { return ErrNoCommonHash{t.b.Hashes, t.a.Hashes} }, }, } for _, run := range tests { assert.Equal(t, FileMetaEqual(run.a, run.b), run.err(run), "Files not equivalent") } } func TestNormalizeTarget(t *testing.T) { for before, after := range map[string]string{ "": "/", "foo.txt": "/foo.txt", "/bar.txt": "/bar.txt", "foo//bar.txt": "/foo/bar.txt", 
"/with/./a/dot": "/with/a/dot", "/with/double/../dot": "/with/dot", } { assert.Equal(t, NormalizeTarget(before), after, "Path normalization did not output expected.") } } func TestHashedPaths(t *testing.T) { hexBytes := func(s string) []byte { v, err := hex.DecodeString(s) assert.NoError(t, err, "String was not hex") return v } hashes := data.Hashes{ "sha512": hexBytes("abc123"), "sha256": hexBytes("def456"), } paths := HashedPaths("foo/bar.txt", hashes) // cannot use DeepEquals as the returned order is non-deterministic assert.Len(t, paths, 2, "Expected 2 paths") expected := map[string]struct{}{"foo/abc123.bar.txt": {}, "foo/def456.bar.txt": {}} for _, path := range paths { if _, ok := expected[path]; !ok { t.Fatalf("unexpected path: %s", path) } delete(expected, path) } } notary-0.1/tuf/utils/utils.go000066400000000000000000000047061262207326400163260ustar00rootroot00000000000000package utils import ( "bytes" "crypto/sha256" "crypto/sha512" "crypto/tls" "fmt" "io" "net/http" "net/url" "os" "strings" "github.com/docker/notary/tuf/data" ) // Download does a simple download from a URL func Download(url url.URL) (*http.Response, error) { tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } client := &http.Client{Transport: tr} return client.Get(url.String()) } // Upload does a simple JSON upload to a URL func Upload(url string, body io.Reader) (*http.Response, error) { tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } client := &http.Client{Transport: tr} return client.Post(url, "application/json", body) } // ValidateTarget ensures that the data read from reader matches // the known metadata func ValidateTarget(r io.Reader, m *data.FileMeta) error { h := sha256.New() length, err := io.Copy(h, r) if err != nil { return err } if length != m.Length { return fmt.Errorf("Size of downloaded target did not match targets entry.\nExpected: %d\nReceived: %d\n", m.Length, length) } hashDigest := h.Sum(nil) if 
bytes.Compare(m.Hashes["sha256"], hashDigest[:]) != 0 { return fmt.Errorf("Hash of downloaded target did not match targets entry.\nExpected: %x\nReceived: %x\n", m.Hashes["sha256"], hashDigest) } return nil } // StrSliceContains checks if the given string appears in the slice func StrSliceContains(ss []string, s string) bool { for _, v := range ss { if v == s { return true } } return false } // StrSliceContainsI checks if the given string appears in the slice // in a case insensitive manner func StrSliceContainsI(ss []string, s string) bool { s = strings.ToLower(s) for _, v := range ss { v = strings.ToLower(v) if v == s { return true } } return false } // FileExists returns true if a file (or dir) exists at the given path, // false otherwise func FileExists(path string) bool { _, err := os.Stat(path) return os.IsNotExist(err) } // NoopCloser is a simple Reader wrapper that does nothing when Close is // called type NoopCloser struct { io.Reader } // Close does nothing for a NoopCloser func (nc *NoopCloser) Close() error { return nil } // DoHash returns the digest of d using the hashing algorithm named // in alg func DoHash(alg string, d []byte) []byte { switch alg { case "sha256": digest := sha256.Sum256(d) return digest[:] case "sha512": digest := sha512.Sum512(d) return digest[:] } return nil } notary-0.1/utils/000077500000000000000000000000001262207326400140325ustar00rootroot00000000000000notary-0.1/utils/http.go000066400000000000000000000062051262207326400153430ustar00rootroot00000000000000package utils import ( "net/http" "github.com/Sirupsen/logrus" ctxu "github.com/docker/distribution/context" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/auth" "github.com/docker/notary/errors" "github.com/docker/notary/tuf/signed" "github.com/gorilla/mux" "golang.org/x/net/context" ) // contextHandler defines an alterate HTTP handler interface which takes in // a context for 
// contextHandler defines an alternate HTTP handler interface which takes in
// a context for authorization and returns an HTTP application error.
type contextHandler func(ctx context.Context, w http.ResponseWriter, r *http.Request) error

// rootHandler is an implementation of an HTTP request handler which handles
// authorization and calling out to the defined alternate http handler.
type rootHandler struct {
	handler contextHandler        // wrapped application handler, invoked after authorization succeeds
	auth    auth.AccessController // nil disables authorization entirely
	actions []string              // access actions required for this route, passed to the access controller
	context context.Context       // base context from which each request's context is derived
	trust   signed.CryptoService  // crypto service exposed to handlers via the request context
	//cachePool redis.Pool
}

// RootHandlerFactory creates a new rootHandler factory using the given
// Context creator and authorizer. The returned factory allows creating
// new rootHandlers from the alternate http handler contextHandler and
// a scope.
func RootHandlerFactory(auth auth.AccessController, ctx context.Context, trust signed.CryptoService) func(contextHandler, ...string) *rootHandler {
	return func(handler contextHandler, actions ...string) *rootHandler {
		return &rootHandler{
			handler: handler,
			auth:    auth,
			actions: actions,
			context: ctx,
			trust:   trust,
		}
	}
}

// ServeHTTP serves an HTTP request and implements the http.Handler interface.
// It builds a request-scoped context (request logger, repo name from the
// "imageName" mux var, and the crypto service), enforces authorization when
// an AccessController is configured, and finally delegates to the wrapped
// contextHandler, serializing any error it returns as a JSON error body.
func (root *rootHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	ctx := ctxu.WithRequest(root.context, r)
	ctx, w = ctxu.WithResponseWriter(ctx, w)
	ctx = ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
	// NOTE(review): plain string context keys can collide with values set by
	// other packages; the idiomatic fix is an unexported key type.
	ctx = context.WithValue(ctx, "repo", vars["imageName"])
	ctx = context.WithValue(ctx, "cryptoService", root.trust)

	defer func() {
		// log once the response has been written, regardless of outcome
		ctxu.GetResponseLogger(ctx).Info("response completed")
	}()

	if root.auth != nil {
		access := buildAccessRecords(vars["imageName"], root.actions...)
		var authCtx context.Context
		var err error
		if authCtx, err = root.auth.Authorized(ctx, access...); err != nil {
			// A Challenge error gets to write its own headers
			// (e.g. WWW-Authenticate) before the 401 status is sent.
			if err, ok := err.(auth.Challenge); ok {
				err.ServeHTTP(w, r)
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
			// Any other authorization failure is reported as the
			// standard v2 "unauthorized" JSON error.
			errcode.ServeJSON(w, v2.ErrorCodeUnauthorized)
			return
		}
		// continue with the (possibly augmented) authorized context
		ctx = authCtx
	}
	if err := root.handler(ctx, w, r); err != nil {
		// the handler's error becomes the JSON response body; only a
		// failure to serialize it is logged here
		e := errcode.ServeJSON(w, err)
		if e != nil {
			logrus.Error(e)
		}
		return
	}
}

// buildAccessRecords constructs one auth.Access record per requested action
// against the named repository resource.
func buildAccessRecords(repo string, actions ...string) []auth.Access {
	requiredAccess := make([]auth.Access, 0, len(actions))
	for _, action := range actions {
		requiredAccess = append(requiredAccess, auth.Access{
			Resource: auth.Resource{
				Type: "repository",
				Name: repo,
			},
			Action: action,
		})
	}
	return requiredAccess
}

// NotFoundHandler is used as a generic catch all handler to return the ErrMetadataNotFound
// 404 response
func NotFoundHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	return errors.ErrMetadataNotFound.WithDetail(nil)
}
res.StatusCode) } } //func TestRootHandlerUnauthorized(t *testing.T) { // hand := RootHandlerFactory(nil, context.Background(), &signed.Ed25519{}) // handler := hand(MockContextHandler) // // ts := httptest.NewServer(handler) // defer ts.Close() // // res, err := http.Get(ts.URL) // if err != nil { // t.Fatal(err) // } // if res.StatusCode != http.StatusUnauthorized { // t.Fatalf("Expected 401, received %d", res.StatusCode) // } //} func TestRootHandlerError(t *testing.T) { hand := RootHandlerFactory(nil, context.Background(), &signed.Ed25519{}) handler := hand(MockBetterErrorHandler) ts := httptest.NewServer(handler) defer ts.Close() res, err := http.Get(ts.URL) if res.StatusCode != http.StatusInternalServerError { t.Fatalf("Expected 500, received %d", res.StatusCode) } content, err := ioutil.ReadAll(res.Body) if err != nil { t.Fatal(err) } contentStr := strings.Trim(string(content), "\r\n\t ") if strings.TrimSpace(contentStr) != `{"errors":[{"code":"UNKNOWN","message":"unknown error","detail":"Test Error"}]}` { t.Fatalf("Error Body Incorrect: `%s`", content) } } notary-0.1/utils/tls_config.go000066400000000000000000000072541262207326400165200ustar00rootroot00000000000000package utils import ( "crypto/rand" "crypto/tls" "crypto/x509" "fmt" "io/ioutil" ) // Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) var clientCipherSuites = []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, } // Server TLS cipher suites var serverCipherSuites = append(clientCipherSuites, []uint16{ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA, }...) 
// poolFromFile reads filename and returns an x509.CertPool built from every
// PEM certificate found in it. It fails if the file cannot be read, or if no
// certificates at all could be parsed out of its contents.
func poolFromFile(filename string) (*x509.CertPool, error) {
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	certPool := x509.NewCertPool()
	if !certPool.AppendCertsFromPEM(contents) {
		return nil, fmt.Errorf(
			"Unable to parse certificates from %s", filename)
	}
	if len(certPool.Subjects()) == 0 {
		return nil, fmt.Errorf(
			"No certificates parsed from %s", filename)
	}
	return certPool, nil
}
func ConfigureServerTLS(opts *ServerTLSOpts) (*tls.Config, error) { keypair, err := tls.LoadX509KeyPair( opts.ServerCertFile, opts.ServerKeyFile) if err != nil { return nil, err } tlsConfig := &tls.Config{ MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, CipherSuites: serverCipherSuites, Certificates: []tls.Certificate{keypair}, Rand: rand.Reader, } if opts.RequireClientAuth { tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert } if opts.ClientCAFile != "" { pool, err := poolFromFile(opts.ClientCAFile) if err != nil { return nil, err } tlsConfig.ClientCAs = pool } return tlsConfig, nil } // ClientTLSOpts is a struct that contains options to pass to // ConfigureClientTLS type ClientTLSOpts struct { RootCAFile string ServerName string InsecureSkipVerify bool ClientCertFile string ClientKeyFile string } // ConfigureClientTLS generates a tls configuration for clients using the // provided parameters. /// // Note that if the root CA file contains invalid data, behavior is not // guaranteed. Currently (as of Go 1.5.1) only the valid certificates up to // the bad data will be parsed and added the root CA pool. 
func ConfigureClientTLS(opts *ClientTLSOpts) (*tls.Config, error) { tlsConfig := &tls.Config{ InsecureSkipVerify: opts.InsecureSkipVerify, MinVersion: tls.VersionTLS12, CipherSuites: clientCipherSuites, ServerName: opts.ServerName, } if opts.RootCAFile != "" { pool, err := poolFromFile(opts.RootCAFile) if err != nil { return nil, err } tlsConfig.RootCAs = pool } if opts.ClientCertFile != "" || opts.ClientKeyFile != "" { keypair, err := tls.LoadX509KeyPair( opts.ClientCertFile, opts.ClientKeyFile) if err != nil { return nil, err } tlsConfig.Certificates = []tls.Certificate{keypair} } return tlsConfig, nil } notary-0.1/utils/tls_config_test.go000066400000000000000000000202301262207326400175440ustar00rootroot00000000000000package utils import ( "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "io/ioutil" "os" "testing" "github.com/docker/notary/trustmanager" "github.com/stretchr/testify/assert" ) const ( ServerCert = "../fixtures/notary-server.crt" ServerKey = "../fixtures/notary-server.key" RootCA = "../fixtures/root-ca.crt" ) // generates a multiple-certificate file with both RSA and ECDSA certs and // returns the filename so that cleanup can be deferred. 
func generateMultiCert(t *testing.T) string { tempFile, err := ioutil.TempFile("/tmp", "cert-test") defer tempFile.Close() assert.NoError(t, err) rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) assert.NoError(t, err) ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) assert.NoError(t, err) template, err := trustmanager.NewCertificate("gun") assert.NoError(t, err) for _, key := range []crypto.Signer{rsaKey, ecKey} { derBytes, err := x509.CreateCertificate( rand.Reader, template, template, key.Public(), key) assert.NoError(t, err) cert, err := x509.ParseCertificate(derBytes) assert.NoError(t, err) pemBytes := trustmanager.CertToPEM(cert) nBytes, err := tempFile.Write(pemBytes) assert.NoError(t, err) assert.Equal(t, nBytes, len(pemBytes)) } return tempFile.Name() } // If the cert files and directory are provided but are invalid, an error is // returned. func TestConfigServerTLSFailsIfUnableToLoadCerts(t *testing.T) { for i := 0; i < 3; i++ { files := []string{ServerCert, ServerKey, RootCA} files[i] = "not-real-file" result, err := ConfigureServerTLS(&ServerTLSOpts{ ServerCertFile: files[0], ServerKeyFile: files[1], RequireClientAuth: true, ClientCAFile: files[2], }) assert.Nil(t, result) assert.Error(t, err) } } // If server cert and key are provided, and client auth is disabled, then // a valid tls.Config is returned with ClientAuth set to NoClientCert func TestConfigServerTLSServerCertsOnly(t *testing.T) { keypair, err := tls.LoadX509KeyPair(ServerCert, ServerKey) assert.NoError(t, err) tlsConfig, err := ConfigureServerTLS(&ServerTLSOpts{ ServerCertFile: ServerCert, ServerKeyFile: ServerKey, }) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) assert.True(t, tlsConfig.PreferServerCipherSuites) assert.Equal(t, tls.NoClientCert, tlsConfig.ClientAuth) assert.Nil(t, tlsConfig.ClientCAs) } // If a valid client cert file is provided, but it contains no client // certs, an error is returned. 
func TestConfigServerTLSWithEmptyCACertFile(t *testing.T) { tempFile, err := ioutil.TempFile("/tmp", "cert-test") assert.NoError(t, err) defer os.RemoveAll(tempFile.Name()) tempFile.Close() tlsConfig, err := ConfigureServerTLS(&ServerTLSOpts{ ServerCertFile: ServerCert, ServerKeyFile: ServerKey, ClientCAFile: tempFile.Name(), }) assert.Nil(t, tlsConfig) assert.Error(t, err) } // If server cert and key are provided, and client cert file is provided with // one cert, a valid tls.Config is returned with the clientCAs set to that // cert. func TestConfigServerTLSWithOneCACert(t *testing.T) { keypair, err := tls.LoadX509KeyPair(ServerCert, ServerKey) assert.NoError(t, err) tlsConfig, err := ConfigureServerTLS(&ServerTLSOpts{ ServerCertFile: ServerCert, ServerKeyFile: ServerKey, ClientCAFile: RootCA, }) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) assert.True(t, tlsConfig.PreferServerCipherSuites) assert.Equal(t, tls.NoClientCert, tlsConfig.ClientAuth) assert.Len(t, tlsConfig.ClientCAs.Subjects(), 1) } // If server cert and key are provided, and client cert file is provided with // multiple certs, a valid tls.Config is returned with the clientCAs set to // the valid cert. 
func TestConfigServerTLSWithMultipleCACerts(t *testing.T) { tempFilename := generateMultiCert(t) defer os.RemoveAll(tempFilename) keypair, err := tls.LoadX509KeyPair(ServerCert, ServerKey) assert.NoError(t, err) tlsConfig, err := ConfigureServerTLS(&ServerTLSOpts{ ServerCertFile: ServerCert, ServerKeyFile: ServerKey, ClientCAFile: tempFilename, }) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) assert.True(t, tlsConfig.PreferServerCipherSuites) assert.Equal(t, tls.NoClientCert, tlsConfig.ClientAuth) assert.Len(t, tlsConfig.ClientCAs.Subjects(), 2) } // If server cert and key are provided, and client auth is disabled, then // a valid tls.Config is returned with ClientAuth set to // RequireAndVerifyClientCert func TestConfigServerTLSClientAuthEnabled(t *testing.T) { keypair, err := tls.LoadX509KeyPair(ServerCert, ServerKey) assert.NoError(t, err) tlsConfig, err := ConfigureServerTLS(&ServerTLSOpts{ ServerCertFile: ServerCert, ServerKeyFile: ServerKey, RequireClientAuth: true, }) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) assert.True(t, tlsConfig.PreferServerCipherSuites) assert.Equal(t, tls.RequireAndVerifyClientCert, tlsConfig.ClientAuth) assert.Nil(t, tlsConfig.ClientCAs) } // The skipVerify boolean gets set on the tls.Config's InsecureSkipBoolean func TestConfigClientTLSNoVerify(t *testing.T) { for _, skip := range []bool{true, false} { tlsConfig, err := ConfigureClientTLS( &ClientTLSOpts{InsecureSkipVerify: skip}) assert.NoError(t, err) assert.Nil(t, tlsConfig.Certificates) assert.Equal(t, skip, tlsConfig.InsecureSkipVerify) assert.Equal(t, "", tlsConfig.ServerName) assert.Nil(t, tlsConfig.RootCAs) } } // The skipVerify boolean gets set on the tls.Config's InsecureSkipBoolean func TestConfigClientServerName(t *testing.T) { for _, name := range []string{"", "myname"} { tlsConfig, err := ConfigureClientTLS(&ClientTLSOpts{ServerName: name}) assert.NoError(t, err) assert.Nil(t, 
tlsConfig.Certificates) assert.Equal(t, false, tlsConfig.InsecureSkipVerify) assert.Equal(t, name, tlsConfig.ServerName) assert.Nil(t, tlsConfig.RootCAs) } } // The RootCA is set if the file provided has a single CA cert. func TestConfigClientTLSRootCAFileWithOneCert(t *testing.T) { tlsConfig, err := ConfigureClientTLS(&ClientTLSOpts{RootCAFile: RootCA}) assert.NoError(t, err) assert.Nil(t, tlsConfig.Certificates) assert.Equal(t, false, tlsConfig.InsecureSkipVerify) assert.Equal(t, "", tlsConfig.ServerName) assert.Len(t, tlsConfig.RootCAs.Subjects(), 1) } // If the root CA file provided has multiple CA certs, only the valid certs // are read. func TestConfigClientTLSRootCAFileMultipleCerts(t *testing.T) { tempFilename := generateMultiCert(t) defer os.RemoveAll(tempFilename) tlsConfig, err := ConfigureClientTLS( &ClientTLSOpts{RootCAFile: tempFilename}) assert.NoError(t, err) assert.Nil(t, tlsConfig.Certificates) assert.Equal(t, false, tlsConfig.InsecureSkipVerify) assert.Equal(t, "", tlsConfig.ServerName) assert.Len(t, tlsConfig.RootCAs.Subjects(), 2) } // An error is returned if a root CA is provided but the file doesn't exist. func TestConfigClientTLSNonexistentRootCAFile(t *testing.T) { tlsConfig, err := ConfigureClientTLS( &ClientTLSOpts{RootCAFile: "not-a-file"}) assert.Error(t, err) assert.Nil(t, tlsConfig) } // An error is returned if either the client cert or the key are provided // but invalid or blank. func TestConfigClientTLSClientCertOrKeyInvalid(t *testing.T) { for i := 0; i < 2; i++ { for _, invalid := range []string{"not-a-file", ""} { files := []string{ServerCert, ServerKey} files[i] = invalid tlsConfig, err := ConfigureClientTLS(&ClientTLSOpts{ ClientCertFile: files[0], ClientKeyFile: files[1]}) assert.Error(t, err) assert.Nil(t, tlsConfig) } } } // The certificate is set if the client cert and client key are provided and // valid. 
func TestConfigClientTLSValidClientCertAndKey(t *testing.T) { keypair, err := tls.LoadX509KeyPair(ServerCert, ServerKey) assert.NoError(t, err) tlsConfig, err := ConfigureClientTLS(&ClientTLSOpts{ ClientCertFile: ServerCert, ClientKeyFile: ServerKey}) assert.NoError(t, err) assert.Equal(t, []tls.Certificate{keypair}, tlsConfig.Certificates) assert.Equal(t, false, tlsConfig.InsecureSkipVerify) assert.Equal(t, "", tlsConfig.ServerName) assert.Nil(t, tlsConfig.RootCAs) } notary-0.1/version/000077500000000000000000000000001262207326400143575ustar00rootroot00000000000000notary-0.1/version/version.go000066400000000000000000000003051262207326400163710ustar00rootroot00000000000000package version // NotaryVersion indicates which version of the binary is running. var NotaryVersion string // GitCommit indicates which git hash the binary was built off of var GitCommit string