image-4.0.1 (git commit b61f114fd73b6ee379be34c554157b0684a86a98)

==> image-4.0.1/.gitignore <==
vendor
tools.timestamp

# Idea IDE
*.iml
.idea

# Visual Studio Code
.vscode/*

# trash
trash.lock

==> image-4.0.1/.pullapprove.yml <==
approve_by_comment: true
approve_regex: '^(Approved|lgtm|LGTM|:shipit:|:star:|:\+1:|:ship:)'
reject_regex: ^Rejected
reset_on_push: false
reviewers:
  teams:
    - image-maintainers
  name: default
  required: 2

==> image-4.0.1/.travis.Dockerfile <==
FROM ubuntu:18.04

RUN apt-get -qq update && \
    apt-get install -y sudo docker.io git make btrfs-tools libdevmapper-dev libgpgme-dev libostree-dev

ADD https://storage.googleapis.com/golang/go1.11.12.linux-amd64.tar.gz /tmp
RUN tar -C /usr/local -xzf /tmp/go1.11.12.linux-amd64.tar.gz && \
    rm /tmp/go1.11.12.linux-amd64.tar.gz && \
    ln -s /usr/local/go/bin/* /usr/local/bin/

==> image-4.0.1/.travis.yml <==
---
language: go
sudo: required
notifications:
  email: false
dist: trusty
services:
  - docker
os:
  - linux
before_install:
  - sudo docker build -t image-test -f .travis.Dockerfile .
  - sudo chown -R $(id -u):$(id -g) $HOME/gopath
env:
  - BUILDTAGS='btrfs_noversion libdm_no_deferred_remove'
  - BUILDTAGS='btrfs_noversion libdm_no_deferred_remove containers_image_openpgp'
script: >
  sudo docker run --privileged -ti --rm --user $(id -u):$(id -g)
  -e TRAVIS=$TRAVIS -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE
  -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG
  -e TRAVIS_BRANCH=$TRAVIS_BRANCH -e TRAVIS_COMMIT=$TRAVIS_COMMIT
  -e GOPATH=/gopath -e TRASH_CACHE=/gopath/.trashcache -e GOCACHE=/tmp/gocache
  -v /etc/passwd:/etc/passwd -v /etc/sudoers:/etc/sudoers -v /etc/sudoers.d:/etc/sudoers.d
  -v /var/run:/var/run:z -v $HOME/gopath:/gopath:Z
  -w /gopath/src/github.com/containers/image
  image-test bash -c "PATH=$PATH:/gopath/bin make cross tools .gitvalidation validate test test-skopeo SUDO=sudo BUILDTAGS=\"$BUILDTAGS\""

==> image-4.0.1/CONTRIBUTING.md <==
# Contributing to Containers/Image

We'd love to have you join the community! Below summarizes the processes that we follow.

## Topics

* [Reporting Issues](#reporting-issues)
* [Submitting Pull Requests](#submitting-pull-requests)
* [Communications](#communications)

## Reporting Issues

Before reporting an issue, check our backlog of [open issues](https://github.com/containers/image/issues) to see if someone else has already reported it. If so, feel free to add your scenario, or additional information, to the discussion. Or simply "subscribe" to it to be notified when it is updated.

If you find a new issue with the project we'd love to hear about it! The most important aspect of a bug report is that it includes enough information for us to reproduce it. So, please include as much detail as possible and try to remove the extra stuff that doesn't really relate to the issue itself. The easier it is for us to reproduce it, the faster it'll be fixed!

Please don't include any private/sensitive information in your issue!
## Submitting Pull Requests

No Pull Request (PR) is too small! Typos, additional comments in the code, new test cases, bug fixes, new features, more documentation, ... it's all welcome!

While bug fixes can first be identified via an "issue", that is not required. It's ok to just open up a PR with the fix, but make sure you include the same information you would have included in an issue - like how to reproduce it.

PRs for new features should include some background on what use cases the new code is trying to address. When possible and when it makes sense, try to break up larger PRs into smaller ones - it's easier to review smaller code changes. But only if those smaller ones make sense as stand-alone PRs.

Regardless of the type of PR, all PRs should include:

* well documented code changes
* additional test cases. Ideally, they should fail w/o your code change applied
* documentation changes

Squash your commits into logical pieces of work that might want to be reviewed separate from the rest of the PRs. Ideally, each commit should implement a single idea, and the PR branch should pass the tests at every commit. GitHub makes it easy to review the cumulative effect of many commits; so, when in doubt, use smaller commits.

PRs that fix issues should include a reference like `Closes #XXXX` in the commit message so that GitHub will automatically close the referenced issue when the PR is merged.

### Sign your PRs

The sign-off is a line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`.

## Communications

For general questions, or discussions, please use the IRC group on `irc.freenode.net` called `container-projects` that has been set up.
For discussions around issues/bugs and features, you can use the GitHub [issues](https://github.com/containers/image/issues) and [PRs](https://github.com/containers/image/pulls) tracking system.

==> image-4.0.1/LICENSE <==

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License.
      Subject to the terms and conditions of this License, each Contributor
      hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
      royalty-free, irrevocable copyright license to reproduce, prepare
      Derivative Works of, publicly display, publicly perform, sublicense,
      and distribute the Work and such Derivative Works in Source or Object
      form.

   3. Grant of Patent License.

      Subject to the terms and conditions of this License, each Contributor
      hereby grants to You a perpetual, worldwide, non-exclusive, no-charge,
      royalty-free, irrevocable (except as stated in this section) patent
      license to make, have made, use, offer to sell, sell, import, and
      otherwise transfer the Work, where such license applies only to those
      patent claims licensable by such Contributor that are necessarily
      infringed by their Contribution(s) alone or by combination of their
      Contribution(s) with the Work to which such Contribution(s) was
      submitted. If You institute patent litigation against any entity
      (including a cross-claim or counterclaim in a lawsuit) alleging that
      the Work or a Contribution incorporated within the Work constitutes
      direct or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate as
      of the date such litigation is filed.

   4. Redistribution.

      You may reproduce and distribute copies of the Work or Derivative
      Works thereof in any medium, with or without modifications, and in
      Source or Object form, provided that You meet the following conditions:

      (a) You must give any other recipients of the Work or Derivative
          Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works that
          You distribute, all copyright, patent, trademark, and attribution
          notices from the Source form of the Work, excluding those notices
          that do not pertain to any part of the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one of
          the following places: within a NOTICE text file distributed as
          part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and do
          not modify the License. You may add Your own attribution notices
          within Derivative Works that You distribute, alongside or as an
          addendum to the NOTICE text from the Work, provided that such
          additional attribution notices cannot be construed as modifying
          the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions for
      use, reproduction, or distribution of Your modifications, or for any
      such Derivative Works as a whole, provided Your use, reproduction,
      and distribution of the Work otherwise complies with the conditions
      stated in this License.

   5. Submission of Contributions.
      Unless You explicitly state otherwise, any Contribution intentionally
      submitted for inclusion in the Work by You to the Licensor shall be
      under the terms and conditions of this License, without any additional
      terms or conditions. Notwithstanding the above, nothing herein shall
      supersede or modify the terms of any separate license agreement you
      may have executed with Licensor regarding such Contributions.

   6. Trademarks.

      This License does not grant permission to use the trade names,
      trademarks, service marks, or product names of the Licensor, except
      as required for reasonable and customary use in describing the origin
      of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty.

      Unless required by applicable law or agreed to in writing, Licensor
      provides the Work (and each Contributor provides its Contributions)
      on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
      either express or implied, including, without limitation, any
      warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY,
      or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for
      determining the appropriateness of using or redistributing the Work
      and assume any risks associated with Your exercise of permissions
      under this License.

   8. Limitation of Liability.

      In no event and under no legal theory, whether in tort (including
      negligence), contract, or otherwise, unless required by applicable law
      (such as deliberate and grossly negligent acts) or agreed to in
      writing, shall any Contributor be liable to You for damages, including
      any direct, indirect, special, incidental, or consequential damages of
      any character arising as a result of this License or out of the use or
      inability to use the Work (including but not limited to damages for
      loss of goodwill, work stoppage, computer failure or malfunction, or
      any and all other commercial damages or losses), even if such
      Contributor has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability.

      While redistributing the Work or Derivative Works thereof, You may
      choose to offer, and charge a fee for, acceptance of support,
      warranty, indemnity, or other liability obligations and/or rights
      consistent with this License. However, in accepting such obligations,
      You may act only on Your own behalf and on Your sole responsibility,
      not on behalf of any other Contributor, and only if You agree to
      indemnify, defend, and hold each Contributor harmless for any
      liability incurred by, or claims asserted against, such Contributor
      by reason of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
==> image-4.0.1/MAINTAINERS <==
Antonio Murdaca (@runcom)
Brandon Philips (@philips)
Miloslav Trmac (@mtrmac)
Dan Walsh (@dwalsh)
Nalin Dahyabhai (@nalind)
Valentin Rothberg (@vrothberg)

==> image-4.0.1/Makefile <==
.PHONY: all tools test validate lint .gitvalidation fmt

export GOPROXY=https://proxy.golang.org

# Which github repository and branch to use for testing with skopeo
SKOPEO_REPO = containers/skopeo
SKOPEO_BRANCH = master
# Set SUDO=sudo to run container integration tests using sudo.
SUDO =
# when cross compiling _for_ a Darwin or windows host, then we must use openpgp
BUILD_TAGS_WINDOWS_CROSS = containers_image_openpgp
BUILD_TAGS_DARWIN_CROSS = containers_image_openpgp
BUILDTAGS = btrfs_noversion libdm_no_deferred_remove
BUILDFLAGS := -tags "$(BUILDTAGS)"

PACKAGES := $(shell GO111MODULE=on go list $(BUILDFLAGS) ./...)
SOURCE_DIRS = $(shell echo $(PACKAGES) | awk 'BEGIN{FS="/"; RS=" "}{print $$4}' | uniq)

PREFIX ?= ${DESTDIR}/usr
MANINSTALLDIR=${PREFIX}/share/man
GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')
MANPAGES_MD = $(wildcard docs/*.5.md)
MANPAGES ?= $(MANPAGES_MD:%.md=%)

# On macOS, (brew install gpgme) installs it within /usr/local, but /usr/local/include is not in the default search path.
# Rather than hard-code this directory, use gpgme-config. Sadly that must be done at the top-level user
# instead of locally in the gpgme subpackage, because cgo supports only pkg-config, not general shell scripts,
# and gpgme does not install a pkg-config file.
# If gpgme is not installed or gpgme-config can’t be found for other reasons, the error is silently ignored
# (and the user will probably find out because the cgo compilation will fail).
GPGME_ENV = CGO_CFLAGS="$(shell gpgme-config --cflags 2>/dev/null)" CGO_LDFLAGS="$(shell gpgme-config --libs 2>/dev/null)"

all: tools test validate .gitvalidation

build:
	$(GPGME_ENV) GO111MODULE="on" go build $(BUILDFLAGS) ./...

$(MANPAGES): %: %.md
	$(GOMD2MAN) -in $< -out $@

docs: $(MANPAGES)

install-docs: docs
	install -d -m 755 ${MANINSTALLDIR}/man5
	install -m 644 docs/*.5 ${MANINSTALLDIR}/man5/

install: install-docs

cross:
	GOOS=windows $(MAKE) build BUILDTAGS="$(BUILDTAGS) $(BUILD_TAGS_WINDOWS_CROSS)"
	GOOS=darwin $(MAKE) build BUILDTAGS="$(BUILDTAGS) $(BUILD_TAGS_DARWIN_CROSS)"

tools: tools.timestamp

tools.timestamp: Makefile
	@GO111MODULE="off" go get -u $(BUILDFLAGS) golang.org/x/lint/golint
	@GO111MODULE="off" go get $(BUILDFLAGS) github.com/vbatts/git-validation
	@touch tools.timestamp

clean:
	rm -rf tools.timestamp $(MANPAGES)

test:
	@$(GPGME_ENV) GO111MODULE="on" go test $(BUILDFLAGS) -cover ./...

# This is not run as part of (make all), but Travis CI does run this.
# Demonstrating a working version of skopeo (possibly with modified SKOPEO_REPO/SKOPEO_BRANCH, e.g.
#   make test-skopeo SKOPEO_REPO=runcom/skopeo-1 SKOPEO_BRANCH=oci-3 SUDO=sudo
# ) is a requirement before merging; note that Travis will only test
# the master branch of the upstream repo.
test-skopeo:
	@echo === Testing skopeo build
	@project_path=$$(pwd) && export GOPATH=$$(mktemp -d) && \
		skopeo_path=$${GOPATH}/src/github.com/containers/skopeo && \
		vendor_path=$${skopeo_path}/vendor/github.com/containers/image && \
		git clone -b $(SKOPEO_BRANCH) https://github.com/$(SKOPEO_REPO) $${skopeo_path} && \
		cd $${skopeo_path} && \
		GO111MODULE="on" go mod edit -replace github.com/containers/image=$${project_path} && \
		make vendor && \
		make BUILDTAGS="$(BUILDTAGS)" binary-local test-all-local && \
		$(SUDO) make BUILDTAGS="$(BUILDTAGS)" check && \
		rm -rf $${skopeo_path}

fmt:
	@gofmt -l -s -w $(SOURCE_DIRS)

validate: lint
	@GO111MODULE="on" go vet ./...
	@test -z "$$(gofmt -s -l . | grep -ve '^vendor' | tee /dev/stderr)"

lint:
	@out="$$(GO111MODULE="on" golint $(PACKAGES))"; \
	if [ -n "$$out" ]; then \
		echo "$$out"; \
		exit 1; \
	fi

# When this is running in travis, it will only check the travis commit range
.gitvalidation:
	@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found. Consider 'make clean && make tools'" && false)
ifeq ($(TRAVIS),true)
	git-validation -q -run DCO,short-subject,dangling-whitespace
else
	git fetch -q "https://github.com/containers/image.git" "refs/heads/master"
	upstream="$$(git rev-parse --verify FETCH_HEAD)" ; \
	git-validation -q -run DCO,short-subject,dangling-whitespace -range $$upstream..HEAD
endif

==> image-4.0.1/README.md <==
[![GoDoc](https://godoc.org/github.com/containers/image?status.svg)](https://godoc.org/github.com/containers/image) [![Build Status](https://travis-ci.org/containers/image.svg?branch=master)](https://travis-ci.org/containers/image)
=

`image` is a set of Go libraries aimed at working in various ways with containers' images and container image registries.

The containers/image library allows applications to pull and push images from container image registries, like the upstream docker registry. It also implements "simple image signing".

The containers/image library also allows you to inspect a repository on a container registry without pulling down the image. This means it fetches the repository's manifest and it is able to show you a `docker inspect`-like json output about a whole repository or a tag. This library, in contrast to `docker inspect`, helps you gather useful information about a repository or a tag without requiring you to run `docker pull`.

The containers/image library also allows you to translate from one image format to another, for example docker container images to OCI images. It also allows you to copy container images between various registries, possibly converting them as necessary, and to sign and verify images.

## Command-line usage

The containers/image project is only a library with no user interface; you can either incorporate it into your Go programs, or use the `skopeo` tool:

The [skopeo](https://github.com/containers/skopeo) tool uses the containers/image library and takes advantage of many of its features, e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.

## Dependencies

This library ships as a [Go module].

## Building

If you want to see what the library can do, or an example of how it is called, consider starting with the [skopeo](https://github.com/containers/skopeo) tool instead.

To integrate this library into your project, include it as a [Go module], put it into `$GOPATH` or use your preferred vendoring tool to include a copy in your project.
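For orientation, here is a minimal sketch of driving the copy API from Go. It is illustrative only: the hard-coded image references are placeholders, and the insecure "accept anything" policy is an assumption made to keep the example short; real callers should normally load the host policy instead (e.g. via `signature.DefaultPolicy`).

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/containers/image/v4/copy"
	"github.com/containers/image/v4/signature"
	"github.com/containers/image/v4/transports/alltransports"
)

func main() {
	// Transport prefixes ("docker://", "dir:", …) select the backend.
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		log.Fatal(err)
	}

	// An insecure accept-anything signature policy, for illustration only.
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer policyCtx.Destroy()

	// copy.Image pulls the image, converts it if needed, and pushes it,
	// returning the manifest written to the destination.
	if _, err := copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
		ReportWriter: os.Stdout,
	}); err != nil {
		log.Fatal(err)
	}
}
```

The other `copy.Options` fields visible in `copy/copy.go` below (signing, signature removal, progress reporting, forcing a manifest MIME type) all hang off this same entry point.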
Ensure that the dependencies documented [in go.mod][go.mod] are also available (using those exact versions or different versions of your choosing).

This library, by default, also depends on the GpgME and libostree C libraries. Either install them:

```sh
Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel
macOS$ brew install gpgme
```

or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`).

[Go module]: https://github.com/golang/go/wiki/Modules
[go.mod]: https://github.com/containers/image/blob/master/go.mod

### Supported build tags

- `containers_image_openpgp`: Use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation; the primary downside is that creating new signatures with the Golang-only implementation is not supported.
- `containers_image_ostree`: Import `ostree:` transport in `github.com/containers/image/transports/alltransports`. This builds the library requiring the `libostree` development libraries. Otherwise a stub which reports that the transport is not supported gets used. The `github.com/containers/image/ostree` package is completely disabled and impossible to import when this build tag is not in use.

## [Contributing](CONTRIBUTING.md)

Information about contributing to this project.

When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.

## License

Apache License 2.0

SPDX-License-Identifier: Apache-2.0

## Contact

- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev)
- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net

==> image-4.0.1/copy/copy.go <==
package copy

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/containers/image/v4/docker/reference"
	"github.com/containers/image/v4/image"
	"github.com/containers/image/v4/manifest"
	"github.com/containers/image/v4/pkg/blobinfocache"
	"github.com/containers/image/v4/pkg/compression"
	"github.com/containers/image/v4/signature"
	"github.com/containers/image/v4/transports"
	"github.com/containers/image/v4/types"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/vbauerster/mpb"
	"github.com/vbauerster/mpb/decor"
	"golang.org/x/crypto/ssh/terminal"
	"golang.org/x/sync/semaphore"
)

type digestingReader struct {
	source              io.Reader
	digester            digest.Digester
	expectedDigest      digest.Digest
	validationFailed    bool
	validationSucceeded bool
}

// maxParallelDownloads is used to limit the maximum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
var maxParallelDownloads = 6

// compressionBufferSize is the buffer size used to compress a blob
var compressionBufferSize = 1048576

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
	if err := expectedDigest.Validate(); err != nil {
		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
	}
	digestAlgorithm := expectedDigest.Algorithm()
	if !digestAlgorithm.Available() {
		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
	}
	return &digestingReader{
		source:           source,
		digester:         digestAlgorithm.Digester(),
		expectedDigest:   expectedDigest,
		validationFailed: false,
	}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
			// Coverage: This should not happen, the hash.Hash interface requires
			// d.digest.Write to never return an error, and the io.Writer interface
			// requires n2 == len(input) if no error is returned.
			return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
		}
	}
	if err == io.EOF {
		actualDigest := d.digester.Digest()
		if actualDigest != d.expectedDigest {
			d.validationFailed = true
			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
		}
		d.validationSucceeded = true
	}
	return n, err
}

// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
	dest              types.ImageDestination
	rawSource         types.ImageSource
	reportWriter      io.Writer
	progressOutput    io.Writer
	progressInterval  time.Duration
	progress          chan types.ProgressProperties
	blobInfoCache     types.BlobInfoCache
	copyInParallel    bool
	compressionFormat compression.Algorithm
	compressionLevel  *int
}

// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
	c                  *copier
	manifestUpdates    *types.ManifestUpdateOptions
	src                types.Image
	diffIDsAreNeeded   bool
	canModifyManifest  bool
	canSubstituteBlobs bool
}

// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
	RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
	SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
	ReportWriter     io.Writer
	SourceCtx        *types.SystemContext
	DestinationCtx   *types.SystemContext
	ProgressInterval time.Duration                 // time to wait between reports to signal the progress channel
	Progress         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
	// manifest MIME type of image set by user. "" is default and means use autodetection to determine the manifest MIME type
	ForceManifestMIMEType string
}

// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
	// NOTE this function uses an output parameter for the error return value.
	// Setting this and returning is the ideal way to return an error.
	//
	// the defers in this routine will wrap the error return with its own errors
	// which can be valuable context in the middle of a multi-streamed copy.
	if options == nil {
		options = &Options{}
	}

	reportWriter := ioutil.Discard

	if options.ReportWriter != nil {
		reportWriter = options.ReportWriter
	}

	dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
	if err != nil {
		return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
	}
	defer func() {
		if err := dest.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (dest: %v)", err)
		}
	}()

	rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
	if err != nil {
		return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
	}
	defer func() {
		if err := rawSource.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (src: %v)", err)
		}
	}()

	// If reportWriter is not a TTY (e.g., when piping to a file), do not
	// print the progress bars to avoid long and hard to parse output.
	// createProgressBar() will print a single line instead.
	progressOutput := reportWriter
	if !isTTY(reportWriter) {
		progressOutput = ioutil.Discard
	}
	copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
	c := &copier{
		dest:             dest,
		rawSource:        rawSource,
		reportWriter:     reportWriter,
		progressOutput:   progressOutput,
		progressInterval: options.ProgressInterval,
		progress:         options.Progress,
		copyInParallel:   copyInParallel,
		// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
		// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
		// we might want to add a separate CommonCtx — or would that be too confusing?
		blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
	}
	// Default to using gzip compression unless specified otherwise.
	if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
		algo, err := compression.AlgorithmByName("gzip")
		if err != nil {
			return nil, err
		}
		c.compressionFormat = algo
	} else {
		c.compressionFormat = *options.DestinationCtx.CompressionFormat
	}
	if options.DestinationCtx != nil {
		// Note that the compressionLevel can be nil.
		c.compressionLevel = options.DestinationCtx.CompressionLevel
	}

	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
	multiImage, err := isMultiImage(ctx, unparsedToplevel)
	if err != nil {
		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
	}

	if !multiImage {
		// The simple case: Just copy a single image.
		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
			return nil, err
		}
	} else {
		// This is a manifest list. Choose a single image and copy it.
		// FIXME: Copy to destinations which support manifest lists, one image at a time.
		instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
		if err != nil {
			return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
		}
		logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)

		if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
			return nil, err
		}
	}

	if err := c.dest.Commit(ctx); err != nil {
		return nil, errors.Wrap(err, "Error committing the finished image")
	}

	return manifest, nil
}

// Image copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
	// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
	// Make sure we fail cleanly in such cases.
	multiImage, err := isMultiImage(ctx, unparsedImage)
	if err != nil {
		// FIXME FIXME: How to name a reference for the sub-image?
		return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
	}
	if multiImage {
		return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
	}

	// Please keep this policy check BEFORE reading any other information about the image.
	// (the multiImage check above only matches the MIME type, which we have received anyway.
	// Actual parsing of anything should be deferred.)
	if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
		return nil, errors.Wrap(err, "Source image rejected")
	}
	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
	if err != nil {
		return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
	}

	// If the destination is a digested reference, make a note of that, determine what digest value we're
	// expecting, and check that the source manifest matches it.
	destIsDigestedReference := false
	if named := c.dest.Reference().DockerReference(); named != nil {
		if digested, ok := named.(reference.Digested); ok {
			destIsDigestedReference = true
			sourceManifest, _, err := src.Manifest(ctx)
			if err != nil {
				return nil, errors.Wrapf(err, "Error reading manifest from source image")
			}
			matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
			if err != nil {
				return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
			}
			if !matches {
				return nil, errors.New("Digest of source image's manifest would not match destination reference")
			}
		}
	}

	if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
		return nil, err
	}

	var sigs [][]byte
	if options.RemoveSignatures {
		sigs = [][]byte{}
	} else {
		c.Printf("Getting image source signatures\n")
		s, err := src.Signatures(ctx)
		if err != nil {
			return nil, errors.Wrap(err, "Error reading signatures")
		}
		sigs = s
	}
	if len(sigs) != 0 {
		c.Printf("Checking if image destination supports signatures\n")
		if err := c.dest.SupportsSignatures(ctx); err != nil {
			return nil, errors.Wrap(err, "Can not copy signatures")
		}
	}

	ic := imageCopier{
		c:               c,
		manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
		src:             src,
		// diffIDsAreNeeded is computed later
		canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
	}
	// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
	// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
	// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
	// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
	// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
	// and we would reuse and sign it.
	ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""

	if err := ic.updateEmbeddedDockerReference(); err != nil {
		return nil, err
	}

	// We compute preferredManifestMIMEType only to show it in error messages.
	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
	if err != nil {
		return nil, err
	}

	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
	ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)

	if err := ic.copyLayers(ctx); err != nil {
		return nil, err
	}

	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
	// So, try the preferred manifest MIME type. If the process succeeds, fine…
	manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
	if err != nil {
		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
		// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
		if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 {
			// We don’t have other options.
			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
			// Don’t bother the user with MIME types if we have no choice.
			return nil, err
		}
		// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
		// So if we are here, we will definitely be trying to convert the manifest.
		// With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
		// so let’s bail out early and with a better error message.
		if !ic.canModifyManifest {
			return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
		}

		// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
		errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
		for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
			logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
			ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
			attemptedManifest, err := ic.copyUpdatedConfigAndManifest(ctx)
			if err != nil {
				logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
				errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
				continue
			}

			// We have successfully uploaded a manifest.
			manifestBytes = attemptedManifest
			errs = nil // Mark this as a success so that we don't abort below.
			break
		}
		if errs != nil {
			return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
		}
	}

	if options.SignBy != "" {
		newSig, err := c.createSignature(manifestBytes, options.SignBy)
		if err != nil {
			return nil, err
		}
		sigs = append(sigs, newSig)
	}

	c.Printf("Storing signatures\n")
	if err := c.dest.PutSignatures(ctx, sigs); err != nil {
		return nil, errors.Wrap(err, "Error writing signatures")
	}

	return manifestBytes, nil
}

// Printf writes a formatted string to c.reportWriter.
// Note that the method name Printf is not entirely arbitrary: (go tool vet)
// has a built-in list of functions/methods (whatever object they are for)
// which have their format strings checked; for other names we would have
// to pass a parameter to every (go tool vet) invocation.
func (c *copier) Printf(format string, a ...interface{}) {
	fmt.Fprintf(c.reportWriter, format, a...)
}

func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
	if dest.MustMatchRuntimeOS() {
		wantedOS := runtime.GOOS
		if sys != nil && sys.OSChoice != "" {
			wantedOS = sys.OSChoice
		}
		c, err := src.OCIConfig(ctx)
		if err != nil {
			return errors.Wrapf(err, "Error parsing image configuration")
		}
		osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS)
		if wantedOS == "windows" && c.OS == "linux" {
			return osErr
		} else if wantedOS != "windows" && c.OS == "windows" {
			return osErr
		}
	}
	return nil
}

// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func (ic *imageCopier) updateEmbeddedDockerReference() error {
	if ic.c.dest.IgnoresEmbeddedDockerReference() {
		return nil // Destination would prefer us not to update the embedded reference.
	}
	destRef := ic.c.dest.Reference().DockerReference()
	if destRef == nil {
		return nil // Destination does not care about Docker references
	}
	if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
		return nil // No reference embedded in the manifest, or it matches destRef already.
	}

	if !ic.canModifyManifest {
		return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway",
			transports.ImageName(ic.c.dest.Reference()), destRef.String())
	}
	ic.manifestUpdates.EmbeddedDockerReference = destRef
	return nil
}

// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
	if f, ok := w.(*os.File); ok {
		return terminal.IsTerminal(int(f.Fd()))
	}
	return false
}

// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers(ctx context.Context) error {
	srcInfos := ic.src.LayerInfos()
	numLayers := len(srcInfos)
	updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
	if err != nil {
		return err
	}
	srcInfosUpdated := false
	if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
		if !ic.canModifyManifest {
			return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
		}
		srcInfos = updatedSrcInfos
		srcInfosUpdated = true
	}

	type copyLayerData struct {
		destInfo types.BlobInfo
		diffID   digest.Digest
		err      error
	}

	// copyGroup is used to determine if all layers are copied
	copyGroup := sync.WaitGroup{}
	copyGroup.Add(numLayers)

	// copySemaphore is used to limit the number of parallel downloads to
	// avoid malicious images causing troubles and to be nice to servers.
	var copySemaphore *semaphore.Weighted
	if ic.c.copyInParallel {
		copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
	} else {
		copySemaphore = semaphore.NewWeighted(int64(1))
	}

	data := make([]copyLayerData, numLayers)
	copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
		defer copySemaphore.Release(1)
		defer copyGroup.Done()
		cld := copyLayerData{}
		if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
			// DiffIDs are, currently, needed only when converting from schema1.
			// In which case src.LayerInfos will not have URLs because schema1
			// does not support them.
			if ic.diffIDsAreNeeded {
				cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
			} else {
				cld.destInfo = srcLayer
				logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
			}
		} else {
			cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
		}
		data[index] = cld
	}

	func() { // A scope for defer
		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
		defer progressCleanup()

		for i, srcLayer := range srcInfos {
			copySemaphore.Acquire(ctx, 1)
			go copyLayerHelper(i, srcLayer, progressPool)
		}

		// Wait for all layers to be copied
		copyGroup.Wait()
	}()

	destInfos := make([]types.BlobInfo, numLayers)
	diffIDs := make([]digest.Digest, numLayers)
	for i, cld := range data {
		if cld.err != nil {
			return cld.err
		}
		destInfos[i] = cld.destInfo
		diffIDs[i] = cld.diffID
	}

	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
	if ic.diffIDsAreNeeded {
		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
	}
	if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
		ic.manifestUpdates.LayerInfos = destInfos
	}
	return nil
}

// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
	if len(a) != len(b) {
		return true
	}
	for i := range a {
		if a[i].Digest != b[i].Digest {
			return true
		}
	}
	return false
}

// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
// stores the resulting config and manifest to the destination, and returns the stored manifest.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte, error) {
	pendingImage := ic.src
	if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
		if !ic.canModifyManifest {
			return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
		}
		if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
			// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
			// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
			// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
			// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
			// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
			// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
			return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
		}
		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
		if err != nil {
			return nil, errors.Wrap(err, "Error creating an updated image manifest")
		}
		pendingImage = pi
	}
	manifest, _, err := pendingImage.Manifest(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "Error reading manifest")
	}

	if err := ic.c.copyConfig(ctx, pendingImage); err != nil {
		return nil, err
	}

	ic.c.Printf("Writing manifest to image destination\n")
	if err := ic.c.dest.PutManifest(ctx, manifest); err != nil {
		return nil, errors.Wrap(err, "Error writing manifest")
	}
	return manifest, nil
}

// newProgressPool creates a *mpb.Progress and a cleanup function.
// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
	ctx, cancel := context.WithCancel(ctx)
	pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
	return pool, func() {
		cancel()
		pool.Wait()
	}
}

// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
	// shortDigestLen is the length of the digest used for blobs.
	const shortDigestLen = 12

	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
	// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
	maxPrefixLen := len("Copying blob ") + shortDigestLen
	if len(prefix) > maxPrefixLen {
		prefix = prefix[:maxPrefixLen]
	}

	// Use a normal progress bar when we know the size (i.e., size > 0).
	// Otherwise, use a spinner to indicate that something's happening.
	var bar *mpb.Bar
	if info.Size > 0 {
		bar = pool.AddBar(info.Size,
			mpb.BarClearOnComplete(),
			mpb.PrependDecorators(
				decor.Name(prefix),
			),
			mpb.AppendDecorators(
				decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), " "+onComplete),
			),
		)
	} else {
		bar = pool.AddSpinner(info.Size,
			mpb.SpinnerOnLeft,
			mpb.BarClearOnComplete(),
			mpb.SpinnerStyle([]string{".", "..", "...", "....", ""}),
			mpb.PrependDecorators(
				decor.Name(prefix),
			),
			mpb.AppendDecorators(
				decor.OnComplete(decor.Name(""), " "+onComplete),
			),
		)
	}
	if c.progressOutput == ioutil.Discard {
		c.Printf("Copying %s %s\n", kind, info.Digest)
	}
	return bar
}

// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
	srcInfo := src.ConfigInfo()
	if srcInfo.Digest != "" {
		configBlob, err := src.ConfigBlob(ctx)
		if err != nil {
			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
		}

		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
			progressPool, progressCleanup := c.newProgressPool(ctx)
			defer progressCleanup()
			bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
			destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
			if err != nil {
				return types.BlobInfo{}, err
			}
			bar.SetTotal(int64(len(configBlob)), true)
			return destInfo, nil
		}()
		if err != nil {
			return err
		}
		if destInfo.Digest != srcInfo.Digest {
			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
		}
	}
	return nil
}

// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
type diffIDResult struct {
	digest digest.Digest
	err    error
}

// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""

	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
	if !diffIDIsNeeded {
		reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
		if err != nil {
			return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
		}
		if reused {
			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
			bar := ic.c.createProgressBar(pool, srcInfo, "blob", "skipped: already exists")
			bar.SetTotal(0, true)
			return blobInfo, cachedDiffID, nil
		}
	}

	// Fallback: copy the layer, computing the diffID if we need to do so
	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
	if err != nil {
		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
	}
	defer srcStream.Close()

	bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")

	blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar)
	if err != nil {
		return types.BlobInfo{}, "", err
	}

	diffID := cachedDiffID
	if diffIDIsNeeded {
		select {
		case <-ctx.Done():
			return types.BlobInfo{}, "", ctx.Err()
		case diffIDResult := <-diffIDChan:
			if diffIDResult.err != nil {
				return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
			}
			logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
			// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
			// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
			ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
			diffID = diffIDResult.digest
		}
	}

	bar.SetTotal(srcInfo.Size, true)
	return blobInfo, diffID, nil
}

// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
	var diffIDChan chan diffIDResult

	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
	if diffIDIsNeeded {
		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
		pipeReader, pipeWriter := io.Pipe()
		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
			pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
		}()

		getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
			// reading from the pipe has failed, we don’t really care.
			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
			// the return value includes an error indication, which we do check.
			//
			// If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be
			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
			return pipeWriter
		}
	}
	blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
	return blobInfo, diffIDChan, err
	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}

// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
	result := diffIDResult{
		digest: "",
		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
	}
	defer func() { dest <- result }()
	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.

	result.digest, result.err = computeDiffID(layerStream, decompressor)
}

// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
	if decompressor != nil {
		s, err := decompressor(stream)
		if err != nil {
			return "", err
		}
		defer s.Close()
		stream = s
	}

	return digest.Canonical.FromReader(stream)
}

// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
	// The copying happens through a pipeline of connected io.Readers.
	// === Input: srcStream

	// === Process input through digestingReader to validate against the expected digest.
	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
	// use a separate validation failure indicator.
	// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
	// dest.PutBlob may detect that the layer already exists, in which case we don't
	// read stream to the end, and validation does not happen.
	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
	if err != nil {
		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
	}
	var destStream io.Reader = digestingReader

	// === Detect compression of the input stream.
	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
compressionFormat, decompressor, destStream, err := compression.DetectCompressionFormat(destStream) // We could skip this in some cases, but let's keep the code path uniform if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } isCompressed := decompressor != nil destStream = bar.ProxyReader(destStream) // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. if getOriginalLayerCopyWriter != nil { destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) originalLayerReader = destStream } desiredCompressionFormat := c.compressionFormat // === Deal with layer compression/decompression if necessary var inputInfo types.BlobInfo var compressionOperation types.LayerCompression if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed { logrus.Debugf("Compressing blob on the fly") compressionOperation = types.Compress pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, // we don’t care. go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() { // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally // re-compressed using the desired format. logrus.Debugf("Blob will be converted") compressionOperation = types.PreserveOriginal s, err := decompressor(destStream) if err != nil { return types.BlobInfo{}, err } defer s.Close() pipeReader, pipeWriter := io.Pipe() defer pipeReader.Close() go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter destStream = pipeReader inputInfo.Digest = "" inputInfo.Size = -1 } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed { logrus.Debugf("Blob will be decompressed") compressionOperation = types.Decompress s, err := decompressor(destStream) if err != nil { return types.BlobInfo{}, err } defer s.Close() destStream = s inputInfo.Digest = "" inputInfo.Size = -1 } else { // PreserveOriginal might also need to recompress the original blob if the desired compression format is different. logrus.Debugf("Using original blob without modification") compressionOperation = types.PreserveOriginal inputInfo = srcInfo } // === Report progress using the c.progress channel, if required. if c.progress != nil && c.progressInterval > 0 { destStream = &progressReader{ source: destStream, channel: c.progress, interval: c.progressInterval, artifact: srcInfo, lastTime: time.Now(), } } // === Finally, send the layer stream to dest. uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } uploadedInfo.Annotations = srcInfo.Annotations uploadedInfo.CompressionOperation = compressionOperation // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest. 
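// Note that the algorithm is recorded below even when compressionOperation ended up as
// types.PreserveOriginal, so CompressionAlgorithm on its own does not imply that the
// blob was actually recompressed; CompressionOperation, set above, records what happened.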
if canModifyBlob && !isConfig { uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat } // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. // So, read everything from originalLayerReader, which will cause the rest to be // sent there if we are not already at EOF. if getOriginalLayerCopyWriter != nil { logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") _, err := io.Copy(ioutil.Discard, originalLayerReader) if err != nil { return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest) } } if digestingReader.validationFailed { // Coverage: This should never happen. return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) } if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) } if digestingReader.validationSucceeded { // If compressionOperation != types.PreserveOriginal, we now have two reliable digest values: // srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader // uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob // (because inputInfo.Digest == "", this must have been computed afresh). switch compressionOperation { case types.PreserveOriginal: break // Do nothing, we have only one digest and we might not have even verified it. case types.Compress: c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) case types.Decompress: c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) default: return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation) } } return uploadedInfo, nil } // compressGoroutine reads all input from src and writes its compressed equivalent to dest. func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, compressionFormat compression.Algorithm) { err := errors.New("Internal error: unexpected panic in compressGoroutine") defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() }() compressor, err := compression.CompressStream(dest, compressionFormat, c.compressionLevel) if err != nil { return } defer compressor.Close() buf := make([]byte, compressionBufferSize) _, err = io.CopyBuffer(compressor, src, buf) // Sets err to nil, i.e. causes dest.Close() } image-4.0.1/copy/copy_test.go000066400000000000000000000117421354546467100161340ustar00rootroot00000000000000package copy import ( "bytes" "io" "os" "testing" "time" "github.com/pkg/errors" "github.com/containers/image/v4/pkg/compression" "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestNewDigestingReader(t *testing.T) { // Only the failure cases; success is tested in TestDigestingReaderRead below. 
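// Each digest below is malformed in a different way (no algorithm prefix, unknown
// algorithm, empty or invalid hex value); newDigestingReader must reject every one
// of them up front, before any data is read from source.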
source := bytes.NewReader([]byte("abc")) for _, input := range []digest.Digest{ "abc", // Not algo:hexvalue "crc32:", // Unknown algorithm, empty value "crc32:012345678", // Unknown algorithm "sha256:", // Empty value "sha256:0", // Invalid hex value "sha256:01", // Invalid length of hex value } { _, err := newDigestingReader(source, input) assert.Error(t, err, input.String()) } } func TestDigestingReaderRead(t *testing.T) { cases := []struct { input []byte digest digest.Digest }{ {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}, {make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"}, } // Valid input for _, c := range cases { source := bytes.NewReader(c.input) reader, err := newDigestingReader(source, c.digest) require.NoError(t, err, c.digest.String()) dest := bytes.Buffer{} n, err := io.Copy(&dest, reader) assert.NoError(t, err, c.digest.String()) assert.Equal(t, int64(len(c.input)), n, c.digest.String()) assert.Equal(t, c.input, dest.Bytes(), c.digest.String()) assert.False(t, reader.validationFailed, c.digest.String()) assert.True(t, reader.validationSucceeded, c.digest.String()) } // Modified input for _, c := range cases { source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil)) reader, err := newDigestingReader(source, c.digest) require.NoError(t, err, c.digest.String()) dest := bytes.Buffer{} _, err = io.Copy(&dest, reader) assert.Error(t, err, c.digest.String()) assert.True(t, reader.validationFailed, c.digest.String()) assert.False(t, reader.validationSucceeded, c.digest.String()) } // Truncated input for _, c := range cases { source := bytes.NewReader(c.input) reader, err := newDigestingReader(source, c.digest) require.NoError(t, err, c.digest.String()) if len(c.input) != 0 { dest := bytes.Buffer{} truncatedLen := int64(len(c.input) - 1) n, err := io.CopyN(&dest, reader, truncatedLen) assert.NoError(t, err, c.digest.String()) assert.Equal(t, truncatedLen, n, c.digest.String()) } assert.False(t, reader.validationFailed, c.digest.String()) assert.False(t, reader.validationSucceeded, c.digest.String()) } } func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompressor compression.DecompressorFunc) *diffIDResult { ch := make(chan diffIDResult) go diffIDComputationGoroutine(ch, layerStream, nil) timeout := time.After(time.Second) select { case res := <-ch: return &res case <-timeout: return nil } } func TestDiffIDComputationGoroutine(t *testing.T) { stream, err := os.Open("fixtures/Hello.uncompressed") require.NoError(t, err) res := goDiffIDComputationGoroutineWithTimeout(stream, nil) require.NotNil(t, res) assert.NoError(t, res.err) assert.Equal(t, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", res.digest.String()) // Error reading input reader, writer := io.Pipe() writer.CloseWithError(errors.New("Expected error reading input in diffIDComputationGoroutine")) res = goDiffIDComputationGoroutineWithTimeout(reader, nil) require.NotNil(t, res) assert.Error(t, res.err) } func TestComputeDiffID(t *testing.T) { for _, c := range []struct { filename string decompressor compression.DecompressorFunc result digest.Digest }{ {"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, {"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"}, 
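// With a decompressor supplied, computeDiffID digests the *uncompressed* payload,
// so the gzip and zstd cases below yield the same digest as Hello.uncompressed above.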
{"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, {"fixtures/Hello.zst", nil, "sha256:361a8e0372ad438a0316eb39a290318364c10b60d0a7e55b40aa3eafafc55238"}, {"fixtures/Hello.zst", compression.ZstdDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, } { stream, err := os.Open(c.filename) require.NoError(t, err, c.filename) defer stream.Close() diffID, err := computeDiffID(stream, c.decompressor) require.NoError(t, err, c.filename) assert.Equal(t, c.result, diffID) } // Error initializing decompression _, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor) assert.Error(t, err) // Error reading input reader, writer := io.Pipe() defer reader.Close() writer.CloseWithError(errors.New("Expected error reading input in computeDiffID")) _, err = computeDiffID(reader, nil) assert.Error(t, err) } image-4.0.1/copy/fixtures/000077500000000000000000000000001354546467100154405ustar00rootroot00000000000000image-4.0.1/copy/fixtures/Hello.bz2000077700000000000000000000000001354546467100262202../../pkg/compression/fixtures/Hello.bz2ustar00rootroot00000000000000image-4.0.1/copy/fixtures/Hello.gz000077700000000000000000000000001354546467100260662../../pkg/compression/fixtures/Hello.gzustar00rootroot00000000000000image-4.0.1/copy/fixtures/Hello.std000066400000000000000000000000221354546467100172110ustar00rootroot00000000000000(/X)HelloD}uimage-4.0.1/copy/fixtures/Hello.uncompressed000077700000000000000000000000001354546467100322442../../pkg/compression/fixtures/Hello.uncompressedustar00rootroot00000000000000image-4.0.1/copy/fixtures/Hello.xz000077700000000000000000000000001354546467100261302../../pkg/compression/fixtures/Hello.xzustar00rootroot00000000000000image-4.0.1/copy/fixtures/Hello.zst000066400000000000000000000000221354546467100172370ustar00rootroot00000000000000(/X)HelloD}uimage-4.0.1/copy/manifest.go000066400000000000000000000121351354546467100157260ustar00rootroot00000000000000package copy import ( "context" "strings" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. // Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} // orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. type orderedSet struct { list []string included map[string]struct{} } // newOrderedSet creates a correctly initialized orderedSet. // [Sometimes it would be really nice if Golang had constructors…] func newOrderedSet() *orderedSet { return &orderedSet{ list: []string{}, included: map[string]struct{}{}, } } // append adds s to the end of os, only if it is not included already. func (os *orderedSet) append(s string) { if _, ok := os.included[s]; !ok { os.list = append(os.list, s) os.included[s] = struct{}{} } } // determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. 
// Note that the conversion will only happen later, through ic.src.UpdatedImage // Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), // and a list of other possible alternatives, in order. func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { _, srcType, err := ic.src.Manifest(ctx) if err != nil { // This should have been cached?! return "", nil, errors.Wrap(err, "Error reading manifest") } normalizedSrcType := manifest.NormalizedMIMEType(srcType) if srcType != normalizedSrcType { logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) srcType = normalizedSrcType } if forceManifestMIMEType != "" { destSupportedManifestMIMETypes = []string{forceManifestMIMEType} } if len(destSupportedManifestMIMETypes) == 0 { return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. } supportedByDest := map[string]struct{}{} for _, t := range destSupportedManifestMIMETypes { supportedByDest[t] = struct{}{} } // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. // So, build a list of types to try in order of decreasing preference. // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types // and never attempt the other one. prioritizedTypes := newOrderedSet() // First of all, prefer to keep the original manifest unmodified. if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } if !ic.canModifyManifest { // We could also drop the !ic.canModifyManifest check and have the caller // make the choice; it is already doing that to an extent, to improve error // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? } // Then use our list of preferred types. for _, t := range preferredManifestMIMETypes { if _, ok := supportedByDest[t]; ok { prioritizedTypes.append(t) } } // Finally, try anything else the destination supports. for _, t := range destSupportedManifestMIMETypes { prioritizedTypes.append(t) } logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. return "", nil, errors.New("Internal error: no candidate MIME types") } preferredType := prioritizedTypes.list[0] if preferredType != srcType { ic.manifestUpdates.ManifestMIMEType = preferredType } else { logrus.Debugf("... 
will first try using the original manifest unmodified") } return preferredType, prioritizedTypes.list[1:], nil } // isMultiImage returns true if img is a list of images func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) { _, mt, err := img.Manifest(ctx) if err != nil { return false, err } return manifest.MIMETypeIsMultiImage(mt), nil } image-4.0.1/copy/manifest_test.go000066400000000000000000000213071354546467100167660ustar00rootroot00000000000000package copy import ( "context" "errors" "fmt" "testing" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestOrderedSet(t *testing.T) { for _, c := range []struct{ input, expected []string }{ {[]string{}, []string{}}, {[]string{"a", "b", "c"}, []string{"a", "b", "c"}}, {[]string{"a", "b", "a", "c"}, []string{"a", "b", "c"}}, } { os := newOrderedSet() for _, s := range c.input { os.append(s) } assert.Equal(t, c.expected, os.list, fmt.Sprintf("%#v", c.input)) } } // fakeImageSource is an implementation of types.Image which only returns itself as a MIME type in Manifest // except that "" means “reading the manifest should fail” type fakeImageSource string func (f fakeImageSource) Reference() types.ImageReference { panic("Unexpected call to a mock function") } func (f fakeImageSource) Manifest(ctx context.Context) ([]byte, string, error) { if string(f) == "" { return nil, "", errors.New("Manifest() directed to fail") } return nil, string(f), nil } func (f fakeImageSource) Signatures(context.Context) ([][]byte, error) { panic("Unexpected call to a mock function") } func (f fakeImageSource) ConfigInfo() types.BlobInfo { panic("Unexpected call to a mock function") } func (f fakeImageSource) ConfigBlob(context.Context) ([]byte, error) { panic("Unexpected call to a mock function") } func (f fakeImageSource) OCIConfig(context.Context) (*v1.Image, error) { panic("Unexpected call to a mock function") } func (f fakeImageSource) LayerInfos() []types.BlobInfo { panic("Unexpected call to a mock function") } func (f fakeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { panic("Unexpected call to a mock function") } func (f fakeImageSource) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { panic("Unexpected call to a mock function") } func (f fakeImageSource) Inspect(context.Context) (*types.ImageInspectInfo, error) { panic("Unexpected call to a mock function") } func (f fakeImageSource) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { panic("Unexpected call to a mock function") } func (f fakeImageSource) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { panic("Unexpected call to a mock function") } func (f fakeImageSource) Size() (int64, error) { panic("Unexpected call to a mock function") } func TestDetermineManifestConversion(t *testing.T) { supportS1S2OCI := []string{ v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } supportS1OCI := []string{ v1.MediaTypeImageManifest, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } supportS1S2 := []string{ manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } supportOnlyS1 := []string{ 
manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } cases := []struct { description string sourceType string destTypes []string expectedUpdate string expectedOtherCandidates []string }{ // Destination accepts anything — no conversion necessary {"s1→anything", manifest.DockerV2Schema1SignedMediaType, nil, "", []string{}}, {"s2→anything", manifest.DockerV2Schema2MediaType, nil, "", []string{}}, // Destination accepts the unmodified original {"s1→s1s2", manifest.DockerV2Schema1SignedMediaType, supportS1S2, "", []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1MediaType}}, {"s2→s1s2", manifest.DockerV2Schema2MediaType, supportS1S2, "", supportOnlyS1}, {"s1→s1", manifest.DockerV2Schema1SignedMediaType, supportOnlyS1, "", []string{manifest.DockerV2Schema1MediaType}}, // text/plain is normalized to s1, and if the destination accepts s1, no conversion happens. {"text→s1s2", "text/plain", supportS1S2, "", []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1MediaType}}, {"text→s1", "text/plain", supportOnlyS1, "", []string{manifest.DockerV2Schema1MediaType}}, // Conversion necessary, a preferred format is acceptable {"s2→s1", manifest.DockerV2Schema2MediaType, supportOnlyS1, manifest.DockerV2Schema1SignedMediaType, []string{manifest.DockerV2Schema1MediaType}}, // Conversion necessary, a preferred format is not acceptable {"s2→OCI", manifest.DockerV2Schema2MediaType, []string{v1.MediaTypeImageManifest}, v1.MediaTypeImageManifest, []string{}}, // text/plain is converted if the destination does not accept s1 {"text→s2", "text/plain", []string{manifest.DockerV2Schema2MediaType}, manifest.DockerV2Schema2MediaType, []string{}}, // Conversion necessary, try the preferred formats in order. // We abuse manifest.DockerV2ListMediaType here as a MIME type which is not in supportS1S2OCI, // but is still recognized by manifest.NormalizedMIMEType and not normalized to s1 { "special→s2", manifest.DockerV2ListMediaType, supportS1S2OCI, manifest.DockerV2Schema2MediaType, []string{manifest.DockerV2Schema1SignedMediaType, v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType}, }, { "special→s1", manifest.DockerV2ListMediaType, supportS1OCI, manifest.DockerV2Schema1SignedMediaType, []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema1MediaType}, }, { "special→OCI", manifest.DockerV2ListMediaType, []string{v1.MediaTypeImageManifest, "other options", "with lower priority"}, v1.MediaTypeImageManifest, []string{"other options", "with lower priority"}, }, } for _, c := range cases { src := fakeImageSource(c.sourceType) ic := &imageCopier{ manifestUpdates: &types.ManifestUpdateOptions{}, src: src, canModifyManifest: true, } preferredMIMEType, otherCandidates, err := ic.determineManifestConversion(context.Background(), c.destTypes, "") require.NoError(t, err, c.description) assert.Equal(t, c.expectedUpdate, ic.manifestUpdates.ManifestMIMEType, c.description) if c.expectedUpdate == "" { assert.Equal(t, manifest.NormalizedMIMEType(c.sourceType), preferredMIMEType, c.description) } else { assert.Equal(t, c.expectedUpdate, preferredMIMEType, c.description) } assert.Equal(t, c.expectedOtherCandidates, otherCandidates, c.description) } // Whatever the input is, with !canModifyManifest we return "keep the original as is" for _, c := range cases { src := fakeImageSource(c.sourceType) ic := &imageCopier{ manifestUpdates: &types.ManifestUpdateOptions{}, src: src, canModifyManifest: false, } preferredMIMEType, otherCandidates, err := 
ic.determineManifestConversion(context.Background(), c.destTypes, "") require.NoError(t, err, c.description) assert.Equal(t, "", ic.manifestUpdates.ManifestMIMEType, c.description) assert.Equal(t, manifest.NormalizedMIMEType(c.sourceType), preferredMIMEType, c.description) assert.Equal(t, []string{}, otherCandidates, c.description) } // With forceManifestMIMEType, the output is always the forced manifest type (in this case oci manifest) for _, c := range cases { src := fakeImageSource(c.sourceType) ic := &imageCopier{ manifestUpdates: &types.ManifestUpdateOptions{}, src: src, canModifyManifest: true, } preferredMIMEType, otherCandidates, err := ic.determineManifestConversion(context.Background(), c.destTypes, v1.MediaTypeImageManifest) require.NoError(t, err, c.description) assert.Equal(t, v1.MediaTypeImageManifest, ic.manifestUpdates.ManifestMIMEType, c.description) assert.Equal(t, v1.MediaTypeImageManifest, preferredMIMEType, c.description) assert.Equal(t, []string{}, otherCandidates, c.description) } // Error reading the manifest — smoke test only. ic := imageCopier{ manifestUpdates: &types.ManifestUpdateOptions{}, src: fakeImageSource(""), canModifyManifest: true, } _, _, err := ic.determineManifestConversion(context.Background(), supportS1S2, "") assert.Error(t, err) } func TestIsMultiImage(t *testing.T) { // MIME type is available; more or less a smoke test, other cases are handled in manifest.MIMETypeIsMultiImage for _, c := range []struct { mt string expected bool }{ {manifest.DockerV2ListMediaType, true}, {manifest.DockerV2Schema2MediaType, false}, } { src := fakeImageSource(c.mt) res, err := isMultiImage(context.Background(), src) require.NoError(t, err) assert.Equal(t, c.expected, res, c.mt) } // Error getting manifest MIME type src := fakeImageSource("") _, err := isMultiImage(context.Background(), src) assert.Error(t, err) } image-4.0.1/copy/progress_reader.go000066400000000000000000000011351354546467100173040ustar00rootroot00000000000000package copy import ( "io" "time" "github.com/containers/image/v4/types" ) // progressReader is a reader that reports its progress on an interval. type progressReader struct { source io.Reader channel chan types.ProgressProperties interval time.Duration artifact types.BlobInfo lastTime time.Time offset uint64 } func (r *progressReader) Read(p []byte) (int, error) { n, err := r.source.Read(p) r.offset += uint64(n) if time.Since(r.lastTime) > r.interval { r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} r.lastTime = time.Now() } return n, err } image-4.0.1/copy/sign.go000066400000000000000000000017671354546467100150710ustar00rootroot00000000000000package copy import ( "github.com/containers/image/v4/signature" "github.com/containers/image/v4/transports" "github.com/pkg/errors" ) // createSignature creates a new signature of manifest using keyIdentity. 
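// A hypothetical call site, for illustration only (manifestBlob and the fingerprint
// placeholder are not part of this package):
//   sig, err := c.createSignature(manifestBlob, "<GPG key fingerprint>")
//   // sig would then be stored alongside the image via the destination's PutSignatures.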
func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "Error initializing GPG") } defer mech.Close() if err := mech.SupportsSigning(); err != nil { return nil, errors.Wrap(err, "Signing not supported") } dockerReference := c.dest.Reference().DockerReference() if dockerReference == nil { return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) } c.Printf("Signing manifest\n") newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) if err != nil { return nil, errors.Wrap(err, "Error creating signature") } return newSig, nil } image-4.0.1/copy/sign_test.go000066400000000000000000000051661354546467100161250ustar00rootroot00000000000000package copy import ( "context" "io/ioutil" "os" "testing" "github.com/containers/image/v4/directory" "github.com/containers/image/v4/docker" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/signature" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( testGPGHomeDirectory = "../signature/fixtures" // testKeyFingerprint is the fingerprint of the private key in testGPGHomeDirectory. // Keep this in sync with signature/fixtures_info_test.go testKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8" ) func TestCreateSignature(t *testing.T) { manifestBlob := []byte("Something") manifestDigest, err := manifest.Digest(manifestBlob) require.NoError(t, err) mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{}) require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { t.Skipf("Signing not supported: %v", err) } os.Setenv("GNUPGHOME", testGPGHomeDirectory) defer os.Unsetenv("GNUPGHOME") // Signing a directory: reference, which does not have a DockerReference(), fails. 
tempDir, err := ioutil.TempDir("", "signature-dir-dest") require.NoError(t, err) defer os.RemoveAll(tempDir) dirRef, err := directory.NewReference(tempDir) require.NoError(t, err) dirDest, err := dirRef.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dirDest.Close() c := &copier{ dest: dirDest, reportWriter: ioutil.Discard, } _, err = c.createSignature(manifestBlob, testKeyFingerprint) assert.Error(t, err) // Set up a docker: reference dockerRef, err := docker.ParseReference("//busybox") require.NoError(t, err) dockerDest, err := dockerRef.NewImageDestination(context.Background(), &types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}) require.NoError(t, err) defer dockerDest.Close() c = &copier{ dest: dockerDest, reportWriter: ioutil.Discard, } // Signing with an unknown key fails _, err = c.createSignature(manifestBlob, "this key does not exist") assert.Error(t, err) // Success mech, err = signature.NewGPGSigningMechanism() require.NoError(t, err) defer mech.Close() sig, err := c.createSignature(manifestBlob, testKeyFingerprint) require.NoError(t, err) verified, err := signature.VerifyDockerManifestSignature(sig, manifestBlob, "docker.io/library/busybox:latest", mech, testKeyFingerprint) require.NoError(t, err) assert.Equal(t, "docker.io/library/busybox:latest", verified.DockerReference) assert.Equal(t, manifestDigest, verified.DockerManifestDigest) } image-4.0.1/directory/000077500000000000000000000000001354546467100146215ustar00rootroot00000000000000image-4.0.1/directory/directory_dest.go000066400000000000000000000231461354546467100202010ustar00rootroot00000000000000package directory import ( "context" "io" "io/ioutil" "os" "path/filepath" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const version = "Directory Transport Version: 1.1\n" // ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created // using the 'dir' transport var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") type dirImageDestination struct { ref dirReference compress bool } // newImageDestination returns an ImageDestination for writing to a directory. 
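// A directory is only reused if it carries the version file written below with the
// exact contents of the version constant; a non-empty directory without it is refused
// with ErrNotContainerImageDir rather than silently overwritten.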
func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { d := &dirImageDestination{ref: ref, compress: compress} // If directory exists check if it is empty // if not empty, check whether the contents match that of a container image directory and overwrite the contents // if the contents don't match throw an error dirExists, err := pathExists(d.ref.resolvedPath) if err != nil { return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) } if dirExists { isEmpty, err := isDirEmpty(d.ref.resolvedPath) if err != nil { return nil, err } if !isEmpty { versionExists, err := pathExists(d.ref.versionPath()) if err != nil { return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) } if versionExists { contents, err := ioutil.ReadFile(d.ref.versionPath()) if err != nil { return nil, err } // check if contents of version file is what we expect it to be if string(contents) != version { return nil, ErrNotContainerImageDir } } else { return nil, ErrNotContainerImageDir } // delete directory contents so that only one image is in the directory at a time if err = removeDirContents(d.ref.resolvedPath); err != nil { return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) } logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) } } else { // create directory if it doesn't exist if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) } } // create version file err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) if err != nil { return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) } return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *dirImageDestination) Reference() types.ImageReference { return d.ref } // Close removes resources associated with an initialized ImageDestination, if any. func (d *dirImageDestination) Close() error { return nil } func (d *dirImageDestination) SupportedManifestMIMETypes() []string { return nil } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *dirImageDestination) SupportsSignatures(ctx context.Context) error { return nil } func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression { if d.compress { return types.Compress } return types.PreserveOriginal } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { return false } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *dirImageDestination) MustMatchRuntimeOS() bool { return false } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. 
func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *dirImageDestination) HasThreadSafePutBlob() bool { return false } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") if err != nil { return types.BlobInfo{}, err } succeeded := false defer func() { blobFile.Close() if !succeeded { os.Remove(blobFile.Name()) } }() digester := digest.Canonical.Digester() tee := io.TeeReader(stream, digester.Hash()) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, tee) if err != nil { return types.BlobInfo{}, err } computedDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } if err := blobFile.Chmod(0644); err != nil { return types.BlobInfo{}, err } blobPath := d.ref.layerPath(computedDigest) if err := os.Rename(blobFile.Name(), blobPath); err != nil { return types.BlobInfo{}, err } succeeded = true return types.BlobInfo{Digest: computedDigest, Size: size}, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } blobPath := d.ref.layerPath(info.Digest) finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { return false, types.BlobInfo{}, nil } if err != nil { return false, types.BlobInfo{}, err } return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil } // PutManifest writes manifest to the destination. 
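// For the dir: transport the manifest is simply written to <directory>/manifest.json
// (see dirReference.manifestPath in directory_transport.go).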
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte) error { return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) } func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { for i, sig := range signatures { if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil { return err } } return nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (d *dirImageDestination) Commit(ctx context.Context) error { return nil } // returns true if path exists func pathExists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { return true, nil } if err != nil && os.IsNotExist(err) { return false, nil } return false, err } // returns true if directory is empty func isDirEmpty(path string) (bool, error) { files, err := ioutil.ReadDir(path) if err != nil { return false, err } return len(files) == 0, nil } // deletes the contents of a directory func removeDirContents(path string) error { files, err := ioutil.ReadDir(path) if err != nil { return err } for _, file := range files { if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { return err } } return nil } image-4.0.1/directory/directory_src.go000066400000000000000000000071261354546467100200310ustar00rootroot00000000000000package directory import ( "context" "io" "io/ioutil" "os" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) type dirImageSource struct { ref dirReference } // newImageSource returns an ImageSource reading from an existing directory. // The caller must call .Close() on the returned ImageSource. func newImageSource(ref dirReference) types.ImageSource { return &dirImageSource{ref} } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (s *dirImageSource) Reference() types.ImageReference { return s.ref } // Close removes resources associated with an initialized ImageSource, if any. func (s *dirImageSource) Close() error { return nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). 
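// Since a plain directory records no MIME type, the type returned here is guessed
// from the manifest contents via manifest.GuessMIMEType and may be empty.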
func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if instanceDigest != nil { return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) } m, err := ioutil.ReadFile(s.ref.manifestPath()) if err != nil { return nil, "", err } return m, manifest.GuessMIMEType(m), err } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *dirImageSource) HasThreadSafeGetBlob() bool { return false } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { r, err := os.Open(s.ref.layerPath(info.Digest)) if err != nil { return nil, -1, err } fi, err := r.Stat() if err != nil { return nil, -1, err } return r, fi.Size(), nil } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if instanceDigest != nil { return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`) } signatures := [][]byte{} for i := 0; ; i++ { signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) if err != nil { if os.IsNotExist(err) { break } return nil, err } signatures = append(signatures, signature) } return signatures, nil } // LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified. 
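// The dir: transport never substitutes or rewrites layers, so this implementation
// always returns nil and the manifest values are used as-is.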
func (s *dirImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } image-4.0.1/directory/directory_test.go000066400000000000000000000121341354546467100202140ustar00rootroot00000000000000package directory import ( "bytes" "context" "io/ioutil" "os" "testing" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/blobinfocache/memory" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDestinationReference(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() ref2 := dest.Reference() assert.Equal(t, tmpDir, ref2.StringWithinTransport()) } func TestGetPutManifest(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) man := []byte("test-manifest") dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() err = dest.PutManifest(context.Background(), man) assert.NoError(t, err) err = dest.Commit(context.Background()) assert.NoError(t, err) src, err := ref.NewImageSource(context.Background(), nil) require.NoError(t, err) defer src.Close() m, mt, err := src.GetManifest(context.Background(), nil) assert.NoError(t, err) assert.Equal(t, man, m) assert.Equal(t, "", mt) // Non-default instances are not supported md, err := manifest.Digest(man) require.NoError(t, err) _, _, err = src.GetManifest(context.Background(), &md) assert.Error(t, err) } func TestGetPutBlob(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) cache := memory.New() blob := []byte("test-blob") dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() assert.Equal(t, types.PreserveOriginal, dest.DesiredLayerCompression()) info, err := dest.PutBlob(context.Background(), bytes.NewReader(blob), types.BlobInfo{Digest: digest.Digest("sha256:digest-test"), Size: int64(9)}, cache, false) assert.NoError(t, err) err = dest.Commit(context.Background()) assert.NoError(t, err) assert.Equal(t, int64(9), info.Size) assert.Equal(t, digest.FromBytes(blob), info.Digest) src, err := ref.NewImageSource(context.Background(), nil) require.NoError(t, err) defer src.Close() rc, size, err := src.GetBlob(context.Background(), info, cache) assert.NoError(t, err) defer rc.Close() b, err := ioutil.ReadAll(rc) assert.NoError(t, err) assert.Equal(t, blob, b) assert.Equal(t, int64(len(blob)), size) } // readerFromFunc allows implementing Reader by any function, e.g. a closure. type readerFromFunc func([]byte) (int, error) func (fn readerFromFunc) Read(p []byte) (int, error) { return fn(p) } // TestPutBlobDigestFailure simulates behavior on digest verification failure. 
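// The fake reader below serves one buffer of dummy data and then fails; the test
// checks both that PutBlob surfaces the error and that the partially-written
// temporary file is cleaned up (the final blob path must never exist).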
func TestPutBlobDigestFailure(t *testing.T) { const digestErrorString = "Simulated digest error" const blobDigest = digest.Digest("sha256:test-digest") ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dirRef, ok := ref.(dirReference) require.True(t, ok) blobPath := dirRef.layerPath(blobDigest) cache := memory.New() firstRead := true reader := readerFromFunc(func(p []byte) (int, error) { _, err := os.Lstat(blobPath) require.Error(t, err) require.True(t, os.IsNotExist(err)) if firstRead { if len(p) > 0 { firstRead = false } for i := 0; i < len(p); i++ { p[i] = 0xAA } return len(p), nil } return 0, errors.Errorf(digestErrorString) }) dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, cache, false) assert.Error(t, err) assert.Contains(t, err.Error(), digestErrorString) err = dest.Commit(context.Background()) assert.NoError(t, err) _, err = os.Lstat(blobPath) require.Error(t, err) require.True(t, os.IsNotExist(err)) } func TestGetPutSignatures(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() man := []byte("test-manifest") signatures := [][]byte{ []byte("sig1"), []byte("sig2"), } err = dest.SupportsSignatures(context.Background()) assert.NoError(t, err) err = dest.PutManifest(context.Background(), man) require.NoError(t, err) err = dest.PutSignatures(context.Background(), signatures) assert.NoError(t, err) err = dest.Commit(context.Background()) assert.NoError(t, err) src, err := ref.NewImageSource(context.Background(), nil) require.NoError(t, err) defer src.Close() sigs, err := src.GetSignatures(context.Background(), nil) assert.NoError(t, err) assert.Equal(t, signatures, sigs) // Non-default instances are not supported md, err := manifest.Digest(man) require.NoError(t, err) _, err = src.GetSignatures(context.Background(), &md) assert.Error(t, err) } func TestSourceReference(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) src, err := ref.NewImageSource(context.Background(), nil) require.NoError(t, err) defer src.Close() ref2 := src.Reference() assert.Equal(t, tmpDir, ref2.StringWithinTransport()) } image-4.0.1/directory/directory_transport.go000066400000000000000000000207031354546467100212720ustar00rootroot00000000000000package directory import ( "context" "fmt" "path/filepath" "strings" "github.com/containers/image/v4/directory/explicitfilepath" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) func init() { transports.Register(Transport) } // Transport is an ImageTransport for directory paths. var Transport = dirTransport{} type dirTransport struct{} func (t dirTransport) Name() string { return "dir" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { return NewReference(reference) } // ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). 
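// For dir:, a scope is a clean absolute path such as "/var/lib/images"; relative
// paths, non-canonical paths, and "/" itself are rejected below.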
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { if !strings.HasPrefix(scope, "/") { return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) } // Refuse also "/", otherwise "/" and "" would have the same semantics, // and "" could be unexpectedly shadowed by the "/" entry. if scope == "/" { return errors.New(`Invalid scope "/": Use the generic default scope ""`) } cleaned := filepath.Clean(scope) if cleaned != scope { return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) } return nil } // dirReference is an ImageReference for directory paths. type dirReference struct { // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid // being exposed to symlinks and renames in the parent directories to the working directory). // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) path string // As specified by the user. May be relative, contain symlinks, etc. resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. } // There is no directory.ParseReference because it is rather pointless. // Callers who need a transport-independent interface will go through // dirTransport.ParseReference; callers who intentionally deal with directories // can use directory.NewReference. // NewReference returns a directory reference for a specified path. // // We do not expose an API supplying the resolvedPath; we could, but recomputing it // is generally cheap enough that we prefer being confident about the properties of resolvedPath. func NewReference(path string) (types.ImageReference, error) { resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) if err != nil { return nil, err } return dirReference{path: path, resolvedPath: resolved}, nil } func (ref dirReference) Transport() types.ImageTransport { return Transport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref dirReference) StringWithinTransport() string { return ref.path } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref dirReference) DockerReference() reference.Named { return nil } // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. 
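// For dir: references this is the fully resolved path, so e.g. a reference created
// from "/tmp/../tmp/img" and one created from "/tmp/img" share a single identity.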
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. various references with exactly the same semantics should return the same configuration identity) // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. func (ref dirReference) PolicyConfigurationIdentity() string { return ref.resolvedPath } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref dirReference) PolicyConfigurationNamespaces() []string { res := []string{} path := ref.resolvedPath for { lastSlash := strings.LastIndex(path, "/") if lastSlash == -1 || lastSlash == 0 { break } path = path[:lastSlash] res = append(res, path) } // Note that we do not include "/"; it is redundant with the default "" global default, // and rejected by dirTransport.ValidatePolicyConfigurationScope above. return res } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref dirReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ref) return image.FromSource(ctx, sys, src) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref dirReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { return newImageSource(ref), nil } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref dirReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { compress := false if sys != nil { compress = sys.DirForceCompress } return newImageDestination(ref, compress) } // DeleteImage deletes the named image from the registry, if supported. func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { return errors.Errorf("Deleting images not implemented for dir: images") } // manifestPath returns a path for the manifest within a directory using our conventions. func (ref dirReference) manifestPath() string { return filepath.Join(ref.path, "manifest.json") } // layerPath returns a path for a layer tarball within a directory using our conventions. func (ref dirReference) layerPath(digest digest.Digest) string { // FIXME: Should we keep the digest identification? 
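// Only the hex part of the digest is used, so e.g. "sha256:1234…" is stored as
// <directory>/1234…, dropping the algorithm prefix.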
return filepath.Join(ref.path, digest.Hex()) } // signaturePath returns a path for a signature within a directory using our conventions. func (ref dirReference) signaturePath(index int) string { return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) } // versionPath returns a path for the version file within a directory using our conventions. func (ref dirReference) versionPath() string { return filepath.Join(ref.path, "version") } image-4.0.1/directory/directory_transport_test.go000066400000000000000000000154071354546467100223360ustar00rootroot00000000000000package directory import ( "context" "io/ioutil" "os" "path/filepath" "testing" _ "github.com/containers/image/v4/internal/testing/explicitfilepath-tmpdir" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTransportName(t *testing.T) { assert.Equal(t, "dir", Transport.Name()) } func TestTransportParseReference(t *testing.T) { testNewReference(t, Transport.ParseReference) } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { for _, scope := range []string{ "/etc", "/this/does/not/exist", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } for _, scope := range []string{ "relative/path", "/double//slashes", "/has/./dot", "/has/dot/../dot", "/trailing/slash/", "/", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.Error(t, err, scope) } } func TestNewReference(t *testing.T) { testNewReference(t, NewReference) } // testNewReference is a test shared for Transport.ParseReference and NewReference. func testNewReference(t *testing.T, fn func(string) (types.ImageReference, error)) { tmpDir, err := ioutil.TempDir("", "dir-transport-test") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, path := range []string{ "/", "/etc", tmpDir, "relativepath", tmpDir + "/thisdoesnotexist", } { ref, err := fn(path) require.NoError(t, err, path) dirRef, ok := ref.(dirReference) require.True(t, ok) assert.Equal(t, path, dirRef.path, path) } _, err = fn(tmpDir + "/thisparentdoesnotexist/something") assert.Error(t, err) } // refToTempDir creates a temporary directory and returns a reference to it. // The caller should // defer os.RemoveAll(tmpDir) func refToTempDir(t *testing.T) (ref types.ImageReference, tmpDir string) { tmpDir, err := ioutil.TempDir("", "dir-transport-test") require.NoError(t, err) ref, err = NewReference(tmpDir) require.NoError(t, err) return ref, tmpDir } func TestReferenceTransport(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) assert.Equal(t, tmpDir, ref.StringWithinTransport()) } func TestReferenceDockerReference(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) assert.Nil(t, ref.DockerReference()) } func TestReferencePolicyConfigurationIdentity(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity()) // A non-canonical path. Test just one, the various other cases are // tested in explicitfilepath.ResolvePathToFullyExplicit. ref, err := NewReference(tmpDir + "/.") require.NoError(t, err) assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity()) // "/" as a corner case. 
ref, err = NewReference("/") require.NoError(t, err) assert.Equal(t, "/", ref.PolicyConfigurationIdentity()) } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) // We don't really know enough to make a full equality test here. ns := ref.PolicyConfigurationNamespaces() require.NotNil(t, ns) assert.NotEmpty(t, ns) assert.Equal(t, filepath.Dir(tmpDir), ns[0]) // Test with a known path which should exist. Test just one non-canonical // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. // // It would be nice to test a deeper hierarchy, but it is not obvious what // deeper path is always available in the various distros, AND is not likely // to contains a symbolic link. for _, path := range []string{"/usr/share", "/usr/share/./."} { _, err := os.Lstat(path) require.NoError(t, err) ref, err := NewReference(path) require.NoError(t, err) ns := ref.PolicyConfigurationNamespaces() require.NotNil(t, ns) assert.Equal(t, []string{"/usr"}, ns) } // "/" as a corner case. ref, err := NewReference("/") require.NoError(t, err) assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces()) } func TestReferenceNewImage(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() mFixture, err := ioutil.ReadFile("../manifest/fixtures/v2s1.manifest.json") require.NoError(t, err) err = dest.PutManifest(context.Background(), mFixture) assert.NoError(t, err) err = dest.Commit(context.Background()) assert.NoError(t, err) img, err := ref.NewImage(context.Background(), nil) assert.NoError(t, err) defer img.Close() } func TestReferenceNewImageNoValidManifest(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() err = dest.PutManifest(context.Background(), []byte(`{"schemaVersion":1}`)) assert.NoError(t, err) err = dest.Commit(context.Background()) assert.NoError(t, err) _, err = ref.NewImage(context.Background(), nil) assert.Error(t, err) } func TestReferenceNewImageSource(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) src, err := ref.NewImageSource(context.Background(), nil) assert.NoError(t, err) defer src.Close() } func TestReferenceNewImageDestination(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) assert.NoError(t, err) defer dest.Close() } func TestReferenceDeleteImage(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) err := ref.DeleteImage(context.Background(), nil) assert.Error(t, err) } func TestReferenceManifestPath(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dirRef, ok := ref.(dirReference) require.True(t, ok) assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath()) } func TestReferenceLayerPath(t *testing.T) { const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dirRef, ok := ref.(dirReference) require.True(t, ok) assert.Equal(t, tmpDir+"/"+hex, dirRef.layerPath("sha256:"+hex)) } func TestReferenceSignaturePath(t *testing.T) { ref, tmpDir := refToTempDir(t) defer os.RemoveAll(tmpDir) dirRef, ok := ref.(dirReference) require.True(t, ok) assert.Equal(t, tmpDir+"/signature-1", 
dirRef.signaturePath(0))
	assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9))
}

func TestReferenceVersionPath(t *testing.T) {
	ref, tmpDir := refToTempDir(t)
	defer os.RemoveAll(tmpDir)
	dirRef, ok := ref.(dirReference)
	require.True(t, ok)
	assert.Equal(t, tmpDir+"/version", dirRef.versionPath())
}
image-4.0.1/directory/explicitfilepath/000077500000000000000000000000001354546467100201575ustar00rootroot00000000000000image-4.0.1/directory/explicitfilepath/path.go000066400000000000000000000045651354546467100214520ustar00rootroot00000000000000package explicitfilepath

import (
	"os"
	"path/filepath"

	"github.com/pkg/errors"
)

// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
// To do so, all elements of the input path must exist; as a special case, the final component may be
// a non-existent name (but not a symlink pointing to a non-existent name)
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
	switch _, err := os.Lstat(path); {
	case err == nil:
		return resolveExistingPathToFullyExplicit(path)
	case os.IsNotExist(err):
		parent, file := filepath.Split(path)
		resolvedParent, err := resolveExistingPathToFullyExplicit(parent)
		if err != nil {
			return "", err
		}
		if file == "." || file == ".." {
			// Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well.
			// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
			// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
			// in the resulting path, and especially not at the end.
			return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
		}
		resolvedPath := filepath.Join(resolvedParent, file)
		// As a sanity check, ensure that there are no "." or ".." components.
		cleanedResolvedPath := filepath.Clean(resolvedPath)
		if cleanedResolvedPath != resolvedPath {
			// Coverage: This should never happen.
			return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
		}
		return resolvedPath, nil
	default: // err != nil, unrecognized
		return "", err
	}
}

// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit,
// but without the special case for missing final component.
func resolveExistingPathToFullyExplicit(path string) (string, error) {
	resolved, err := filepath.Abs(path)
	if err != nil {
		return "", err // Coverage: This can fail only if os.Getwd() fails.
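	}
	// A hedged sketch of the overall effect (hypothetical paths): given a symlink
	// /tmp/l -> /tmp/real, resolveExistingPathToFullyExplicit("/tmp/l/sub") returns
	// "/tmp/real/sub": Abs above anchors and cleans the path, EvalSymlinks below
	// resolves the symlink, and the final Clean normalizes EvalSymlinks' result.
	if false {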
} resolved, err = filepath.EvalSymlinks(resolved) if err != nil { return "", err } return filepath.Clean(resolved), nil } image-4.0.1/directory/explicitfilepath/path_test.go000066400000000000000000000122751354546467100225100ustar00rootroot00000000000000package explicitfilepath import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" _ "github.com/containers/image/v4/internal/testing/explicitfilepath-tmpdir" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) type pathResolvingTestCase struct { setup func(*testing.T, string) string expected string } var testCases = []pathResolvingTestCase{ { // A straightforward subdirectory hierarchy func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755) require.NoError(t, err) return "dir1/dir2/dir3" }, "dir1/dir2/dir3", }, { // Missing component func(t *testing.T, top string) string { return "thisismissing/dir2" }, "", }, { // Symlink on the path func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) require.NoError(t, err) err = os.Symlink("dir1", filepath.Join(top, "link1")) require.NoError(t, err) return "link1/dir2" }, "dir1/dir2", }, { // Trailing symlink func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) require.NoError(t, err) err = os.Symlink("dir2", filepath.Join(top, "dir1/link2")) require.NoError(t, err) return "dir1/link2" }, "dir1/dir2", }, { // Symlink pointing nowhere, as a non-final component func(t *testing.T, top string) string { err := os.Symlink("thisismissing", filepath.Join(top, "link1")) require.NoError(t, err) return "link1/dir2" }, "", }, { // Trailing symlink pointing nowhere (but note that a missing non-symlink would be accepted) func(t *testing.T, top string) string { err := os.Symlink("thisismissing", filepath.Join(top, "link1")) require.NoError(t, err) return "link1" }, "", }, { // Relative components in a path func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755) require.NoError(t, err) return "dir1/./dir2/../dir2/dir3" }, "dir1/dir2/dir3", }, { // Trailing relative components func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) require.NoError(t, err) return "dir1/dir2/.." }, "dir1", }, { // Relative components in symlink func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2"), 0755) require.NoError(t, err) err = os.Symlink("../dir1/dir2", filepath.Join(top, "dir1/link2")) require.NoError(t, err) return "dir1/link2" }, "dir1/dir2", }, { // Relative component pointing "into" a symlink func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "dir1/dir2/dir3"), 0755) require.NoError(t, err) err = os.Symlink("dir3", filepath.Join(top, "dir1/dir2/link3")) require.NoError(t, err) return "dir1/dir2/link3/../.." 
}, "dir1", }, { // Unreadable directory func(t *testing.T, top string) string { err := os.MkdirAll(filepath.Join(top, "unreadable/dir2"), 0755) require.NoError(t, err) err = os.Chmod(filepath.Join(top, "unreadable"), 000) require.NoError(t, err) return "unreadable/dir2" }, "", }, } func testPathsAreSameFile(t *testing.T, path1, path2, description string) { fi1, err := os.Stat(path1) require.NoError(t, err) fi2, err := os.Stat(path2) require.NoError(t, err) assert.True(t, os.SameFile(fi1, fi2), description) } func runPathResolvingTestCase(t *testing.T, f func(string) (string, error), c pathResolvingTestCase, suffix string) { topDir, err := ioutil.TempDir("", "pathResolving") defer func() { // Clean up after the "Unreadable directory" case; os.RemoveAll just fails. _ = os.Chmod(filepath.Join(topDir, "unreadable"), 0755) // Ignore errors, especially if this does not exist. os.RemoveAll(topDir) }() input := c.setup(t, topDir) + suffix // Do not call filepath.Join() on input, it calls filepath.Clean() internally! description := fmt.Sprintf("%s vs. %s%s", input, c.expected, suffix) fullOutput, err := ResolvePathToFullyExplicit(topDir + "/" + input) if c.expected == "" { assert.Error(t, err, description) } else { require.NoError(t, err, input) fullExpected := topDir + "/" + c.expected + suffix assert.Equal(t, fullExpected, fullOutput) // Either the two paths resolve to the same existing file, or to the same name in the same existing parent. if _, err := os.Lstat(fullExpected); err == nil { testPathsAreSameFile(t, fullOutput, fullExpected, description) } else { require.True(t, os.IsNotExist(err)) _, err := os.Stat(fullOutput) require.Error(t, err) require.True(t, os.IsNotExist(err)) parentExpected, fileExpected := filepath.Split(fullExpected) parentOutput, fileOutput := filepath.Split(fullOutput) assert.Equal(t, fileExpected, fileOutput) testPathsAreSameFile(t, parentOutput, parentExpected, description) } } } func TestResolvePathToFullyExplicit(t *testing.T) { for _, c := range testCases { runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "") runPathResolvingTestCase(t, ResolvePathToFullyExplicit, c, "/trailing") } } func TestResolveExistingPathToFullyExplicit(t *testing.T) { for _, c := range testCases { runPathResolvingTestCase(t, resolveExistingPathToFullyExplicit, c, "") } } image-4.0.1/doc.go000066400000000000000000000011501354546467100137060ustar00rootroot00000000000000// Package image provides libraries and commands to interact with containers images. 
// // package main // // import ( // "context" // "fmt" // // "github.com/containers/image/v4/docker" // ) // // func main() { // ref, err := docker.ParseReference("//fedora") // if err != nil { // panic(err) // } // ctx := context.Background() // img, err := ref.NewImage(ctx, nil) // if err != nil { // panic(err) // } // defer img.Close() // b, _, err := img.Manifest(ctx) // if err != nil { // panic(err) // } // fmt.Printf("%s", string(b)) // } // // // TODO(runcom) package image image-4.0.1/docker/000077500000000000000000000000001354546467100140645ustar00rootroot00000000000000image-4.0.1/docker/archive/000077500000000000000000000000001354546467100155055ustar00rootroot00000000000000image-4.0.1/docker/archive/dest.go000066400000000000000000000052351354546467100170000ustar00rootroot00000000000000package archive import ( "context" "io" "os" "github.com/containers/image/v4/docker/tarfile" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) type archiveImageDestination struct { *tarfile.Destination // Implements most of types.ImageDestination ref archiveReference writer io.Closer } func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { // ref.path can be either a pipe or a regular file // in the case of a pipe, we require that we can open it for write // in the case of a regular file, we don't want to overwrite any pre-existing file // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, // only in a different way. Either way, it’s up to the user to not have two writers to the same path.) fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { return nil, errors.Wrapf(err, "error opening file %q", ref.path) } fhStat, err := fh.Stat() if err != nil { return nil, errors.Wrapf(err, "error statting file %q", ref.path) } if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { return nil, errors.New("docker-archive doesn't support modifying existing images") } tarDest := tarfile.NewDestination(fh, ref.destinationRef) if sys != nil && sys.DockerArchiveAdditionalTags != nil { tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags) } return &archiveImageDestination{ Destination: tarDest, ref: ref, writer: fh, }, nil } // DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression { return types.Decompress } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *archiveImageDestination) Reference() types.ImageReference { return d.ref } // Close removes resources associated with an initialized ImageDestination, if any. func (d *archiveImageDestination) Close() error { return d.writer.Close() } // Commit marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. 
rollback is allowed but not guaranteed) func (d *archiveImageDestination) Commit(ctx context.Context) error { return d.Destination.Commit(ctx) } image-4.0.1/docker/archive/fixtures/000077500000000000000000000000001354546467100173565ustar00rootroot00000000000000image-4.0.1/docker/archive/fixtures/almostempty.tar000066400000000000000000000250001354546467100224410ustar00rootroot000000000000009d7f147c0d0c4d4538a04c7ef385809e56eb1aac7bf800fbe976612188025b68.json0100644000000000000000000000241613046414666020713 0ustar0000000000000000{"architecture":"amd64","config":{"Hostname":"2bc2ec52e2d6","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"2bc2ec52e2d6f32cf49a5772157759046f886ae19ba89022e808d51260be2423","container_config":{"Hostname":"2bc2ec52e2d6","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:54946f8cfc2693368a889f9f55fefa4a7102d0daf51354f09c6620a48d3c625c in /emptyfile"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2017-02-07T19:02:14.382332032Z","docker_version":"1.10.3","history":[{"created":"2017-02-07T19:02:14.382332032Z","created_by":"/bin/sh -c #(nop) ADD file:54946f8cfc2693368a889f9f55fefa4a7102d0daf51354f09c6620a48d3c625c in /emptyfile"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:0b916d257bd406111a3fced53f81b47de9a30f7c7d514a89769b3483aaddca7e"]}}c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f/0040755000000000000000000000000013046414666017756 5ustar0000000000000000c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f/VERSION0100644000000000000000000000000313046414666021014 0ustar00000000000000001.0c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f/json0100644000000000000000000000206213046414666020647 0ustar0000000000000000{"id":"c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f","created":"2017-02-07T19:02:14.382332032Z","container":"2bc2ec52e2d6f32cf49a5772157759046f886ae19ba89022e808d51260be2423","container_config":{"Hostname":"2bc2ec52e2d6","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:54946f8cfc2693368a889f9f55fefa4a7102d0daf51354f09c6620a48d3c625c in /emptyfile"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"docker_version":"1.10.3","config":{"Hostname":"2bc2ec52e2d6","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux"}c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f/layer.tar0100644000000000000000000000300013046414666021570 0ustar0000000000000000emptyfile0100664000000000000000000000000013046414530011156 
0ustar0000000000000000manifest.json0100644000000000000000000000031600000000000011707 0ustar0000000000000000[{"Config":"9d7f147c0d0c4d4538a04c7ef385809e56eb1aac7bf800fbe976612188025b68.json","RepoTags":["emptyimage:latest"],"Layers":["c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f/layer.tar"]}] repositories0100644000000000000000000000013500000000000011657 0ustar0000000000000000{"emptyimage":{"latest":"c7b98db321d22702b8dd264fa7d58936951867854969a873d3dd20520eadca8f"}} image-4.0.1/docker/archive/src.go000066400000000000000000000025331354546467100166260ustar00rootroot00000000000000package archive import ( "context" "github.com/containers/image/v4/docker/tarfile" "github.com/containers/image/v4/types" "github.com/sirupsen/logrus" ) type archiveImageSource struct { *tarfile.Source // Implements most of types.ImageSource ref archiveReference } // newImageSource returns a types.ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) { if ref.destinationRef != nil { logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") } src, err := tarfile.NewSourceFromFile(ref.path) if err != nil { return nil, err } return &archiveImageSource{ Source: src, ref: ref, }, nil } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (s *archiveImageSource) Reference() types.ImageReference { return s.ref } // LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. func (s *archiveImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } image-4.0.1/docker/archive/transport.go000066400000000000000000000156651354546467100201050ustar00rootroot00000000000000package archive import ( "context" "fmt" "strings" "github.com/containers/image/v4/docker/reference" ctrImage "github.com/containers/image/v4/image" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) func init() { transports.Register(Transport) } // Transport is an ImageTransport for local Docker archives. var Transport = archiveTransport{} type archiveTransport struct{} func (t archiveTransport) Name() string { return "docker-archive" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) { return ParseReference(reference) } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error { // See the explanation in archiveReference.PolicyConfigurationIdentity. return errors.New(`docker-archive: does not support any scopes except the default "" one`) } // archiveReference is an ImageReference for Docker images. 
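// Hedged examples of the reference strings ParseReference below accepts
// (the paths are hypothetical):
//
//	/tmp/busybox.tar                   a plain path, sufficient for reading
//	/tmp/busybox.tar:busybox:latest    a path plus a destination name:tag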
type archiveReference struct {
	// destinationRef is only used for destinations;
	// it is optional and can be nil even for destinations.
	destinationRef reference.NamedTagged
	path           string
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
	if refString == "" {
		return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
	}

	parts := strings.SplitN(refString, ":", 2)
	path := parts[0]
	var destinationRef reference.NamedTagged

	// A :tag was specified, which is only necessary for destinations.
	if len(parts) == 2 {
		ref, err := reference.ParseNormalizedNamed(parts[1])
		if err != nil {
			return nil, errors.Wrapf(err, "docker-archive parsing reference")
		}
		ref = reference.TagNameOnly(ref)

		if _, isDigest := ref.(reference.Canonical); isDigest {
			return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
		}

		refTagged, isTagged := ref.(reference.NamedTagged)
		if !isTagged {
			// Really shouldn't be hit...
			return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
		}
		destinationRef = refTagged
	}

	return archiveReference{
		destinationRef: destinationRef,
		path:           path,
	}, nil
}

func (ref archiveReference) Transport() types.ImageTransport {
	return Transport
}

// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
	if ref.destinationRef == nil {
		return ref.path
	}
	return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
}

// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
	return ref.destinationRef
}

// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref archiveReference) PolicyConfigurationIdentity() string {
	// Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
	return ""
}

// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref archiveReference) PolicyConfigurationNamespaces() []string {
	// TODO
	return []string{}
}

// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
	src, err := newImageSource(ctx, ref)
	if err != nil {
		return nil, err
	}
	return ctrImage.FromSource(ctx, sys, src)
}

// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
	return newImageSource(ctx, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref archiveReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(sys, ref)
}

// DeleteImage deletes the named image from the registry, if supported.
func (ref archiveReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
	// Not really supported, for safety reasons.
	return errors.New("Deleting images not implemented for docker-archive: images")
}
image-4.0.1/docker/archive/transport_test.go000066400000000000000000000156421354546467100211330ustar00rootroot00000000000000package archive

import (
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/containers/image/v4/docker/reference"
	"github.com/containers/image/v4/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	sha256digest    = "@sha256:" + sha256digestHex
	tarFixture      = "fixtures/almostempty.tar"
)

func TestTransportName(t *testing.T) {
	assert.Equal(t, "docker-archive", Transport.Name())
}

func TestTransportParseReference(t *testing.T) {
	testParseReference(t, Transport.ParseReference)
}

func TestTransportValidatePolicyConfigurationScope(t *testing.T) {
	for _, scope := range []string{ // A semi-representative assortment of values; everything is rejected.
		"docker.io/library/busybox:notlatest",
		"docker.io/library/busybox",
		"docker.io/library",
		"docker.io",
		"",
	} {
		err := Transport.ValidatePolicyConfigurationScope(scope)
		assert.Error(t, err, scope)
	}
}

func TestParseReference(t *testing.T) {
	testParseReference(t, ParseReference)
}

// testParseReference is a test shared for Transport.ParseReference and ParseReference.
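// Each table entry below pairs an input string with the expected archive path and
// destination reference; an empty expectedPath marks inputs ParseReference must reject.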
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { for _, c := range []struct{ input, expectedPath, expectedRef string }{ {"", "", ""}, // Empty input is explicitly rejected {"/path", "/path", ""}, {"/path:busybox:notlatest", "/path", "docker.io/library/busybox:notlatest"}, // Explicit tag {"/path:busybox" + sha256digest, "", ""}, // Digest references are forbidden {"/path:busybox", "/path", "docker.io/library/busybox:latest"}, // Default tag // A github.com/distribution/reference value can have a tag and a digest at the same time! {"/path:busybox:latest" + sha256digest, "", ""}, // Both tag and digest is rejected {"/path:docker.io/library/busybox:latest", "/path", "docker.io/library/busybox:latest"}, // All implied values explicitly specified {"/path:UPPERCASEISINVALID", "", ""}, // Invalid input } { ref, err := fn(c.input) if c.expectedPath == "" { assert.Error(t, err, c.input) } else { require.NoError(t, err, c.input) archiveRef, ok := ref.(archiveReference) require.True(t, ok, c.input) assert.Equal(t, c.expectedPath, archiveRef.path, c.input) if c.expectedRef == "" { assert.Nil(t, archiveRef.destinationRef, c.input) } else { require.NotNil(t, archiveRef.destinationRef, c.input) assert.Equal(t, c.expectedRef, archiveRef.destinationRef.String(), c.input) } } } } // refWithTagAndDigest is a reference.NamedTagged and reference.Canonical at the same time. type refWithTagAndDigest struct{ reference.Canonical } func (ref refWithTagAndDigest) Tag() string { return "notLatest" } // A common list of reference formats to test for the various ImageReference methods. var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ {"/pathonly", "", "/pathonly"}, {"/path:busybox:notlatest", "docker.io/library/busybox:notlatest", "/path:docker.io/library/busybox:notlatest"}, // Explicit tag {"/path:docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "/path:docker.io/library/busybox:latest"}, // All implied values explicitly specified {"/path:example.com/ns/foo:bar", "example.com/ns/foo:bar", "/path:example.com/ns/foo:bar"}, // All values explicitly specified } func TestReferenceTransport(t *testing.T) { ref, err := ParseReference("/tmp/archive.tar") require.NoError(t, err) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { for _, c := range validReferenceTestCases { ref, err := ParseReference(c.input) require.NoError(t, err, c.input) stringRef := ref.StringWithinTransport() assert.Equal(t, c.stringWithinTransport, stringRef, c.input) // Do one more round to verify that the output can be parsed, to an equal value. 
ref2, err := Transport.ParseReference(stringRef)
		require.NoError(t, err, c.input)
		stringRef2 := ref2.StringWithinTransport()
		assert.Equal(t, stringRef, stringRef2, c.input)
	}
}

func TestReferenceDockerReference(t *testing.T) {
	for _, c := range validReferenceTestCases {
		ref, err := ParseReference(c.input)
		require.NoError(t, err, c.input)
		dockerRef := ref.DockerReference()
		if c.dockerRef != "" {
			require.NotNil(t, dockerRef, c.input)
			assert.Equal(t, c.dockerRef, dockerRef.String(), c.input)
		} else {
			require.Nil(t, dockerRef, c.input)
		}
	}
}

func TestReferencePolicyConfigurationIdentity(t *testing.T) {
	for _, c := range validReferenceTestCases {
		ref, err := ParseReference(c.input)
		require.NoError(t, err, c.input)
		assert.Equal(t, "", ref.PolicyConfigurationIdentity(), c.input)
	}
}

func TestReferencePolicyConfigurationNamespaces(t *testing.T) {
	for _, c := range validReferenceTestCases {
		ref, err := ParseReference(c.input)
		require.NoError(t, err, c.input)
		assert.Empty(t, ref.PolicyConfigurationNamespaces(), c.input)
	}
}

func TestReferenceNewImage(t *testing.T) {
	for _, suffix := range []string{"", ":thisisignoredbutaccepted"} {
		ref, err := ParseReference(tarFixture + suffix)
		require.NoError(t, err, suffix)
		img, err := ref.NewImage(context.Background(), nil)
		assert.NoError(t, err, suffix)
		defer img.Close()
	}
}

func TestReferenceNewImageSource(t *testing.T) {
	for _, suffix := range []string{"", ":thisisignoredbutaccepted"} {
		ref, err := ParseReference(tarFixture + suffix)
		require.NoError(t, err, suffix)
		src, err := ref.NewImageSource(context.Background(), nil)
		assert.NoError(t, err, suffix)
		defer src.Close()
	}
}

func TestReferenceNewImageDestination(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "docker-archive-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	ref, err := ParseReference(filepath.Join(tmpDir, "no-reference"))
	require.NoError(t, err)
	dest, err := ref.NewImageDestination(context.Background(), nil)
	assert.NoError(t, err)
	dest.Close()

	ref, err = ParseReference(filepath.Join(tmpDir, "with-reference") + ":busybox:latest")
	require.NoError(t, err)
	dest, err = ref.NewImageDestination(context.Background(), nil)
	assert.NoError(t, err)
	defer dest.Close()
}

func TestReferenceDeleteImage(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "docker-archive-test")
	require.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	for i, suffix := range []string{"", ":thisisignoredbutaccepted"} {
		testFile := filepath.Join(tmpDir, fmt.Sprintf("file%d.tar", i))
		err := ioutil.WriteFile(testFile, []byte("nonempty"), 0644)
		require.NoError(t, err, suffix)

		ref, err := ParseReference(testFile + suffix)
		require.NoError(t, err, suffix)
		err = ref.DeleteImage(context.Background(), nil)
		assert.Error(t, err, suffix)

		_, err = os.Lstat(testFile)
		assert.NoError(t, err, suffix)
	}
}
image-4.0.1/docker/cache.go000066400000000000000000000016001354546467100154630ustar00rootroot00000000000000package docker

import (
	"github.com/containers/image/v4/docker/reference"
	"github.com/containers/image/v4/types"
)

// bicTransportScope returns a BICTransportScope appropriate for ref.
func bicTransportScope(ref dockerReference) types.BICTransportScope {
	// Blobs can be reused across the whole registry.
	return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
}

// newBICLocationReference returns a BICLocationReference appropriate for ref.
func newBICLocationReference(ref dockerReference) types.BICLocationReference {
	// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
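	// For example (a hypothetical reference): for docker.io/library/busybox:latest,
	// bicTransportScope above yields {Opaque: "docker.io"} while this function
	// yields {Opaque: "docker.io/library/busybox"}; the tag is dropped on purpose.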
return types.BICLocationReference{Opaque: ref.ref.Name()} } // parseBICLocationReference returns a repository for encoded lr. func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { return reference.ParseNormalizedNamed(lr.Opaque) } image-4.0.1/docker/daemon/000077500000000000000000000000001354546467100153275ustar00rootroot00000000000000image-4.0.1/docker/daemon/client.go000066400000000000000000000046371354546467100171460ustar00rootroot00000000000000package daemon import ( "net/http" "path/filepath" "github.com/containers/image/v4/types" dockerclient "github.com/docker/docker/client" "github.com/docker/go-connections/tlsconfig" ) const ( // The default API version to be used in case none is explicitly specified defaultAPIVersion = "1.22" ) // NewDockerClient initializes a new API client based on the passed SystemContext. func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { host := dockerclient.DefaultDockerHost if sys != nil && sys.DockerDaemonHost != "" { host = sys.DockerDaemonHost } // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. // // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. // // Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set // TLSClientConfig to nil. This can be achieved by using the form `http://` url, err := dockerclient.ParseHostURL(host) if err != nil { return nil, err } var httpClient *http.Client if url.Scheme != "unix" { if url.Scheme == "http" { httpClient = httpConfig() } else { hc, err := tlsConfig(sys) if err != nil { return nil, err } httpClient = hc } } return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) } func tlsConfig(sys *types.SystemContext) (*http.Client, error) { options := tlsconfig.Options{} if sys != nil && sys.DockerDaemonInsecureSkipTLSVerify { options.InsecureSkipVerify = true } if sys != nil && sys.DockerDaemonCertPath != "" { options.CAFile = filepath.Join(sys.DockerDaemonCertPath, "ca.pem") options.CertFile = filepath.Join(sys.DockerDaemonCertPath, "cert.pem") options.KeyFile = filepath.Join(sys.DockerDaemonCertPath, "key.pem") } tlsc, err := tlsconfig.Client(options) if err != nil { return nil, err } return &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsc, }, CheckRedirect: dockerclient.CheckRedirect, }, nil } func httpConfig() *http.Client { return &http.Client{ Transport: &http.Transport{ TLSClientConfig: nil, }, CheckRedirect: dockerclient.CheckRedirect, } } image-4.0.1/docker/daemon/client_test.go000066400000000000000000000061771354546467100202060ustar00rootroot00000000000000package daemon import "testing" import ( "github.com/containers/image/v4/types" dockerclient "github.com/docker/docker/client" "github.com/stretchr/testify/assert" "net/http" "os" "path/filepath" ) func TestDockerClientFromNilSystemContext(t *testing.T) { client, err := newDockerClient(nil) assert.Nil(t, err, "There should be no error creating the Docker client") assert.NotNil(t, client, "A Docker client reference should have been returned") assert.Equal(t, dockerclient.DefaultDockerHost, client.DaemonHost(), "The default docker host should have been used") assert.Equal(t, 
defaultAPIVersion, client.ClientVersion(), "The default api version should have been used") } func TestDockerClientFromCertContext(t *testing.T) { testDir := testDir(t) host := "tcp://127.0.0.1:2376" systemCtx := &types.SystemContext{ DockerDaemonCertPath: filepath.Join(testDir, "testdata", "certs"), DockerDaemonHost: host, DockerDaemonInsecureSkipTLSVerify: true, } client, err := newDockerClient(systemCtx) assert.Nil(t, err, "There should be no error creating the Docker client") assert.NotNil(t, client, "A Docker client reference should have been returned") assert.Equal(t, host, client.DaemonHost()) assert.Equal(t, "1.22", client.ClientVersion()) } func TestTlsConfigFromInvalidCertPath(t *testing.T) { ctx := &types.SystemContext{ DockerDaemonCertPath: "/foo/bar", } _, err := tlsConfig(ctx) if assert.Error(t, err, "An error was expected") { assert.Regexp(t, "could not read CA certificate", err.Error()) } } func TestTlsConfigFromCertPath(t *testing.T) { testDir := testDir(t) ctx := &types.SystemContext{ DockerDaemonCertPath: filepath.Join(testDir, "testdata", "certs"), DockerDaemonInsecureSkipTLSVerify: true, } httpClient, err := tlsConfig(ctx) assert.NoError(t, err, "There should be no error creating the HTTP client") tlsConfig := httpClient.Transport.(*http.Transport).TLSClientConfig assert.True(t, tlsConfig.InsecureSkipVerify, "TLS verification should be skipped") assert.Len(t, tlsConfig.Certificates, 1, "There should be one certificate") } func TestSkipTLSVerifyOnly(t *testing.T) { //testDir := testDir(t) ctx := &types.SystemContext{ DockerDaemonInsecureSkipTLSVerify: true, } httpClient, err := tlsConfig(ctx) assert.NoError(t, err, "There should be no error creating the HTTP client") tlsConfig := httpClient.Transport.(*http.Transport).TLSClientConfig assert.True(t, tlsConfig.InsecureSkipVerify, "TLS verification should be skipped") assert.Len(t, tlsConfig.Certificates, 0, "There should be no certificate") } func TestSpecifyPlainHTTPViaHostScheme(t *testing.T) { host := "http://127.0.0.1:2376" ctx := &types.SystemContext{ DockerDaemonHost: host, } client, err := newDockerClient(ctx) assert.Nil(t, err, "There should be no error creating the Docker client") assert.NotNil(t, client, "A Docker client reference should have been returned") assert.Equal(t, host, client.DaemonHost()) } func testDir(t *testing.T) string { testDir, err := os.Getwd() if err != nil { t.Fatal("Unable to determine the current test directory") } return testDir } image-4.0.1/docker/daemon/daemon_dest.go000066400000000000000000000127421354546467100201460ustar00rootroot00000000000000package daemon import ( "context" "io" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/docker/tarfile" "github.com/containers/image/v4/types" "github.com/docker/docker/client" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type daemonImageDestination struct { ref daemonReference mustMatchRuntimeOS bool *tarfile.Destination // Implements most of types.ImageDestination // For talking to imageLoadGoroutine goroutineCancel context.CancelFunc statusChannel <-chan error writer *io.PipeWriter // Other state committed bool // writer has been closed } // newImageDestination returns a types.ImageDestination for the specified image reference. 
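// The resulting destination streams a docker-save(1)-style tarball into the daemon:
// bytes written via the embedded tarfile.Destination flow through an io.Pipe to
// imageLoadGoroutine below, which feeds them to client.ImageLoad; Commit() then
// closes the pipe and waits on statusChannel for the daemon's verdict.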
func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
	if ref.ref == nil {
		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
	}
	namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
	if !ok {
		return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
	}

	var mustMatchRuntimeOS = true
	if sys != nil && sys.DockerDaemonHost != client.DefaultDockerHost {
		mustMatchRuntimeOS = false
	}

	c, err := newDockerClient(sys)
	if err != nil {
		return nil, errors.Wrap(err, "Error initializing docker engine client")
	}
	reader, writer := io.Pipe()
	// Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it.
	statusChannel := make(chan error, 1)

	goroutineContext, goroutineCancel := context.WithCancel(ctx)
	go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)

	return &daemonImageDestination{
		ref:                ref,
		mustMatchRuntimeOS: mustMatchRuntimeOS,
		Destination:        tarfile.NewDestination(writer, namedTaggedRef),
		goroutineCancel:    goroutineCancel,
		statusChannel:      statusChannel,
		writer:             writer,
		committed:          false,
	}, nil
}

// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
	err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
	defer func() {
		logrus.Debugf("docker-daemon: sending done, status %v", err)
		statusChannel <- err
	}()
	defer func() {
		if err == nil {
			reader.Close()
		} else {
			reader.CloseWithError(err)
		}
	}()

	resp, err := c.ImageLoad(ctx, reader, true)
	if err != nil {
		err = errors.Wrap(err, "Error saving image to docker engine")
		return
	}
	defer resp.Body.Close()
}

// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression {
	return types.PreserveOriginal
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
	return d.mustMatchRuntimeOS
}

// Close removes resources associated with an initialized ImageDestination, if any.
func (d *daemonImageDestination) Close() error {
	if !d.committed {
		logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
		// In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
		// In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
		// https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
		// net/http version with native Context support in Go 1.7) do not always actually immediately cancel
		// the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
		// return early if the context is canceled without terminating the goroutine at all.
		// So we need this CloseWithError to terminate sending the HTTP request Body
		// immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
		// the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
// Whether that works or not, closing the PipeWriter seems desirable in any case.
		d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
	}
	d.goroutineCancel()

	return nil
}

func (d *daemonImageDestination) Reference() types.ImageReference {
	return d.ref
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *daemonImageDestination) Commit(ctx context.Context) error {
	logrus.Debugf("docker-daemon: Closing tar stream")
	if err := d.Destination.Commit(ctx); err != nil {
		return err
	}
	if err := d.writer.Close(); err != nil {
		return err
	}
	d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.

	logrus.Debugf("docker-daemon: Waiting for status")
	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-d.statusChannel:
		return err
	}
}
image-4.0.1/docker/daemon/daemon_src.go000066400000000000000000000043631354546467100177660ustar00rootroot00000000000000package daemon

import (
	"context"

	"github.com/containers/image/v4/docker/tarfile"
	"github.com/containers/image/v4/types"
	"github.com/pkg/errors"
)

type daemonImageSource struct {
	ref daemonReference
	*tarfile.Source // Implements most of types.ImageSource
}

type layerInfo struct {
	path string
	size int64
}

// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
//
// It would be great if we were able to stream the input tar as it is being
// sent; but Docker sends the top-level manifest, which determines which paths
// to look for, at the end, so we will need to seek back and re-read, several times.
// (We could, perhaps, expect an exact sequence, assume that the first plaintext file
// is the config, and that the following len(RootFS) files are the layers, but that feels
// way too brittle.)
func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
	c, err := newDockerClient(sys)
	if err != nil {
		return nil, errors.Wrap(err, "Error initializing docker engine client")
	}
	// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
	// Either way ImageSave should create a tarball with exactly one image.
	inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
	if err != nil {
		return nil, errors.Wrap(err, "Error loading image from docker engine")
	}
	defer inputStream.Close()

	src, err := tarfile.NewSourceFromStream(inputStream)
	if err != nil {
		return nil, err
	}
	return &daemonImageSource{
		ref:    ref,
		Source: src,
	}, nil
}

// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *daemonImageSource) Reference() types.ImageReference {
	return s.ref
}

// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
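// Returning nil with a nil error means the layer data in the manifest is fine as-is.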
func (s *daemonImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } image-4.0.1/docker/daemon/daemon_transport.go000066400000000000000000000266071354546467100212500ustar00rootroot00000000000000package daemon import ( "context" "fmt" "github.com/containers/image/v4/docker/policyconfiguration" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) func init() { transports.Register(Transport) } // Transport is an ImageTransport for images managed by a local Docker daemon. var Transport = daemonTransport{} type daemonTransport struct{} // Name returns the name of the transport, which must be unique among other transports. func (t daemonTransport) Name() string { return "docker-daemon" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { return ParseReference(reference) } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { // ID values cannot be effectively namespaced, and are clearly invalid host:port values. if _, err := digest.Parse(scope); err == nil { return errors.Errorf(`docker-daemon: can not use algo:digest value %s as a namespace`, scope) } // FIXME? We could be verifying the various character set and length restrictions // from docker/distribution/reference.regexp.go, but other than that there // are few semantically invalid strings. return nil } // daemonReference is an ImageReference for images managed by a local Docker daemon // Exactly one of id and ref can be set. // For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) // For daemonImageDestination, it must be a ref, which is NamedTagged. // (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. // Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) type daemonReference struct { id digest.Digest ref reference.Named // !reference.IsNameOnly } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func ParseReference(refString string) (types.ImageReference, error) { // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). // reference.ParseAnyReference interprets such strings as digests. 
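	// Hedged examples with hypothetical values: "sha256:<64 hex digits>" is taken as
	// an image ID by the digest branch below, while "busybox:latest" falls through to
	// reference.ParseNormalizedNamed and becomes docker.io/library/busybox:latest.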
if dgst, err := digest.Parse(refString); err == nil { // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. // Other digest references are ambiguous, so refuse them. if dgst.Algorithm() != digest.Canonical { return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) } return NewReference(dgst, nil) } ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values if err != nil { return nil, err } if reference.FamiliarName(ref) == digest.Canonical.String() { return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) } return NewReference("", ref) } // NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { if id != "" && ref != nil { return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") } if ref != nil { if reference.IsNameOnly(ref) { return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. // This MAY be accepted in the future. // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop // the tag or the digest first?) _, isTagged := ref.(reference.NamedTagged) _, isDigested := ref.(reference.Canonical) if isTagged && isDigested { return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") } } return daemonReference{ id: id, ref: ref, }, nil } func (ref daemonReference) Transport() types.ImageTransport { return Transport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). func (ref daemonReference) StringWithinTransport() string { switch { case ref.id != "": return ref.id.String() case ref.ref != nil: return reference.FamiliarString(ref.ref) default: // Coverage: Should never happen, NewReference above should refuse such values. panic("Internal inconsistency: daemonReference has empty id and nil ref") } } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref daemonReference) DockerReference() reference.Named { return ref.ref // May be nil } // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. // This MUST reflect user intent, not e.g. 
after processing of third-party redirects or aliases; // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. various references with exactly the same semantics should return the same configuration identity) // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. func (ref daemonReference) PolicyConfigurationIdentity() string { // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. // But the existence of image IDs means that we can’t truly well namespace the input: // a single image can be namespaced either using the name or the ID depending on how it is named. // // That’s fairly unexpected, but we have to cope somehow. // // So, use the ordinary docker/policyconfiguration namespacing for named images. // image IDs all fall into the root namespace. // Users can set up the root namespace to be either untrusted or rejected, // and to set up specific trust for named namespaces. This allows verifying image // identity when a name is known, and unnamed images would be untrusted or rejected. switch { case ref.id != "": return "" // This still allows using the default "" scope to define a global policy for ID-identified images. case ref.ref != nil: res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) } return res default: // Coverage: Should never happen, NewReference above should refuse such values. panic("Internal inconsistency: daemonReference has empty id and nil ref") } } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref daemonReference) PolicyConfigurationNamespaces() []string { // See the explanation in daemonReference.PolicyConfigurationIdentity. switch { case ref.id != "": return []string{} case ref.ref != nil: return policyconfiguration.DockerReferenceNamespaces(ref.ref) default: // Coverage: Should never happen, NewReference above should refuse such values. panic("Internal inconsistency: daemonReference has empty id and nil ref") } } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
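// A minimal usage sketch (illustrative only; ctx and sys are caller-provided and
// error handling is abbreviated):
//
//	img, err := ref.NewImage(ctx, sys)
//	if err != nil {
//		return err
//	}
//	defer img.Close()
//	manifestBlob, mimeType, err := img.Manifest(ctx)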
func (ref daemonReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, sys, ref) if err != nil { return nil, err } return image.FromSource(ctx, sys, src) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref daemonReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { return newImageSource(ctx, sys, ref) } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref daemonReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(ctx, sys, ref) } // DeleteImage deletes the named image from the registry, if supported. func (ref daemonReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { // Should this just untag the image? Should this stop running containers? // The semantics is not quite as clear as for remote repositories. // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. return errors.Errorf("Deleting images not implemented for docker-daemon: images") } image-4.0.1/docker/daemon/daemon_transport_test.go000066400000000000000000000220361354546467100222770ustar00rootroot00000000000000package daemon import ( "context" "testing" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" sha256digest = "sha256:" + sha256digestHex ) func TestTransportName(t *testing.T) { assert.Equal(t, "docker-daemon", Transport.Name()) } func TestTransportParseReference(t *testing.T) { testParseReference(t, Transport.ParseReference) } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { // docker/policyconfiguation-accepted identities and scopes are accepted for _, scope := range []string{ "registry.example.com/ns/stream" + sha256digest, "registry.example.com/ns/stream:notlatest", "registry.example.com/ns/stream", "registry.example.com/ns", "registry.example.com", sha256digestHex, // Accept also unqualified hexdigest valies, they are in principle possible host names. } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } // Hexadecimal IDs are rejected. algo:hexdigest is clearly an invalid host:port value. err := Transport.ValidatePolicyConfigurationScope(sha256digest) assert.Error(t, err) } func TestParseReference(t *testing.T) { testParseReference(t, ParseReference) } // testParseReference is a test shared for Transport.ParseReference and ParseReference. 
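// Each case sets at most one of expectedID/expectedRef; when both are empty the
// input is expected to be rejected with an error.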
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { for _, c := range []struct{ input, expectedID, expectedRef string }{ {sha256digest, sha256digest, ""}, // Valid digest format {"sha512:" + sha256digestHex + sha256digestHex, "", ""}, // Non-digest.Canonical digest {"sha256:ab", "", ""}, // Invalid digest value (too short) {sha256digest + "ab", "", ""}, // Invalid digest value (too long) {"sha256:XX23456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "", ""}, // Invalid digest value {"UPPERCASEISINVALID", "", ""}, // Invalid reference input {"busybox", "", ""}, // Missing tag or digest {"busybox:latest", "", "docker.io/library/busybox:latest"}, // Explicit tag {"busybox@" + sha256digest, "", "docker.io/library/busybox@" + sha256digest}, // Explicit digest // A github.com/distribution/reference value can have a tag and a digest at the same time! // Most versions of docker/reference do not handle that (ignoring the tag), so we reject such input. {"busybox:latest@" + sha256digest, "", ""}, // Both tag and digest {"docker.io/library/busybox:latest", "", "docker.io/library/busybox:latest"}, // All implied values explicitly specified } { ref, err := fn(c.input) if c.expectedID == "" && c.expectedRef == "" { assert.Error(t, err, c.input) } else { require.NoError(t, err, c.input) daemonRef, ok := ref.(daemonReference) require.True(t, ok, c.input) // If we don't reject the input, the interpretation must be consistent with reference.ParseAnyReference dockerRef, err := reference.ParseAnyReference(c.input) require.NoError(t, err, c.input) if c.expectedRef == "" { assert.Equal(t, c.expectedID, daemonRef.id.String(), c.input) assert.Nil(t, daemonRef.ref, c.input) _, ok := dockerRef.(reference.Digested) require.True(t, ok, c.input) assert.Equal(t, c.expectedID, dockerRef.String(), c.input) } else { assert.Equal(t, "", daemonRef.id.String(), c.input) require.NotNil(t, daemonRef.ref, c.input) assert.Equal(t, c.expectedRef, daemonRef.ref.String(), c.input) _, ok := dockerRef.(reference.Named) require.True(t, ok, c.input) assert.Equal(t, c.expectedRef, dockerRef.String(), c.input) } } } } // A common list of reference formats to test for the various ImageReference methods. // (For IDs it is much simpler, we simply use them unmodified) var validNamedReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ {"busybox:notlatest", "docker.io/library/busybox:notlatest", "busybox:notlatest"}, // Explicit tag {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "busybox" + sha256digest}, // Explicit digest {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "busybox:latest"}, // All implied values explicitly specified {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "example.com/ns/foo:bar"}, // All values explicitly specified } func TestNewReference(t *testing.T) { // An ID reference. 
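	// (NewReference accepts exactly one of an image ID or a named reference; this
	// exercises the ID-only form, the named and invalid forms follow below.)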
id, err := digest.Parse(sha256digest) require.NoError(t, err) ref, err := NewReference(id, nil) require.NoError(t, err) daemonRef, ok := ref.(daemonReference) require.True(t, ok) assert.Equal(t, id, daemonRef.id) assert.Nil(t, daemonRef.ref) // Named references for _, c := range validNamedReferenceTestCases { parsed, err := reference.ParseNormalizedNamed(c.input) require.NoError(t, err) ref, err := NewReference("", parsed) require.NoError(t, err, c.input) daemonRef, ok := ref.(daemonReference) require.True(t, ok, c.input) assert.Equal(t, "", daemonRef.id.String()) require.NotNil(t, daemonRef.ref) assert.Equal(t, c.dockerRef, daemonRef.ref.String(), c.input) } // Both an ID and a named reference provided parsed, err := reference.ParseNormalizedNamed("busybox:latest") require.NoError(t, err) _, err = NewReference(id, parsed) assert.Error(t, err) // A reference with neither a tag nor digest parsed, err = reference.ParseNormalizedNamed("busybox") require.NoError(t, err) _, err = NewReference("", parsed) assert.Error(t, err) // A github.com/distribution/reference value can have a tag and a digest at the same time! parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@" + sha256digest) require.NoError(t, err) _, ok = parsed.(reference.Canonical) require.True(t, ok) _, ok = parsed.(reference.NamedTagged) require.True(t, ok) _, err = NewReference("", parsed) assert.Error(t, err) } func TestReferenceTransport(t *testing.T) { ref, err := ParseReference(sha256digest) require.NoError(t, err) assert.Equal(t, Transport, ref.Transport()) ref, err = ParseReference("busybox:latest") require.NoError(t, err) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { ref, err := ParseReference(sha256digest) require.NoError(t, err) assert.Equal(t, sha256digest, ref.StringWithinTransport()) for _, c := range validNamedReferenceTestCases { ref, err := ParseReference(c.input) require.NoError(t, err, c.input) stringRef := ref.StringWithinTransport() assert.Equal(t, c.stringWithinTransport, stringRef, c.input) // Do one more round to verify that the output can be parsed, to an equal value. ref2, err := Transport.ParseReference(stringRef) require.NoError(t, err, c.input) stringRef2 := ref2.StringWithinTransport() assert.Equal(t, stringRef, stringRef2, c.input) } } func TestReferenceDockerReference(t *testing.T) { ref, err := ParseReference(sha256digest) require.NoError(t, err) assert.Nil(t, ref.DockerReference()) for _, c := range validNamedReferenceTestCases { ref, err := ParseReference(c.input) require.NoError(t, err, c.input) dockerRef := ref.DockerReference() require.NotNil(t, dockerRef, c.input) assert.Equal(t, c.dockerRef, dockerRef.String(), c.input) } } func TestReferencePolicyConfigurationIdentity(t *testing.T) { // id-only references have no identity. ref, err := ParseReference(sha256digest) require.NoError(t, err) assert.Equal(t, "", ref.PolicyConfigurationIdentity()) // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. ref, err = ParseReference("busybox:notlatest") require.NoError(t, err) assert.Equal(t, "docker.io/library/busybox:notlatest", ref.PolicyConfigurationIdentity()) } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { // id-only references have no identity. ref, err := ParseReference(sha256digest) require.NoError(t, err) assert.Empty(t, ref.PolicyConfigurationNamespaces()) // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. 
ref, err = ParseReference("busybox:notlatest") assert.Equal(t, []string{ "docker.io/library/busybox", "docker.io/library", "docker.io", }, ref.PolicyConfigurationNamespaces()) } // daemonReference.NewImage, daemonReference.NewImageSource, openshiftReference.NewImageDestination // untested because just creating the objects immediately connects to the daemon. func TestReferenceDeleteImage(t *testing.T) { ref, err := ParseReference(sha256digest) require.NoError(t, err) err = ref.DeleteImage(context.Background(), nil) assert.Error(t, err) for _, c := range validNamedReferenceTestCases { ref, err := ParseReference(c.input) require.NoError(t, err, c.input) err = ref.DeleteImage(context.Background(), nil) assert.Error(t, err, c.input) } } image-4.0.1/docker/daemon/testdata/000077500000000000000000000000001354546467100171405ustar00rootroot00000000000000image-4.0.1/docker/daemon/testdata/certs/000077500000000000000000000000001354546467100202605ustar00rootroot00000000000000image-4.0.1/docker/daemon/testdata/certs/ca.pem000066400000000000000000000020121354546467100213410ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICzjCCAbagAwIBAgIRAIGgYBNZse0EqRVzxe7aQGIwDQYJKoZIhvcNAQELBQAw EDEOMAwGA1UEChMFaGFyZHkwHhcNMTcxMDA0MDgzNDAwWhcNMjAwOTE4MDgzNDAw WjAQMQ4wDAYDVQQKEwVoYXJkeTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC ggEBAMlrdtoXWlZMPFwgeKZHrGxjVe4KXkQy5MFBUfO48htyIe2OlZAd3HGyap41 7L4YciFhw0bp7wHnYtSTiCHQrnA4SLzNuaU2NM5nJw+E4c5kNrkvhLJqpTNCaYCy Xbh3H8REW+5UJIgnyeKLx//kvlDm6p4O55+OLlGgzxNaTIgldKLPmx543VVt6VDT qgFlaYsRz8hZ12+qAqu5am/Wpfal2+Df7Pmmn5M90UBTUwY8CLc/ZiWbv6hihDWV I28JoM0onEqAx7phRd0SwwK4mYfEe/u614r3bZaI36e9ojU9/St4nbMoMeyZP96t DOdX9A1SMbsqLOYKXBKM+jXPEaECAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKsMA8G A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBALah7CjpwbEY6yjA2KDv VaAHEgz4Xd8USW/L2292EiQLmFdIaEJiiWRjtKFiF427TXfAPXvxHA2q9OElDW4d G6XAcBJg5mcBh8WRTHwfLQ8llfj7dH1/sfazSUZeat6lTIyhQfkF99LAJTqlfYAF aNqIQio7FAjGyJqIPYLa1FKmfLdZr9azb9IjTZLhBGBWdLF0+JOn+JBsl7g9BvUp ArCI0Wib/vsr368xkzWzKjij1exZdfw0TmsieNYvViFoFJGNCB5XLPo0bHrmMVVe 25EGam+xPkG/JQP5Eb3iikSEn8y5SIeJ0nS0EQE6uXPv+lQj1LmVv8OYzjXqpoJT n6g= -----END CERTIFICATE----- image-4.0.1/docker/daemon/testdata/certs/cert.pem000066400000000000000000000020621354546467100217200ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIC6zCCAdOgAwIBAgIQEh1UsPL20u9KnyOByuhYWDANBgkqhkiG9w0BAQsFADAQ MQ4wDAYDVQQKEwVoYXJkeTAeFw0xNzEwMDQwODM0MDBaFw0yMDA5MTgwODM0MDBa MBwxGjAYBgNVBAoMEWhhcmR5Ljxib290c3RyYXA+MIIBIjANBgkqhkiG9w0BAQEF AAOCAQ8AMIIBCgKCAQEAyJm29vB/urzreEwF012iAAWW3fgE1VEeNLTP/sZTYV3z UNGKao5x7dUIiah8rptkK3+FN4TID8Z2c1DpKzMTisdpRF3UoRWmjm1UTbxEENhk EptkFwGFM6BcZSyiLlyCBVM+wGsqzHAASe833S/yiu8miNc2S+jd0FIluKWe0yzG u2oaJfA28dBfqWyn9hh6msqBVYK6sDle9t0ditNubCyD+vrnoK8825LOIPV6QafL kVyW0/mj4GJutPOVop37HyQMcuQnDWBA+934l3tpeaJ93d3u8XjU7dXuOobKMohw +33/pTALu9P0WtDbEeo/xcEICgimqpir92KMSXxUbwIDAQABozUwMzAOBgNVHQ8B Af8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADANBgkq hkiG9w0BAQsFAAOCAQEAnYffv9ipGQVW/t3sFxKu9LXQ7ZkhUSgoxPIA51goaYop YM9QR3ZBf2tMJwjKXuOLEkxxweNjP3dMKh2gykFory+jv6OQYIiLf9M82ty8rOPi mWLMDAIWWagkj5Yy6b+/aLkpXQ+lEsxLyi6po+D+I+AwRUYvfSc74a7XxkJk77JF /0SVgNdDtL08zVNOGDgepP/95e1pKMKgsOiCDnFCOAY+l6HcvizwBH+EI+XtdLVb qBmOAYiwYObBaRuyhVbbDKqKRGFUNkmmDv6vCQoTL1C9wrBnAiJe2khbLm1ix9Re 3MW15CLuipneSgRAWXSdMbDIv9+KQE8fo2TWqikrCw== -----END CERTIFICATE----- image-4.0.1/docker/daemon/testdata/certs/key.pem000066400000000000000000000032171354546467100215560ustar00rootroot00000000000000-----BEGIN RSA PRIVATE KEY----- 
MIIEpQIBAAKCAQEAyJm29vB/urzreEwF012iAAWW3fgE1VEeNLTP/sZTYV3zUNGK ao5x7dUIiah8rptkK3+FN4TID8Z2c1DpKzMTisdpRF3UoRWmjm1UTbxEENhkEptk FwGFM6BcZSyiLlyCBVM+wGsqzHAASe833S/yiu8miNc2S+jd0FIluKWe0yzGu2oa JfA28dBfqWyn9hh6msqBVYK6sDle9t0ditNubCyD+vrnoK8825LOIPV6QafLkVyW 0/mj4GJutPOVop37HyQMcuQnDWBA+934l3tpeaJ93d3u8XjU7dXuOobKMohw+33/ pTALu9P0WtDbEeo/xcEICgimqpir92KMSXxUbwIDAQABAoIBAQCyuKjXR5w1Ll4I FotWLmTH6jLo3jDIMPZddP6e+emNpRvD1HyixPhiMdvicXdsRUuwqXNx7F4mF+au hNbIwz/U9CcoXwSy48w5ttRWUba+31wBa+p3yMX5IhVPmr1/2rGItwsAejpuXBcV yAiYi0BnYfyODFf2t6jwElBDO2POtdEoYVYwgtMTMy5pmDA2QA3mKkjCcJviectZ 9yFb8DFiwIYkryErWrGWaKls/oYV2O0A0mCaIqgw3HfhIl6F1pk+9oYnmsq6IzF5 wSIg2evd4GMm/L2sqlVFqb4Kj54fbyfdOFK0bQso6VQZvB5tZ6NLHfv2f3BBFHVu jO+On/ixAoGBAOJkPHavnAb/lLDnMJjjXYNUQqyxxSlwOwNifG4evf/KAezIIalJ kC7jZoFsUkARVbRKQag0T2Xvxw/dDqmNastR1NxsBkhOWjYiQbALYP3u2f06Nhf8 YlX6hyEje/3bb838//sH5jnaN8GcZnDBrAoPzW+V87pQoCyVrjs2t8qXAoGBAOLV +PviAUWFjUO//dYk9H9IWotr6rdkzmpLbrj+NoLNSGeoZbByPmT5BuNswXvNyk+9 smOQ8yqBiMpjxKwR4WQnS6Ydh6HTT33IWLLVazDFMf7ACmXWoScFhCAW6qGfdrYQ hkCSbwgun8jbL2D477jJl6ZyQG48lVnnZDjkFbfpAoGAUOqCsekSW23+Nzxqojqh sc7sBc2EKstyTENnNfTG9CW/imH9pgQlBJ1Chf+xZjTL7SSdUwFfX4/UFldsZi2l fgZBjocNt8pJdA/KaqGmiRxVzayAqRIME673nWCRcKp9y6Ih3Bd2sjbMtuavtp2C YBZF1xxBgNZQaZ8WJxPnnQECgYEAzLgGJPWc5iyZCJsesQTbMICRTyEPTYKKFD6N 6CFt+vDgNsUxOWRx0Vk6kUhW+rAItZzjgZ6RBzyuwtH17sGYZHZefMZL4Y2/QSru ej/IpNRjwaF6AN0KxhfhXcCw8zrivX/+WgqOcJj7lh/TC7a/S0uNNSgJ5DODKwd9 WSboPvkCgYEAzqdWfetko7hEI4076pufJrHPnnCJSHkkQ1QnfVl71mq7UmKXLDxD L5oWtU53+dswzvxGrzkOWsRJC5nN30BYJuYlwKzo3+MCKlUzJSuIMVTbTPlwKudh AF19s4GFZVo29FlgIQhA5dfIkZgFXAlVxYcGTLUixEmPwrc6yguULPs= -----END RSA PRIVATE KEY----- image-4.0.1/docker/docker_client.go000066400000000000000000000556201354546467100172300ustar00rootroot00000000000000package docker import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strconv" "strings" "sync" "time" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/pkg/docker/config" "github.com/containers/image/v4/pkg/sysregistriesv2" "github.com/containers/image/v4/pkg/tlsclientconfig" "github.com/containers/image/v4/types" "github.com/docker/distribution/registry/client" "github.com/docker/go-connections/tlsconfig" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( dockerHostname = "docker.io" dockerV1Hostname = "index.docker.io" dockerRegistry = "registry-1.docker.io" resolvedPingV2URL = "%s://%s/v2/" resolvedPingV1URL = "%s://%s/v1/_ping" tagsPath = "/v2/%s/tags/list" manifestPath = "/v2/%s/manifests/%s" blobsPath = "/v2/%s/blobs/%s" blobUploadPath = "/v2/%s/blobs/uploads/" extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" minimumTokenLifetimeSeconds = 60 extensionSignatureSchemaVersion = 2 // extensionSignature.Version extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type ) var ( // ErrV1NotSupported is returned when we're trying to talk to a // docker V1 registry. ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") // ErrUnauthorizedForCredentials is returned when the status code returned is 401 ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"} ) // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: // signature represents a Docker image signature. 
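// A hypothetical wire example matching the JSON tags on the struct below (the
// exact Name format is assumed to be "sha256:<manifest digest>@<signature name>";
// Content is a []byte, which encoding/json base64-encodes):
//
//	{
//	  "schemaVersion": 2,
//	  "name": "sha256:0123…cdef@signature-1",
//	  "type": "atomic",
//	  "content": "...base64 of the raw signature bytes..."
//	}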
type extensionSignature struct { Version int `json:"schemaVersion"` // Version specifies the schema version Name string `json:"name"` // Name must be in "sha256:@signatureName" format Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" Content []byte `json:"content"` // Content contains the signature } // signatureList represents list of Docker image signatures. type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } type bearerToken struct { Token string `json:"token"` AccessToken string `json:"access_token"` ExpiresIn int `json:"expires_in"` IssuedAt time.Time `json:"issued_at"` expirationTime time.Time } // dockerClient is configuration for dealing with a single Docker registry. type dockerClient struct { // The following members are set by newDockerClient and do not change afterwards. sys *types.SystemContext registry string // tlsClientConfig is setup by newDockerClient and will be used and updated // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. tlsClientConfig *tls.Config // The following members are not set by newDockerClient and must be set by callers if needed. username string password string signatureBase signatureStorageBase scope authScope // The following members are detected registry properties: // They are set after a successful detectProperties(), and never change afterwards. client *http.Client scheme string challenges []challenge supportsSignatures bool // Private state for setupRequestAuth (key: string, value: bearerToken) tokenCache sync.Map // Private state for detectProperties: detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. detectPropertiesError error // detectPropertiesError caches the initial error. } type authScope struct { remoteName string actions string } // sendAuth determines whether we need authentication for v2 or v1 endpoint. type sendAuth int const ( // v2 endpoint with authentication. v2Auth sendAuth = iota // v1 endpoint with authentication. // TODO: Get v1Auth working // v1Auth // no authentication, works for both v1 and v2. noAuth ) func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { token := new(bearerToken) if err := json.Unmarshal(blob, &token); err != nil { return nil, err } if token.Token == "" { token.Token = token.AccessToken } if token.ExpiresIn < minimumTokenLifetimeSeconds { token.ExpiresIn = minimumTokenLifetimeSeconds logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) } if token.IssuedAt.IsZero() { token.IssuedAt = time.Now().UTC() } token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) return token, nil } // this is cloned from docker/go-connections because upstream docker has changed // it and make deps here fails otherwise. // We'll drop this once we upgrade to docker 1.13.x deps. func serverDefault() *tls.Config { return &tls.Config{ // Avoid fallback to SSL protocols < TLS1.0 MinVersion: tls.VersionTLS10, PreferServerCipherSuites: true, CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, } } // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. 
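// Resolution order, as implemented below: sys.DockerCertPath wins outright;
// otherwise sys.DockerPerHostCertDirPath joined with hostPort; otherwise the
// first systemPerHostCertDirPaths entry (optionally prefixed with
// sys.RootForImplicitAbsolutePaths) that exists for hostPort.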
func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { if sys != nil && sys.DockerCertPath != "" { return sys.DockerCertPath, nil } if sys != nil && sys.DockerPerHostCertDirPath != "" { return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil } var ( hostCertDir string fullCertDirPath string ) for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths { if sys != nil && sys.RootForImplicitAbsolutePaths != "" { hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) } else { hostCertDir = systemPerHostCertDirPath } fullCertDirPath = filepath.Join(hostCertDir, hostPort) _, err := os.Stat(fullCertDirPath) if err == nil { break } if os.IsNotExist(err) { continue } if os.IsPermission(err) { logrus.Debugf("error accessing certs directory due to permissions: %v", err) continue } if err != nil { return "", err } } return fullCertDirPath, nil } // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { registry := reference.Domain(ref.ref) username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } sigBase, err := configuredSignatureStorageBase(sys, ref, write) if err != nil { return nil, err } client, err := newDockerClient(sys, registry, ref.ref.Name()) if err != nil { return nil, err } client.username = username client.password = password client.signatureBase = sigBase client.scope.actions = actions client.scope.remoteName = reference.Path(ref.ref) return client, nil } // newDockerClient returns a new dockerClient instance for the given registry // and reference. The reference is used to query the registry configuration // and can either be a registry (e.g, "registry.com[:5000]"), a repository // (e.g., "registry.com[:5000][/some/namespace]/repo"). // Please note that newDockerClient does not set all members of dockerClient // (e.g., username and password); those must be set by callers if necessary. func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) { hostName := registry if registry == dockerHostname { registry = dockerRegistry } tlsClientConfig := serverDefault() // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is // undocumented and may change if docker/docker changes. certDir, err := dockerCertDir(sys, hostName) if err != nil { return nil, err } if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil { return nil, err } // Check if TLS verification shall be skipped (default=false) which can // be specified in the sysregistriesv2 configuration. 
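	// For reference, a registries.conf (sysregistriesv2) entry that would set
	// skipVerify below might look like this (an illustrative sketch; the host
	// name is made up):
	//
	//	[[registry]]
	//	prefix = "registry.example.com"
	//	location = "registry.example.com"
	//	insecure = true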
skipVerify := false reg, err := sysregistriesv2.FindRegistry(sys, reference) if err != nil { return nil, errors.Wrapf(err, "error loading registries") } if reg != nil { if reg.Blocked { return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys)) } skipVerify = reg.Insecure } tlsClientConfig.InsecureSkipVerify = skipVerify return &dockerClient{ sys: sys, registry: registry, tlsClientConfig: tlsClientConfig, }, nil } // CheckAuth validates the credentials by attempting to log into the registry // returns an error if an error occurred while making the http request or the status code received was 401 func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { client, err := newDockerClient(sys, registry, registry) if err != nil { return errors.Wrapf(err, "error creating new docker client") } client.username = username client.password = password resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil) if err != nil { return err } defer resp.Body.Close() switch resp.StatusCode { case http.StatusOK: return nil case http.StatusUnauthorized: return ErrUnauthorizedForCredentials default: return errors.Errorf("error occured with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode)) } } // SearchResult holds the information of each matching image // It matches the output returned by the v1 endpoint type SearchResult struct { Name string `json:"name"` Description string `json:"description"` // StarCount states the number of stars the image has StarCount int `json:"star_count"` IsTrusted bool `json:"is_trusted"` // IsAutomated states whether the image is an automated build IsAutomated bool `json:"is_automated"` // IsOfficial states whether the image is an official build IsOfficial bool `json:"is_official"` } // SearchRegistry queries a registry for images that contain "image" in their name // The limit is the max number of results desired // Note: The limit value doesn't work with all registries // for example registry.access.redhat.com returns all the results without limiting it to the limit value func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { type V2Results struct { // Repositories holds the results returned by the /v2/_catalog endpoint Repositories []string `json:"repositories"` } type V1Results struct { // Results holds the results returned by the /v1/search endpoint Results []SearchResult `json:"results"` } v2Res := &V2Results{} v1Res := &V1Results{} // Get credentials from authfile for the underlying hostname username, password, err := config.GetAuthentication(sys, registry) if err != nil { return nil, errors.Wrapf(err, "error getting username and password") } // The /v2/_catalog endpoint has been disabled for docker.io therefore // the call made to that endpoint will fail. So using the v1 hostname // for docker.io for simplicity of implementation and the fact that it // returns search results. hostname := registry if registry == dockerHostname { hostname = dockerV1Hostname } client, err := newDockerClient(sys, hostname, registry) if err != nil { return nil, errors.Wrapf(err, "error creating new docker client") } client.username = username client.password = password // Only try the v1 search endpoint if the search query is not empty. If it is // empty skip to the v2 endpoint. 
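	// The v1 request built below has the shape (illustrative):
	//
	//	GET /v1/search?q=<image>&n=<limit>
	//
	// while the v2 fallback lists /v2/_catalog and filters the repository names
	// client-side.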
if image != "" { // set up the query values for the v1 endpoint u := url.URL{ Path: "/v1/search", } q := u.Query() q.Set("q", image) q.Set("n", strconv.Itoa(limit)) u.RawQuery = q.Encode() logrus.Debugf("trying to talk to v1 search endpoint") resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil) if err != nil { logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) } else { if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { return nil, err } return v1Res.Results, nil } } } logrus.Debugf("trying to talk to v2 search endpoint") resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil) if err != nil { logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) } else { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode)) } else { if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { return nil, err } searchRes := []SearchResult{} for _, repo := range v2Res.Repositories { if strings.Contains(repo, image) { res := SearchResult{ Name: repo, } searchRes = append(searchRes, res) } } return searchRes, nil } } return nil, errors.Wrapf(err, "couldn't search registry %q", registry) } // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) { if err := c.detectProperties(ctx); err != nil { return nil, err } url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope) } // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. // streamLen, if not -1, specifies the length of the data expected on stream. // makeRequest should generally be preferred. // TODO(runcom): too many arguments here, use a struct func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { req, err := http.NewRequest(method, url, stream) if err != nil { return nil, err } req = req.WithContext(ctx) if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. 
req.ContentLength = streamLen } req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") for n, h := range headers { for _, hh := range h { req.Header.Add(n, hh) } } if c.sys != nil && c.sys.DockerRegistryUserAgent != "" { req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent) } if auth == v2Auth { if err := c.setupRequestAuth(req, extraScope); err != nil { return nil, err } } logrus.Debugf("%s %s", method, url) res, err := c.client.Do(req) if err != nil { return nil, err } return res, nil } // we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // // 1) docker does that as well // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request // // debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { if len(c.challenges) == 0 { return nil } schemeNames := make([]string, 0, len(c.challenges)) for _, challenge := range c.challenges { schemeNames = append(schemeNames, challenge.Scheme) switch challenge.Scheme { case "basic": req.SetBasicAuth(c.username, c.password) return nil case "bearer": cacheKey := "" scopes := []authScope{c.scope} if extraScope != nil { // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons). cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions) scopes = append(scopes, *extraScope) } var token bearerToken t, inCache := c.tokenCache.Load(cacheKey) if inCache { token = t.(bearerToken) } if !inCache || time.Now().After(token.expirationTime) { t, err := c.getBearerToken(req.Context(), challenge, scopes) if err != nil { return err } token = *t c.tokenCache.Store(cacheKey, token) } req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) } } logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) return nil } func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) { realm, ok := challenge.Parameters["realm"] if !ok { return nil, errors.Errorf("missing realm in bearer auth challenge") } authReq, err := http.NewRequest("GET", realm, nil) if err != nil { return nil, err } authReq = authReq.WithContext(ctx) getParams := authReq.URL.Query() if c.username != "" { getParams.Add("account", c.username) } if service, ok := challenge.Parameters["service"]; ok && service != "" { getParams.Add("service", service) } for _, scope := range scopes { if scope.remoteName != "" && scope.actions != "" { getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions)) } } authReq.URL.RawQuery = getParams.Encode() if c.username != "" && c.password != "" { authReq.SetBasicAuth(c.username, c.password) } logrus.Debugf("%s %s", authReq.Method, authReq.URL.String()) res, err := c.client.Do(authReq) if err != nil { return nil, err } defer res.Body.Close() switch res.StatusCode { case http.StatusUnauthorized: err := client.HandleErrorResponse(res) logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error()) return nil, ErrUnauthorizedForCredentials case http.StatusOK: break default: return nil, errors.Errorf("unexpected 
http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL) } tokenBlob, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } return newBearerTokenFromJSONBlob(tokenBlob) } // detectPropertiesHelper performs the work of detectProperties which executes // it at most once. func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly // specified by the system context if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue } tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = c.tlsClientConfig c.client = &http.Client{Transport: tr} ping := func(scheme string) error { url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return err } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode)) } c.challenges = parseAuthHeader(resp.Header) c.scheme = scheme c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" return nil } err := ping("https") if err != nil && c.tlsClientConfig.InsecureSkipVerify { err = ping("http") } if err != nil { err = errors.Wrap(err, "pinging docker registry returned") if c.sys != nil && c.sys.DockerDisableV1Ping { return err } // best effort to understand if we're talking to a V1 registry pingV1 := func(scheme string) bool { url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) if err != nil { logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err) return false } defer resp.Body.Close() logrus.Debugf("Ping %s status %d", url, resp.StatusCode) if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { return false } return true } isV1 := pingV1("https") if !isV1 && c.tlsClientConfig.InsecureSkipVerify { isV1 = pingV1("http") } if isV1 { err = ErrV1NotSupported } } return err } // detectProperties detects various properties of the registry. // See the dockerClient documentation for members which are affected by this. func (c *dockerClient) detectProperties(ctx context.Context) error { c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) return c.detectPropertiesError } // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, // using the original data structures. 
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name()) } body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } var parsedBody extensionSignatureList if err := json.Unmarshal(body, &parsedBody); err != nil { return nil, errors.Wrapf(err, "Error decoding signature list") } return &parsedBody, nil } image-4.0.1/docker/docker_client_test.go000066400000000000000000000115541354546467100202650ustar00rootroot00000000000000package docker import ( "fmt" "path/filepath" "testing" "time" "github.com/stretchr/testify/require" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" ) func TestDockerCertDir(t *testing.T) { const nondefaultFullPath = "/this/is/not/the/default/full/path" const nondefaultPerHostDir = "/this/is/not/the/default/certs.d" const variableReference = "$HOME" const rootPrefix = "/root/prefix" const registryHostPort = "thishostdefinitelydoesnotexist:5000" systemPerHostResult := filepath.Join(systemPerHostCertDirPaths[len(systemPerHostCertDirPaths)-1], registryHostPort) for _, c := range []struct { sys *types.SystemContext expected string }{ // The common case {nil, systemPerHostResult}, // There is a context, but it does not override the path. {&types.SystemContext{}, systemPerHostResult}, // Full path overridden {&types.SystemContext{DockerCertPath: nondefaultFullPath}, nondefaultFullPath}, // Per-host path overridden { &types.SystemContext{DockerPerHostCertDirPath: nondefaultPerHostDir}, filepath.Join(nondefaultPerHostDir, registryHostPort), }, // Both overridden { &types.SystemContext{ DockerCertPath: nondefaultFullPath, DockerPerHostCertDirPath: nondefaultPerHostDir, }, nondefaultFullPath, }, // Root overridden { &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, filepath.Join(rootPrefix, systemPerHostResult), }, // Root and path overrides present simultaneously, { &types.SystemContext{ DockerCertPath: nondefaultFullPath, RootForImplicitAbsolutePaths: rootPrefix, }, nondefaultFullPath, }, { &types.SystemContext{ DockerPerHostCertDirPath: nondefaultPerHostDir, RootForImplicitAbsolutePaths: rootPrefix, }, filepath.Join(nondefaultPerHostDir, registryHostPort), }, // … and everything at once { &types.SystemContext{ DockerCertPath: nondefaultFullPath, DockerPerHostCertDirPath: nondefaultPerHostDir, RootForImplicitAbsolutePaths: rootPrefix, }, nondefaultFullPath, }, // No environment expansion happens in the overridden paths {&types.SystemContext{DockerCertPath: variableReference}, variableReference}, { &types.SystemContext{DockerPerHostCertDirPath: variableReference}, filepath.Join(variableReference, registryHostPort), }, } { path, err := dockerCertDir(c.sys, registryHostPort) require.Equal(t, nil, err) assert.Equal(t, c.expected, path) } } func TestNewBearerTokenFromJsonBlob(t *testing.T) { expected := &bearerToken{Token: "IAmAToken", ExpiresIn: 100, IssuedAt: time.Unix(1514800802, 0)} tokenBlob := []byte(`{"token":"IAmAToken","expires_in":100,"issued_at":"2018-01-01T10:00:02+00:00"}`) token, err := newBearerTokenFromJSONBlob(tokenBlob) if err != nil { 
t.Fatalf("unexpected error: %v", err) } assertBearerTokensEqual(t, expected, token) } func TestNewBearerAccessTokenFromJsonBlob(t *testing.T) { expected := &bearerToken{Token: "IAmAToken", ExpiresIn: 100, IssuedAt: time.Unix(1514800802, 0)} tokenBlob := []byte(`{"access_token":"IAmAToken","expires_in":100,"issued_at":"2018-01-01T10:00:02+00:00"}`) token, err := newBearerTokenFromJSONBlob(tokenBlob) if err != nil { t.Fatalf("unexpected error: %v", err) } assertBearerTokensEqual(t, expected, token) } func TestNewBearerTokenFromInvalidJsonBlob(t *testing.T) { tokenBlob := []byte("IAmNotJson") _, err := newBearerTokenFromJSONBlob(tokenBlob) if err == nil { t.Fatalf("unexpected an error unmarshalling JSON") } } func TestNewBearerTokenSmallExpiryFromJsonBlob(t *testing.T) { expected := &bearerToken{Token: "IAmAToken", ExpiresIn: 60, IssuedAt: time.Unix(1514800802, 0)} tokenBlob := []byte(`{"token":"IAmAToken","expires_in":1,"issued_at":"2018-01-01T10:00:02+00:00"}`) token, err := newBearerTokenFromJSONBlob(tokenBlob) if err != nil { t.Fatalf("unexpected error: %v", err) } assertBearerTokensEqual(t, expected, token) } func TestNewBearerTokenIssuedAtZeroFromJsonBlob(t *testing.T) { zeroTime := time.Time{}.Format(time.RFC3339) now := time.Now() tokenBlob := []byte(fmt.Sprintf(`{"token":"IAmAToken","expires_in":100,"issued_at":"%s"}`, zeroTime)) token, err := newBearerTokenFromJSONBlob(tokenBlob) if err != nil { t.Fatalf("unexpected error: %v", err) } if token.IssuedAt.Before(now) { t.Fatalf("expected [%s] not to be before [%s]", token.IssuedAt, now) } } func assertBearerTokensEqual(t *testing.T, expected, subject *bearerToken) { if expected.Token != subject.Token { t.Fatalf("expected [%s] to equal [%s], it did not", subject.Token, expected.Token) } if expected.ExpiresIn != subject.ExpiresIn { t.Fatalf("expected [%d] to equal [%d], it did not", subject.ExpiresIn, expected.ExpiresIn) } if !expected.IssuedAt.Equal(subject.IssuedAt) { t.Fatalf("expected [%s] to equal [%s], it did not", subject.IssuedAt, expected.IssuedAt) } } image-4.0.1/docker/docker_image.go000066400000000000000000000057341354546467100170350ustar00rootroot00000000000000package docker import ( "context" "encoding/json" "fmt" "net/http" "net/url" "strings" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) // Image is a Docker-specific implementation of types.ImageCloser with a few extra methods // which are specific to Docker. type Image struct { types.ImageCloser src *dockerImageSource } // newImage returns a new Image interface type after setting up // a client to the registry hosting the given image. // The caller must call .Close() on the returned Image. func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { s, err := newImageSource(ctx, sys, ref) if err != nil { return nil, err } img, err := image.FromSource(ctx, sys, s) if err != nil { return nil, err } return &Image{ImageCloser: img, src: s}, nil } // SourceRefFullName returns a fully expanded name for the repository this image is in. func (i *Image) SourceRefFullName() string { return i.src.ref.ref.Name() } // GetRepositoryTags list all tags available in the repository. The tag // provided inside the ImageReference will be ignored. 
(This is a // backward-compatible shim method which calls the module-level // GetRepositoryTags) func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) { return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref) } // GetRepositoryTags list all tags available in the repository. The tag // provided inside the ImageReference will be ignored. func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { dr, ok := ref.(dockerReference) if !ok { return nil, errors.Errorf("ref must be a dockerReference") } path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) client, err := newDockerClientFromRef(sys, dr, false, "pull") if err != nil { return nil, errors.Wrap(err, "failed to create client") } tags := make([]string, 0) for { res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != http.StatusOK { // print url also return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode)) } var tagsHolder struct { Tags []string } if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { return nil, err } tags = append(tags, tagsHolder.Tags...) link := res.Header.Get("Link") if link == "" { break } linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") linkURL, err := url.Parse(linkURLStr) if err != nil { return tags, err } // can be relative or absolute, but we only want the path (and I // guess we're in trouble if it forwards to a new place...) path = linkURL.Path if linkURL.RawQuery != "" { path += "?" path += linkURL.RawQuery } } return tags, nil } image-4.0.1/docker/docker_image_dest.go000066400000000000000000000614141354546467100200510ustar00rootroot00000000000000package docker import ( "bytes" "context" "crypto/rand" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strings" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/blobinfocache/none" "github.com/containers/image/v4/types" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type dockerImageDestination struct { ref dockerReference c *dockerClient // State manifestDigest digest.Digest // or "" if not yet known. } // newImageDestination creates a new ImageDestination for the specified image reference. func newImageDestination(sys *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { c, err := newDockerClientFromRef(sys, ref, true, "pull,push") if err != nil { return nil, err } return &dockerImageDestination{ ref: ref, c: c, }, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *dockerImageDestination) Reference() types.ImageReference { return d.ref } // Close removes resources associated with an initialized ImageDestination, if any. 
func (d *dockerImageDestination) Close() error { return nil } func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { return []string{ imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { if err := d.c.detectProperties(ctx); err != nil { return err } switch { case d.c.signatureBase != nil: return nil case d.c.supportsSignatures: return nil default: return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") } } func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression { return types.Compress } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { return true } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *dockerImageDestination) MustMatchRuntimeOS() bool { return false } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool { return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. } // sizeCounter is an io.Writer which only counts the total size of its input. type sizeCounter struct{ size int64 } func (c *sizeCounter) Write(p []byte) (n int, err error) { c.size += int64(len(p)) return len(p), nil } // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *dockerImageDestination) HasThreadSafePutBlob() bool { return true } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { if inputInfo.Digest.String() != "" { // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. // But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_. 
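	// The upload below follows the registry's three-step flow, as implemented in
	// the remainder of this function: POST to open an upload session, PATCH the
	// blob data to the returned Location, then PUT with a ?digest= query
	// parameter to commit it.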
haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false) if err != nil { return types.BlobInfo{}, err } if haveBlob { return reusedInfo, nil } } // FIXME? Chunked upload, progress reporting, etc. uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) logrus.Debugf("Uploading %s", uploadPath) res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil) if err != nil { return types.BlobInfo{}, err } defer res.Body.Close() if res.StatusCode != http.StatusAccepted { logrus.Debugf("Error initiating layer upload, response %#v", *res) return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry) } uploadLocation, err := res.Location() if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") } digester := digest.Canonical.Digester() sizeCounter := &sizeCounter{} tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil) if err != nil { logrus.Debugf("Error uploading layer chunked, response %#v", res) return types.BlobInfo{}, err } defer res.Body.Close() computedDigest := digester.Digest() uploadLocation, err = res.Location() if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") } // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) locationQuery := uploadLocation.Query() // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 locationQuery.Set("digest", computedDigest.String()) uploadLocation.RawQuery = locationQuery.Encode() res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) if err != nil { return types.BlobInfo{}, err } defer res.Body.Close() if res.StatusCode != http.StatusCreated { logrus.Debugf("Error uploading layer, response %#v", *res) return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) } logrus.Debugf("Upload of layer %s complete", computedDigest) cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref)) return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil } // blobExists returns true iff repo contains a blob with digest, and if so, also its size. // If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) logrus.Debugf("Checking %s", checkPath) res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope) if err != nil { return false, -1, err } defer res.Body.Close() switch res.StatusCode { case http.StatusOK: logrus.Debugf("... already exists") return true, getBlobSize(res), nil case http.StatusUnauthorized: logrus.Debugf("... 
not authorized") return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name()) case http.StatusNotFound: logrus.Debugf("... not present") return false, -1, nil default: return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode)) } } // mountBlob tries to mount blob srcDigest from srcRepo to the current destination. func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error { u := url.URL{ Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)), RawQuery: url.Values{ "mount": {srcDigest.String()}, "from": {reference.Path(srcRepo)}, }.Encode(), } mountPath := u.String() logrus.Debugf("Trying to mount %s", mountPath) res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope) if err != nil { return err } defer res.Body.Close() switch res.StatusCode { case http.StatusCreated: logrus.Debugf("... mount OK") return nil case http.StatusAccepted: // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process. // Abort, and let the ultimate caller do an upload when its ready, instead. // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested. uploadLocation, err := res.Location() if err != nil { return errors.Wrap(err, "Error determining upload URL after a mount attempt") } logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String()) res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope) if err != nil { logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) } else { defer res2.Body.Close() if res2.StatusCode != http.StatusNoContent { logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode)) } } // Anyway, if canceling the upload fails, ignore it and return the more important error: return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name()) default: logrus.Debugf("Error mounting, response %#v", *res) return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name()) } } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. 
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf("Cannot check for a blob with unknown digest") } // First, check whether the blob happens to already exist at the destination. exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil) if err != nil { return false, types.BlobInfo{}, err } if exists { cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil } // Then try reusing blobs from other locations. for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) { candidateRepo, err := parseBICLocationReference(candidate.Location) if err != nil { logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name()) // Sanity checks: if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) continue } if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest { logrus.Debug("... Already tried the primary destination") continue } // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway. // Checking candidateRepo, and mounting from it, requires an // expanded token scope. extraScope := &authScope{ remoteName: reference.Path(candidateRepo), actions: "pull", } // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead. // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure. // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly. // Even worse, docker/distribution does not actually reasonably implement canceling uploads // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask); // so, be a nice client and don't create unnecessary upload sessions on the server. exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope) if err != nil { logrus.Debugf("... Failed: %v", err) continue } if !exists { // FIXME? Should we drop the blob from cache here (and elsewhere?)? continue // logrus.Debug() already happened in blobExists } if candidateRepo.Name() != d.ref.ref.Name() { if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil { logrus.Debugf("... Mount failed: %v", err) continue } } cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil } return false, types.BlobInfo{}, nil } // PutManifest writes manifest to the destination. // FIXME?
This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema) // and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) error { digest, err := manifest.Digest(m) if err != nil { return err } d.manifestDigest = digest refTail, err := d.ref.tagOrDigest() if err != nil { return err } path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail) headers := map[string][]string{} mimeType := manifest.GuessMIMEType(m) if mimeType != "" { headers["Content-Type"] = []string{mimeType} } res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil) if err != nil { return err } defer res.Body.Close() if !successStatus(res.StatusCode) { err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name()) if isManifestInvalidError(errors.Cause(err)) { err = types.ManifestTypeRejectedError{Err: err} } return err } return nil } // successStatus returns true if the argument is a successful HTTP response // code (in the range 200 - 399 inclusive). func successStatus(status int) bool { return status >= 200 && status <= 399 } // isManifestInvalidError returns true iff err from client.HandleErrorResponse is a “manifest invalid” error. func isManifestInvalidError(err error) bool { errors, ok := err.(errcode.Errors) if !ok || len(errors) == 0 { return false } err = errors[0] ec, ok := err.(errcode.ErrorCoder) if !ok { return false } switch ec.ErrorCode() { // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false. case v2.ErrorCodeManifestInvalid: return true // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd) // when uploading to a tag (because it can’t find a matching tag inside the manifest) case v2.ErrorCodeTagInvalid: return true // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when // uploading an OCI manifest that is (correctly, according to the spec) missing // a top-level media type. See libpod issue #1719 // FIXME: remove this case when ECR behavior is fixed case errcode.ErrorCodeUnsupported: return strings.Contains(err.Error(), "Invalid JSON syntax") default: return false } } func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { // Do not fail if we don’t really need to support signatures. if len(signatures) == 0 { return nil } if err := d.c.detectProperties(ctx); err != nil { return err } switch { case d.c.signatureBase != nil: return d.putSignaturesToLookaside(signatures) case d.c.supportsSignatures: return d.putSignaturesToAPIExtension(ctx, signatures) default: return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") } } // putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in d.c.signatureBase, // which is not nil. func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error { // FIXME? This overwrites files one at a time, definitely not atomic. // A failure when updating signatures with a reordered copy could lose some of them. // Skip dealing with the manifest digest if not necessary.
if len(signatures) == 0 { return nil } if d.manifestDigest.String() == "" { // This shouldn’t happen; ImageDestination users are required to call PutManifest before PutSignatures return errors.Errorf("Unknown manifest digest, can't add signatures") } // NOTE: Keep this in sync with docs/signature-protocols.md! for i, signature := range signatures { url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) if url == nil { return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") } err := d.putOneSignature(url, signature) if err != nil { return err } } // Remove any other signatures, if present. // We stop at the first missing signature; if a previous deleting loop aborted // prematurely, this may not clean up all of them, but one missing signature // is enough for dockerImageSource to stop looking for other signatures, so that // is sufficient. for i := len(signatures); ; i++ { url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) if url == nil { return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") } missing, err := d.c.deleteOneSignature(url) if err != nil { return err } if missing { break } } return nil } // putOneSignature stores one signature to url. // NOTE: Keep this in sync with docs/signature-protocols.md! func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { switch url.Scheme { case "file": logrus.Debugf("Writing to %s", url.Path) err := os.MkdirAll(filepath.Dir(url.Path), 0755) if err != nil { return err } err = ioutil.WriteFile(url.Path, signature, 0644) if err != nil { return err } return nil case "http", "https": return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) default: return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) } } // deleteOneSignature deletes a signature from url, if it exists. // If it successfully determines that the signature does not exist, returns (true, nil). // NOTE: Keep this in sync with docs/signature-protocols.md! func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) { switch url.Scheme { case "file": logrus.Debugf("Deleting %s", url.Path) err := os.Remove(url.Path) if err != nil && os.IsNotExist(err) { return true, nil } return false, err case "http", "https": return false, errors.Errorf("Deleting directly from a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) default: return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) } } // putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension. func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte) error { // Skip dealing with the manifest digest, or reading the old state, if not necessary. if len(signatures) == 0 { return nil } if d.manifestDigest.String() == "" { // This shouldn’t happen; ImageDestination users are required to call PutManifest before PutSignatures return errors.Errorf("Unknown manifest digest, can't add signatures") } // Because image signatures are a shared resource in Atomic Registry, the default upload // always adds signatures. Eventually we should also allow removing signatures, // but the X-Registry-Supports-Signatures API extension does not support that yet.
existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, d.manifestDigest) if err != nil { return err } existingSigNames := map[string]struct{}{} for _, sig := range existingSignatures.Signatures { existingSigNames[sig.Name] = struct{}{} } sigExists: for _, newSig := range signatures { for _, existingSig := range existingSignatures.Signatures { if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { continue sigExists } } // The API expects us to invent a new unique name. This is racy, but hopefully good enough. var signatureName string for { randBytes := make([]byte, 16) n, err := rand.Read(randBytes) if err != nil || n != 16 { return errors.Wrapf(err, "Error generating random signature len %d", n) } signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes) if _, ok := existingSigNames[signatureName]; !ok { break } } sig := extensionSignature{ Version: extensionSignatureSchemaVersion, Name: signatureName, Type: extensionSignatureTypeAtomic, Content: newSig, } body, err := json.Marshal(sig) if err != nil { return err } path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String()) res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil) if err != nil { return err } defer res.Body.Close() if res.StatusCode != http.StatusCreated { body, err := ioutil.ReadAll(res.Body) if err == nil { logrus.Debugf("Error body %s", string(body)) } logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry) } } return nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) func (d *dockerImageDestination) Commit(ctx context.Context) error { return nil } image-4.0.1/docker/docker_image_src.go000066400000000000000000000366431354546467100177060ustar00rootroot00000000000000package docker import ( "context" "fmt" "io" "io/ioutil" "mime" "net/http" "net/url" "os" "strconv" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/sysregistriesv2" "github.com/containers/image/v4/types" "github.com/docker/distribution/registry/client" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type dockerImageSource struct { ref dockerReference c *dockerClient // State cachedManifest []byte // nil if not loaded yet cachedManifestMIMEType string // Only valid if cachedManifest != nil } // newImageSource creates a new ImageSource for the specified image reference. // The caller must call .Close() on the returned ImageSource. func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) if err != nil { return nil, errors.Wrapf(err, "error loading registries configuration") } if registry == nil { // No configuration was found for the provided reference, so use the // equivalent of a default configuration.
registry = &sysregistriesv2.Registry{ Endpoint: sysregistriesv2.Endpoint{ Location: ref.ref.String(), }, Prefix: ref.ref.String(), } } primaryDomain := reference.Domain(ref.ref) // Check all endpoints for the manifest availability. If we find one that does // contain the image, it will be used for all future pull actions. Always try the // non-mirror original location last; this both transparently handles the case // of no mirrors configured, and ensures we return the error encountered when // accessing the upstream location if all endpoints fail. manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint") pullSources, err := registry.PullSourcesFromReference(ref.ref) if err != nil { return nil, err } for _, pullSource := range pullSources { logrus.Debugf("Trying to pull %q", pullSource.Reference) dockerRef, err := newReference(pullSource.Reference) if err != nil { return nil, err } endpointSys := sys // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors. if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain { copy := *endpointSys copy.DockerAuthConfig = nil endpointSys = &copy } client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull") if err != nil { return nil, err } client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure testImageSource := &dockerImageSource{ ref: dockerRef, c: client, } manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx) if manifestLoadErr == nil { return testImageSource, nil } } return nil, manifestLoadErr } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (s *dockerImageSource) Reference() types.ImageReference { return s.ref } // Close removes resources associated with an initialized ImageSource, if any. func (s *dockerImageSource) Close() error { return nil } // LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. func (s *dockerImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } // simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. func simplifyContentType(contentType string) string { if contentType == "" { return contentType } mimeType, _, err := mime.ParseMediaType(contentType) if err != nil { return "" } return mimeType } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
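//
// A hedged usage sketch (illustrative only; "src" is a types.ImageSource obtained via NewImageSource):
//
//	manblob, mimeType, err := src.GetManifest(ctx, nil) // nil requests the primary manifest
//	if err != nil {
//		return err
//	}
//	// To read one instance of a manifest list, pass that instance's digest instead of nil.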
func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if instanceDigest != nil { return s.fetchManifest(ctx, instanceDigest.String()) } err := s.ensureManifestIsLoaded(ctx) if err != nil { return nil, "", err } return s.cachedManifest, s.cachedManifestMIMEType, nil } func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) headers := map[string][]string{ "Accept": manifest.DefaultRequestedManifestMIMETypes, } res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil) if err != nil { return nil, "", err } defer res.Body.Close() if res.StatusCode != http.StatusOK { return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name()) } manblob, err := ioutil.ReadAll(res.Body) if err != nil { return nil, "", err } return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil } // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType // // ImageSource implementations are not required or expected to do any caching, // but because our signatures are “attached” to the manifest digest, // we need to ensure that the digests of the manifest returned by GetManifest(ctx, nil) // and used by GetSignatures(ctx, nil) are consistent; otherwise we would get spurious // signature verification failures when pulling while a tag is being updated. func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { if s.cachedManifest != nil { return nil } reference, err := s.ref.tagOrDigest() if err != nil { return err } manblob, mt, err := s.fetchManifest(ctx, reference) if err != nil { return err } // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. s.cachedManifest = manblob s.cachedManifestMIMEType = mt return nil } func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { var ( resp *http.Response err error ) for _, url := range urls { resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) if err == nil { if resp.StatusCode != http.StatusOK { err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode)) logrus.Debug(err) continue } break } } if err != nil { return nil, 0, err } return resp.Body, getBlobSize(resp), nil } func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { size = -1 } return size } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *dockerImageSource) HasThreadSafeGetBlob() bool { return true } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1, and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
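//
// For illustration, a hedged caller-side sketch ("src", "layerDigest" and "blobCache" are the caller's values):
//
//	rc, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: layerDigest}, blobCache)
//	if err != nil {
//		return err
//	}
//	defer rc.Close() // size may be -1 if the registry did not send a usable Content-Length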
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if len(info.URLs) != 0 { return s.getExternalBlob(ctx, info.URLs) } path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) logrus.Debugf("Downloading %s", path) res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil) if err != nil { return nil, 0, err } if res.StatusCode != http.StatusOK { res.Body.Close() return nil, 0, errors.Errorf("Invalid status code returned when fetching blob from %s: %d (%s)", path, res.StatusCode, http.StatusText(res.StatusCode)) } cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref)) return res.Body, getBlobSize(res), nil } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if err := s.c.detectProperties(ctx); err != nil { return nil, err } switch { case s.c.signatureBase != nil: return s.getSignaturesFromLookaside(ctx, instanceDigest) case s.c.supportsSignatures: return s.getSignaturesFromAPIExtension(ctx, instanceDigest) default: return [][]byte{}, nil } } // manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, // or finally, from a fetched manifest. func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { if instanceDigest != nil { return *instanceDigest, nil } if digested, ok := s.ref.ref.(reference.Digested); ok { d := digested.Digest() if d.Algorithm() == digest.Canonical { return d, nil } } if err := s.ensureManifestIsLoaded(ctx); err != nil { return "", err } return manifest.Digest(s.cachedManifest) } // getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, // which is not nil. func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } // NOTE: Keep this in sync with docs/signature-protocols.md! signatures := [][]byte{} for i := 0; ; i++ { url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) if url == nil { return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") } signature, missing, err := s.getOneSignature(ctx, url) if err != nil { return nil, err } if missing { break } signatures = append(signatures, signature) } return signatures, nil } // getOneSignature downloads one signature from url. // If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. // NOTE: Keep this in sync with docs/signature-protocols.md!
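// For reference, the lookaside URLs probed by the loop above have the shape produced by
// signatureStorageURL in lookaside.go:
//
//	<base>@sha256=<manifest-digest-hex>/signature-1
//	<base>@sha256=<manifest-digest-hex>/signature-2
//	...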
func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { switch url.Scheme { case "file": logrus.Debugf("Reading %s", url.Path) sig, err := ioutil.ReadFile(url.Path) if err != nil { if os.IsNotExist(err) { return nil, true, nil } return nil, false, err } return sig, false, nil case "http", "https": logrus.Debugf("GET %s", url) req, err := http.NewRequest("GET", url.String(), nil) if err != nil { return nil, false, err } req = req.WithContext(ctx) res, err := s.c.client.Do(req) if err != nil { return nil, false, err } defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return nil, true, nil } else if res.StatusCode != http.StatusOK { return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode)) } sig, err := ioutil.ReadAll(res.Body) if err != nil { return nil, false, err } return sig, false, nil default: return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) } } // getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) if err != nil { return nil, err } var sigs [][]byte for _, sig := range parsedBody.Signatures { if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { sigs = append(sigs, sig.Content) } } return sigs, nil } // deleteImage deletes the named image from the registry, if supported. func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { // docker/distribution does not document what action should be used for deleting images. // // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. // OpenShift ignores the action string (both the password and the token are OpenShift API tokens identifying a user). // // We have to hard-code a single string; luckily both docker/distribution and quay.io support "*" to mean "everything". c, err := newDockerClientFromRef(sys, ref, true, "*") if err != nil { return err } headers := map[string][]string{ "Accept": manifest.DefaultRequestedManifestMIMETypes, } refTail, err := ref.tagOrDigest() if err != nil { return err } getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil) if err != nil { return err } defer get.Body.Close() manifestBody, err := ioutil.ReadAll(get.Body) if err != nil { return err } switch get.StatusCode { case http.StatusOK: case http.StatusNotFound: return errors.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) default: return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) } digest := get.Header.Get("Docker-Content-Digest") deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) // When retrieving the digest from a registry >= 2.3 use the following header: // "Accept": "application/vnd.docker.distribution.manifest.v2+json" delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil) if err != nil { return err } defer delete.Body.Close() body, err := ioutil.ReadAll(delete.Body) if err != nil { return err } if delete.StatusCode != http.StatusAccepted { return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) } if c.signatureBase != nil { manifestDigest, err := manifest.Digest(manifestBody) if err != nil { return err } for i := 0; ; i++ { url := signatureStorageURL(c.signatureBase, manifestDigest, i) if url == nil { return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") } missing, err := c.deleteOneSignature(url) if err != nil { return err } if missing { break } } } return nil } image-4.0.1/docker/docker_image_src_test.go000066400000000000000000000013161354546467100207330ustar00rootroot00000000000000package docker import ( "testing" "github.com/stretchr/testify/assert" ) func TestSimplifyContentType(t *testing.T) { for _, c := range []struct{ input, expected string }{ {"", ""}, {"application/json", "application/json"}, {"application/json;charset=utf-8", "application/json"}, {"application/json; charset=utf-8", "application/json"}, {"application/json ; charset=utf-8", "application/json"}, {"application/json\t;\tcharset=utf-8", "application/json"}, {"application/json ;charset=utf-8", "application/json"}, {`application/json; charset="utf-8"`, "application/json"}, {"completely invalid", ""}, } { out := simplifyContentType(c.input) assert.Equal(t, c.expected, out, c.input) } } image-4.0.1/docker/docker_transport.go000066400000000000000000000175661354546467100200150ustar00rootroot00000000000000package docker import ( "context" "fmt" "strings" "github.com/containers/image/v4/docker/policyconfiguration" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) func init() { transports.Register(Transport) } // Transport is an ImageTransport for Docker registry-hosted images. var Transport = dockerTransport{} type dockerTransport struct{} func (t dockerTransport) Name() string { return "docker" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) { return ParseReference(reference) } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { // FIXME? 
We could be verifying the various character set and length restrictions // from docker/distribution/reference.regexp.go, but other than that there // are few semantically invalid strings. return nil } // dockerReference is an ImageReference for Docker images. type dockerReference struct { ref reference.Named // By construction we know that !reference.IsNameOnly(ref) } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference. func ParseReference(refString string) (types.ImageReference, error) { if !strings.HasPrefix(refString, "//") { return nil, errors.Errorf("docker: image reference %s does not start with //", refString) } ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) if err != nil { return nil, err } ref = reference.TagNameOnly(ref) return NewReference(ref) } // NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). func NewReference(ref reference.Named) (types.ImageReference, error) { return newReference(ref) } // newReference returns a dockerReference for a named reference. func newReference(ref reference.Named) (dockerReference, error) { if reference.IsNameOnly(ref) { return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) } // A github.com/distribution/reference value can have a tag and a digest at the same time! // The docker/distribution API does not really support that (we can’t ask for an image with a specific // tag and digest), so fail. This MAY be accepted in the future. // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop // the tag or the digest first?) _, isTagged := ref.(reference.NamedTagged) _, isDigested := ref.(reference.Canonical) if isTagged && isDigested { return dockerReference{}, errors.Errorf("Docker references with both a tag and a digest are currently not supported") } return dockerReference{ ref: ref, }, nil } func (ref dockerReference) Transport() types.ImageTransport { return Transport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image; it does not contain the Transport().Name() prefix. func (ref dockerReference) StringWithinTransport() string { return "//" + reference.FamiliarString(ref.ref) } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref dockerReference) DockerReference() reference.Named { return ref.ref } // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. 
various references with exactly the same semantics should return the same configuration identity) // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. func (ref dockerReference) PolicyConfigurationIdentity() string { res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) } return res } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref dockerReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.ref) } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { return newImage(ctx, sys, ref) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { return newImageSource(ctx, sys, ref) } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(sys, ref) } // DeleteImage deletes the named image from the registry, if supported. func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { return deleteImage(ctx, sys, ref) } // tagOrDigest returns a tag or digest from the reference. func (ref dockerReference) tagOrDigest() (string, error) { if ref, ok := ref.ref.(reference.Canonical); ok { return ref.Digest().String(), nil } if ref, ok := ref.ref.(reference.NamedTagged); ok { return ref.Tag(), nil } // This should not happen, NewReference above refuses reference.IsNameOnly values. 
return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) } image-4.0.1/docker/docker_transport_test.go000066400000000000000000000164331354546467100210440ustar00rootroot00000000000000package docker import ( "context" "testing" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" sha256digest = "@sha256:" + sha256digestHex ) func TestTransportName(t *testing.T) { assert.Equal(t, "docker", Transport.Name()) } func TestTransportParseReference(t *testing.T) { testParseReference(t, Transport.ParseReference) } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { for _, scope := range []string{ "docker.io/library/busybox" + sha256digest, "docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } } func TestParseReference(t *testing.T) { testParseReference(t, ParseReference) } // testParseReference is a test shared for Transport.ParseReference and ParseReference. func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { for _, c := range []struct{ input, expected string }{ {"busybox", ""}, // Missing // prefix {"//busybox:notlatest", "docker.io/library/busybox:notlatest"}, // Explicit tag {"//busybox" + sha256digest, "docker.io/library/busybox" + sha256digest}, // Explicit digest {"//busybox", "docker.io/library/busybox:latest"}, // Default tag // A github.com/distribution/reference value can have a tag and a digest at the same time! // The docker/distribution API does not really support that (we can’t ask for an image with a specific // tag and digest), so fail. This MAY be accepted in the future. {"//busybox:latest" + sha256digest, ""}, // Both tag and digest {"//docker.io/library/busybox:latest", "docker.io/library/busybox:latest"}, // All implied values explicitly specified {"//UPPERCASEISINVALID", ""}, // Invalid input } { ref, err := fn(c.input) if c.expected == "" { assert.Error(t, err, c.input) } else { require.NoError(t, err, c.input) dockerRef, ok := ref.(dockerReference) require.True(t, ok, c.input) assert.Equal(t, c.expected, dockerRef.ref.String(), c.input) } } } // A common list of reference formats to test for the various ImageReference methods. 
var validReferenceTestCases = []struct{ input, dockerRef, stringWithinTransport string }{ {"busybox:notlatest", "docker.io/library/busybox:notlatest", "//busybox:notlatest"}, // Explicit tag {"busybox" + sha256digest, "docker.io/library/busybox" + sha256digest, "//busybox" + sha256digest}, // Explicit digest {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "//busybox:latest"}, // All implied values explicitly specified {"example.com/ns/foo:bar", "example.com/ns/foo:bar", "//example.com/ns/foo:bar"}, // All values explicitly specified } func TestNewReference(t *testing.T) { for _, c := range validReferenceTestCases { parsed, err := reference.ParseNormalizedNamed(c.input) require.NoError(t, err) ref, err := NewReference(parsed) require.NoError(t, err, c.input) dockerRef, ok := ref.(dockerReference) require.True(t, ok, c.input) assert.Equal(t, c.dockerRef, dockerRef.ref.String(), c.input) } // Neither a tag nor digest parsed, err := reference.ParseNormalizedNamed("busybox") require.NoError(t, err) _, err = NewReference(parsed) assert.Error(t, err) // A github.com/distribution/reference value can have a tag and a digest at the same time! parsed, err = reference.ParseNormalizedNamed("busybox:notlatest" + sha256digest) require.NoError(t, err) _, ok := parsed.(reference.Canonical) require.True(t, ok) _, ok = parsed.(reference.NamedTagged) require.True(t, ok) _, err = NewReference(parsed) assert.Error(t, err) } func TestReferenceTransport(t *testing.T) { ref, err := ParseReference("//busybox") require.NoError(t, err) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { for _, c := range validReferenceTestCases { ref, err := ParseReference("//" + c.input) require.NoError(t, err, c.input) stringRef := ref.StringWithinTransport() assert.Equal(t, c.stringWithinTransport, stringRef, c.input) // Do one more round to verify that the output can be parsed, to an equal value. ref2, err := Transport.ParseReference(stringRef) require.NoError(t, err, c.input) stringRef2 := ref2.StringWithinTransport() assert.Equal(t, stringRef, stringRef2, c.input) } } func TestReferenceDockerReference(t *testing.T) { for _, c := range validReferenceTestCases { ref, err := ParseReference("//" + c.input) require.NoError(t, err, c.input) dockerRef := ref.DockerReference() require.NotNil(t, dockerRef, c.input) assert.Equal(t, c.dockerRef, dockerRef.String(), c.input) } } func TestReferencePolicyConfigurationIdentity(t *testing.T) { // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. ref, err := ParseReference("//busybox") require.NoError(t, err) assert.Equal(t, "docker.io/library/busybox:latest", ref.PolicyConfigurationIdentity()) } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. 
ref, err := ParseReference("//busybox") require.NoError(t, err) assert.Equal(t, []string{ "docker.io/library/busybox", "docker.io/library", "docker.io", }, ref.PolicyConfigurationNamespaces()) } func TestReferenceNewImage(t *testing.T) { ref, err := ParseReference("//busybox") require.NoError(t, err) img, err := ref.NewImage(context.Background(), &types.SystemContext{ RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist", ArchitectureChoice: "amd64", OSChoice: "linux", }) require.NoError(t, err) defer img.Close() } func TestReferenceNewImageSource(t *testing.T) { ref, err := ParseReference("//busybox") require.NoError(t, err) src, err := ref.NewImageSource(context.Background(), &types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}) assert.NoError(t, err) defer src.Close() } func TestReferenceNewImageDestination(t *testing.T) { ref, err := ParseReference("//busybox") require.NoError(t, err) dest, err := ref.NewImageDestination(context.Background(), &types.SystemContext{RegistriesDirPath: "/this/doesnt/exist", DockerPerHostCertDirPath: "/this/doesnt/exist"}) require.NoError(t, err) defer dest.Close() } func TestReferenceTagOrDigest(t *testing.T) { for input, expected := range map[string]string{ "//busybox:notlatest": "notlatest", "//busybox" + sha256digest: "sha256:" + sha256digestHex, } { ref, err := ParseReference(input) require.NoError(t, err, input) dockerRef, ok := ref.(dockerReference) require.True(t, ok, input) tod, err := dockerRef.tagOrDigest() require.NoError(t, err, input) assert.Equal(t, expected, tod, input) } // Invalid input ref, err := reference.ParseNormalizedNamed("busybox") require.NoError(t, err) dockerRef := dockerReference{ref: ref} _, err = dockerRef.tagOrDigest() assert.Error(t, err) } image-4.0.1/docker/fixtures/000077500000000000000000000000001354546467100157355ustar00rootroot00000000000000image-4.0.1/docker/fixtures/registries.d/000077500000000000000000000000001354546467100203375ustar00rootroot00000000000000image-4.0.1/docker/fixtures/registries.d/emptyConfig.yaml000066400000000000000000000000021354546467100234770ustar00rootroot00000000000000{}image-4.0.1/docker/fixtures/registries.d/internal-example.com.yaml000066400000000000000000000010751354546467100252500ustar00rootroot00000000000000docker: example.com: sigstore: https://sigstore.example.com registry.test.example.com: sigstore: http://registry.test.example.com/sigstore registry.test.example.com:8888: sigstore: http://registry.test.example.com:8889/sigstore sigstore-staging: https://registry.test.example.com:8889/sigstore/specialAPIserverWhichDoesntExist localhost: sigstore: file:///home/mitr/mydevelopment1 localhost:8080: sigstore: file:///home/mitr/mydevelopment2 localhost/invalid/url/test: sigstore: ":emptyscheme" image-4.0.1/docker/fixtures/registries.d/internet-user.yaml000066400000000000000000000007501354546467100240310ustar00rootroot00000000000000default-docker: sigstore: file:///mnt/companywide/signatures/for/other/repositories docker: docker.io/contoso: sigstore: https://sigstore.contoso.com/fordocker docker.io/centos: sigstore: https://sigstore.centos.org/ docker.io/centos/mybetaprooduct: sigstore: http://localhost:9999/mybetaWIP/sigstore sigstore-staging: file:///srv/mybetaWIP/sigstore docker.io/centos/mybetaproduct:latest: sigstore: https://sigstore.centos.org/ image-4.0.1/docker/fixtures/registries.d/invalid-but.notyaml000066400000000000000000000000021354546467100241520ustar00rootroot00000000000000} 
image-4.0.1/docker/lookaside.go000066400000000000000000000163611354546467100163740ustar00rootroot00000000000000package docker import ( "fmt" "io/ioutil" "net/url" "os" "path" "path/filepath" "strings" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/types" "github.com/ghodss/yaml" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. // You can override this at build time with // -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' var systemRegistriesDirPath = builtinRegistriesDirPath // builtinRegistriesDirPath is the path to registries.d. // DO NOT change this, instead see systemRegistriesDirPath above. const builtinRegistriesDirPath = "/etc/containers/registries.d" // registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. // NOTE: Keep this in sync with docs/registries.d.md! type registryConfiguration struct { DefaultDocker *registryNamespace `json:"default-docker"` // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), Docker map[string]registryNamespace `json:"docker"` } // registryNamespace defines lookaside locations for a single namespace. type registryNamespace struct { SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. SigStoreStaging string `json:"sigstore-staging"` // For writing only. } // signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. // Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. // configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. func configuredSignatureStorageBase(sys *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { // FIXME? Loading and parsing the config could be cached across calls. dirPath := registriesDirPath(sys) logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) config, err := loadAndMergeConfig(dirPath) if err != nil { return nil, err } topLevel := config.signatureTopLevel(ref, write) if topLevel == "" { return nil, nil } url, err := url.Parse(topLevel) if err != nil { return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) } // NOTE: Keep this in sync with docs/signature-protocols.md! // FIXME? Restrict to explicitly supported schemes? repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
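// For example, with the fixtures' registries.d entry mapping "example.com" to
// "https://sigstore.example.com", the reference //example.com/my/project yields the base
// "https://sigstore.example.com/my/project" (see TestConfiguredSignatureStorageBase).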
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) } url.Path = url.Path + "/" + repo return url, nil } // registriesDirPath returns a path to registries.d func registriesDirPath(sys *types.SystemContext) string { if sys != nil { if sys.RegistriesDirPath != "" { return sys.RegistriesDirPath } if sys.RootForImplicitAbsolutePaths != "" { return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) } } return systemRegistriesDirPath } // loadAndMergeConfig loads configuration files in dirPath func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} dockerDefaultMergedFrom := "" nsMergedFrom := map[string]string{} dir, err := os.Open(dirPath) if err != nil { if os.IsNotExist(err) { return &mergedConfig, nil } return nil, err } configNames, err := dir.Readdirnames(0) if err != nil { return nil, err } for _, configName := range configNames { if !strings.HasSuffix(configName, ".yaml") { continue } configPath := filepath.Join(dirPath, configName) configBytes, err := ioutil.ReadFile(configPath) if err != nil { return nil, err } var config registryConfiguration err = yaml.Unmarshal(configBytes, &config) if err != nil { return nil, errors.Wrapf(err, "Error parsing %s", configPath) } if config.DefaultDocker != nil { if mergedConfig.DefaultDocker != nil { return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, dockerDefaultMergedFrom, configPath) } mergedConfig.DefaultDocker = config.DefaultDocker dockerDefaultMergedFrom = configPath } for nsName, nsConfig := range config.Docker { // includes config.Docker == nil if _, ok := mergedConfig.Docker[nsName]; ok { return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, nsName, nsMergedFrom[nsName], configPath) } mergedConfig.Docker[nsName] = nsConfig nsMergedFrom[nsName] = configPath } } return &mergedConfig, nil } // config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. // (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { if config.Docker != nil { // Look for a full match. identity := ref.PolicyConfigurationIdentity() if ns, ok := config.Docker[identity]; ok { logrus.Debugf(` Using "docker" namespace %s`, identity) if url := ns.signatureTopLevel(write); url != "" { return url } } // Look for a match of the possible parent namespaces. for _, name := range ref.PolicyConfigurationNamespaces() { if ns, ok := config.Docker[name]; ok { logrus.Debugf(` Using "docker" namespace %s`, name) if url := ns.signatureTopLevel(write); url != "" { return url } } } } // Look for a default location if config.DefaultDocker != nil { logrus.Debugf(` Using "default-docker" configuration`) if url := config.DefaultDocker.signatureTopLevel(write); url != "" { return url } } logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity()) return "" } // ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. // or "" if nothing has been configured. 
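// For example (mirroring TestRegistryNamespaceSignatureTopLevel): with both sigstore and
// sigstore-staging configured, writes resolve to SigStoreStaging while reads resolve to SigStore;
// with only sigstore-staging configured, reads resolve to "".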
func (ns registryNamespace) signatureTopLevel(write bool) string { if write && ns.SigStoreStaging != "" { logrus.Debugf(`  Using %s`, ns.SigStoreStaging) return ns.SigStoreStaging } if ns.SigStore != "" { logrus.Debugf(`  Using %s`, ns.SigStore) return ns.SigStore } return "" } // signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest, or nil if not applicable. // Returns nil iff base == nil. // NOTE: Keep this in sync with docs/signature-protocols.md! func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { if base == nil { return nil } url := *base url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) return &url } image-4.0.1/docker/lookaside_test.go000066400000000000000000000246321354546467100174310ustar00rootroot00000000000000package docker import ( "fmt" "io/ioutil" "net/url" "os" "path/filepath" "testing" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func dockerRefFromString(t *testing.T, s string) dockerReference { ref, err := ParseReference(s) require.NoError(t, err, s) dockerRef, ok := ref.(dockerReference) require.True(t, ok, s) return dockerRef } func TestConfiguredSignatureStorageBase(t *testing.T) { // Error reading configuration directory (/dev/null is not a directory) _, err := configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "/dev/null"}, dockerRefFromString(t, "//busybox"), false) assert.Error(t, err) // No match found emptyDir, err := ioutil.TempDir("", "empty-dir") require.NoError(t, err) defer os.RemoveAll(emptyDir) base, err := configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: emptyDir}, dockerRefFromString(t, "//this/is/not/in/the:configuration"), false) assert.NoError(t, err) assert.Nil(t, base) // Invalid URL _, err = configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "fixtures/registries.d"}, dockerRefFromString(t, "//localhost/invalid/url/test"), false) assert.Error(t, err) // Success base, err = configuredSignatureStorageBase(&types.SystemContext{RegistriesDirPath: "fixtures/registries.d"}, dockerRefFromString(t, "//example.com/my/project"), false) assert.NoError(t, err) require.NotNil(t, base) assert.Equal(t, "https://sigstore.example.com/my/project", (*url.URL)(base).String()) } func TestRegistriesDirPath(t *testing.T) { const nondefaultPath = "/this/is/not/the/default/registries.d" const variableReference = "$HOME" const rootPrefix = "/root/prefix" for _, c := range []struct { sys *types.SystemContext expected string }{ // The common case {nil, systemRegistriesDirPath}, // There is a context, but it does not override the path. 
{&types.SystemContext{}, systemRegistriesDirPath}, // Path overridden {&types.SystemContext{RegistriesDirPath: nondefaultPath}, nondefaultPath}, // Root overridden { &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, filepath.Join(rootPrefix, systemRegistriesDirPath), }, // Root and path overrides present simultaneously, { &types.SystemContext{ RootForImplicitAbsolutePaths: rootPrefix, RegistriesDirPath: nondefaultPath, }, nondefaultPath, }, // No environment expansion happens in the overridden paths {&types.SystemContext{RegistriesDirPath: variableReference}, variableReference}, } { path := registriesDirPath(c.sys) assert.Equal(t, c.expected, path) } } func TestLoadAndMergeConfig(t *testing.T) { tmpDir, err := ioutil.TempDir("", "merge-config") require.NoError(t, err) defer os.RemoveAll(tmpDir) // No registries.d exists config, err := loadAndMergeConfig(filepath.Join(tmpDir, "thisdoesnotexist")) require.NoError(t, err) assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config) // Empty registries.d directory emptyDir := filepath.Join(tmpDir, "empty") err = os.Mkdir(emptyDir, 0755) require.NoError(t, err) config, err = loadAndMergeConfig(emptyDir) require.NoError(t, err) assert.Equal(t, &registryConfiguration{Docker: map[string]registryNamespace{}}, config) // Unreadable registries.d directory unreadableDir := filepath.Join(tmpDir, "unreadable") err = os.Mkdir(unreadableDir, 0000) require.NoError(t, err) config, err = loadAndMergeConfig(unreadableDir) assert.Error(t, err) // An unreadable file in a registries.d directory unreadableFileDir := filepath.Join(tmpDir, "unreadableFile") err = os.Mkdir(unreadableFileDir, 0755) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(unreadableFileDir, "0.yaml"), []byte("{}"), 0644) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(unreadableFileDir, "1.yaml"), nil, 0000) require.NoError(t, err) config, err = loadAndMergeConfig(unreadableFileDir) assert.Error(t, err) // Invalid YAML invalidYAMLDir := filepath.Join(tmpDir, "invalidYAML") err = os.Mkdir(invalidYAMLDir, 0755) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(invalidYAMLDir, "0.yaml"), []byte("}"), 0644) require.NoError(t, err) config, err = loadAndMergeConfig(invalidYAMLDir) assert.Error(t, err) // Duplicate DefaultDocker duplicateDefault := filepath.Join(tmpDir, "duplicateDefault") err = os.Mkdir(duplicateDefault, 0755) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(duplicateDefault, "0.yaml"), []byte("default-docker:\n sigstore: file:////tmp/something"), 0644) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(duplicateDefault, "1.yaml"), []byte("default-docker:\n sigstore: file:////tmp/different"), 0644) require.NoError(t, err) config, err = loadAndMergeConfig(duplicateDefault) require.Error(t, err) assert.Contains(t, err.Error(), "0.yaml") assert.Contains(t, err.Error(), "1.yaml") // Duplicate docker namespace duplicateNS := filepath.Join(tmpDir, "duplicateNS") err = os.Mkdir(duplicateNS, 0755) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(duplicateNS, "0.yaml"), []byte("docker:\n example.com:\n sigstore: file:////tmp/something"), 0644) require.NoError(t, err) err = ioutil.WriteFile(filepath.Join(duplicateNS, "1.yaml"), []byte("docker:\n example.com:\n sigstore: file:////tmp/different"), 0644) require.NoError(t, err) config, err = loadAndMergeConfig(duplicateNS) assert.Error(t, err) assert.Contains(t, err.Error(), "0.yaml") assert.Contains(t, err.Error(), "1.yaml") // A fully worked 
example, including an empty-dictionary file and a non-.yaml file config, err = loadAndMergeConfig("fixtures/registries.d") require.NoError(t, err) assert.Equal(t, &registryConfiguration{ DefaultDocker: &registryNamespace{SigStore: "file:///mnt/companywide/signatures/for/other/repositories"}, Docker: map[string]registryNamespace{ "example.com": {SigStore: "https://sigstore.example.com"}, "registry.test.example.com": {SigStore: "http://registry.test.example.com/sigstore"}, "registry.test.example.com:8888": {SigStore: "http://registry.test.example.com:8889/sigstore", SigStoreStaging: "https://registry.test.example.com:8889/sigstore/specialAPIserverWhichDoesntExist"}, "localhost": {SigStore: "file:///home/mitr/mydevelopment1"}, "localhost:8080": {SigStore: "file:///home/mitr/mydevelopment2"}, "localhost/invalid/url/test": {SigStore: ":emptyscheme"}, "docker.io/contoso": {SigStore: "https://sigstore.contoso.com/fordocker"}, "docker.io/centos": {SigStore: "https://sigstore.centos.org/"}, "docker.io/centos/mybetaprooduct": { SigStore: "http://localhost:9999/mybetaWIP/sigstore", SigStoreStaging: "file:///srv/mybetaWIP/sigstore", }, "docker.io/centos/mybetaproduct:latest": {SigStore: "https://sigstore.centos.org/"}, }, }, config) } func TestRegistryConfigurationSignatureTopLevel(t *testing.T) { config := registryConfiguration{ DefaultDocker: &registryNamespace{SigStore: "=default", SigStoreStaging: "=default+w"}, Docker: map[string]registryNamespace{}, } for _, ns := range []string{ "localhost", "localhost:5000", "example.com", "example.com/ns1", "example.com/ns1/ns2", "example.com/ns1/ns2/repo", "example.com/ns1/ns2/repo:notlatest", } { config.Docker[ns] = registryNamespace{SigStore: ns, SigStoreStaging: ns + "+w"} } for _, c := range []struct{ input, expected string }{ {"example.com/ns1/ns2/repo:notlatest", "example.com/ns1/ns2/repo:notlatest"}, {"example.com/ns1/ns2/repo:unmatched", "example.com/ns1/ns2/repo"}, {"example.com/ns1/ns2/notrepo:notlatest", "example.com/ns1/ns2"}, {"example.com/ns1/notns2/repo:notlatest", "example.com/ns1"}, {"example.com/notns1/ns2/repo:notlatest", "example.com"}, {"unknown.example.com/busybox", "=default"}, {"localhost:5000/busybox", "localhost:5000"}, {"localhost/busybox", "localhost"}, {"localhost:9999/busybox", "=default"}, } { dr := dockerRefFromString(t, "//"+c.input) res := config.signatureTopLevel(dr, false) assert.Equal(t, c.expected, res, c.input) res = config.signatureTopLevel(dr, true) // test that forWriting is correctly propagated assert.Equal(t, c.expected+"+w", res, c.input) } config = registryConfiguration{ Docker: map[string]registryNamespace{ "unmatched": {SigStore: "a", SigStoreStaging: "b"}, }, } dr := dockerRefFromString(t, "//thisisnotmatched") res := config.signatureTopLevel(dr, false) assert.Equal(t, "", res) res = config.signatureTopLevel(dr, true) assert.Equal(t, "", res) } func TestRegistryNamespaceSignatureTopLevel(t *testing.T) { for _, c := range []struct { ns registryNamespace forWriting bool expected string }{ {registryNamespace{SigStoreStaging: "a", SigStore: "b"}, true, "a"}, {registryNamespace{SigStoreStaging: "a", SigStore: "b"}, false, "b"}, {registryNamespace{SigStore: "b"}, true, "b"}, {registryNamespace{SigStore: "b"}, false, "b"}, {registryNamespace{SigStoreStaging: "a"}, true, "a"}, {registryNamespace{SigStoreStaging: "a"}, false, ""}, {registryNamespace{}, true, ""}, {registryNamespace{}, false, ""}, } { res := c.ns.signatureTopLevel(c.forWriting) assert.Equal(t, c.expected, res, fmt.Sprintf("%#v %v", c.ns, c.forWriting)) } } func
TestSignatureStorageBaseSignatureStorageURL(t *testing.T) { const mdInput = "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" const mdMapped = "sha256=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" assert.True(t, signatureStorageURL(nil, mdInput, 0) == nil) for _, c := range []struct { base string index int expected string }{ {"file:///tmp", 0, "file:///tmp@" + mdMapped + "/signature-1"}, {"file:///tmp", 1, "file:///tmp@" + mdMapped + "/signature-2"}, {"https://localhost:5555/root", 0, "https://localhost:5555/root@" + mdMapped + "/signature-1"}, {"https://localhost:5555/root", 1, "https://localhost:5555/root@" + mdMapped + "/signature-2"}, {"http://localhost:5555/root", 0, "http://localhost:5555/root@" + mdMapped + "/signature-1"}, {"http://localhost:5555/root", 1, "http://localhost:5555/root@" + mdMapped + "/signature-2"}, } { url, err := url.Parse(c.base) require.NoError(t, err) expectedURL, err := url.Parse(c.expected) require.NoError(t, err) res := signatureStorageURL(url, mdInput, c.index) assert.Equal(t, expectedURL, res, c.expected) } } image-4.0.1/docker/policyconfiguration/000077500000000000000000000000001354546467100201535ustar00rootroot00000000000000image-4.0.1/docker/policyconfiguration/naming.go000066400000000000000000000043261354546467100217600ustar00rootroot00000000000000package policyconfiguration import ( "strings" "github.com/containers/image/v4/docker/reference" "github.com/pkg/errors" ) // DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, // as a backend for ImageReference.PolicyConfigurationIdentity. // The reference must satisfy !reference.IsNameOnly(). func DockerReferenceIdentity(ref reference.Named) (string, error) { res := ref.Name() tagged, isTagged := ref.(reference.NamedTagged) digested, isDigested := ref.(reference.Canonical) switch { case isTagged && isDigested: // Note that this CAN actually happen. return "", errors.Errorf("Unexpected Docker reference %s with both a tag and a digest", reference.FamiliarString(ref)) case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) case isTagged: res = res + ":" + tagged.Tag() case isDigested: res = res + "@" + digested.Digest().String() default: // Coverage: The above was supposed to be exhaustive. return "", errors.New("Internal inconsistency, unexpected default branch") } return res, nil } // DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, // as a backend for ImageReference.PolicyConfigurationNamespaces. // The reference must satisfy !reference.IsNameOnly(). func DockerReferenceNamespaces(ref reference.Named) []string { // Look for a match of the repository, and then of the possible parent // namespaces. Note that this only happens on the expanded host names // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", // then in its parent "docker.io/library"; in none of "busybox", // un-namespaced "library" nor in "" supposedly implicitly representing "library/". // // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last // iteration matches the host name (for any namespace).
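	// Illustrative note (added commentary, not upstream text): for the reference
	// "docker.io/library/busybox" the loop below yields, in order,
	// "docker.io/library/busybox", "docker.io/library", and "docker.io", one entry
	// per successive strings.LastIndex truncation.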
res := []string{} name := ref.Name() for { res = append(res, name) lastSlash := strings.LastIndex(name, "/") if lastSlash == -1 { break } name = name[:lastSlash] } return res } image-4.0.1/docker/policyconfiguration/naming_test.go000066400000000000000000000064431354546467100230210ustar00rootroot00000000000000package policyconfiguration import ( "fmt" "strings" "testing" "github.com/containers/image/v4/docker/reference" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestDockerReference tests DockerReferenceIdentity and DockerReferenceNamespaces simultaneously // to ensure they are consistent. func TestDockerReference(t *testing.T) { sha256Digest := "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" // Test both that DockerReferenceIdentity returns the expected value (fullName+suffix), // and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are // consistent. for inputName, expectedNS := range map[string][]string{ "example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"}, "example.com/repo": {"example.com/repo", "example.com"}, "localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"}, // Note that "localhost" is special here: notlocalhost/repo is parsed as docker.io/notlocalhost/repo: "localhost/repo": {"localhost/repo", "localhost"}, "notlocalhost/repo": {"docker.io/notlocalhost/repo", "docker.io/notlocalhost", "docker.io"}, "docker.io/ns/repo": {"docker.io/ns/repo", "docker.io/ns", "docker.io"}, "docker.io/library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, "docker.io/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, "ns/repo": {"docker.io/ns/repo", "docker.io/ns", "docker.io"}, "library/repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, "repo": {"docker.io/library/repo", "docker.io/library", "docker.io"}, } { for inputSuffix, mappedSuffix := range map[string]string{ ":tag": ":tag", sha256Digest: sha256Digest, } { fullInput := inputName + inputSuffix ref, err := reference.ParseNormalizedNamed(fullInput) require.NoError(t, err, fullInput) identity, err := DockerReferenceIdentity(ref) require.NoError(t, err, fullInput) assert.Equal(t, expectedNS[0]+mappedSuffix, identity, fullInput) ns := DockerReferenceNamespaces(ref) require.NotNil(t, ns, fullInput) require.Len(t, ns, len(expectedNS), fullInput) moreSpecific := identity for i := range expectedNS { assert.Equal(t, ns[i], expectedNS[i], fmt.Sprintf("%s item %d", fullInput, i)) assert.True(t, strings.HasPrefix(moreSpecific, ns[i])) moreSpecific = ns[i] } } } } func TestDockerReferenceIdentity(t *testing.T) { // TestDockerReference above has tested the core of the functionality, this tests only the failure cases. // Neither a tag nor digest parsed, err := reference.ParseNormalizedNamed("busybox") require.NoError(t, err) id, err := DockerReferenceIdentity(parsed) assert.Equal(t, "", id) assert.Error(t, err) // A github.com/distribution/reference value can have a tag and a digest at the same time!
parsed, err = reference.ParseNormalizedNamed("busybox:notlatest@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") require.NoError(t, err) _, ok := parsed.(reference.Canonical) require.True(t, ok) _, ok = parsed.(reference.NamedTagged) require.True(t, ok) id, err = DockerReferenceIdentity(parsed) assert.Equal(t, "", id) assert.Error(t, err) } image-4.0.1/docker/reference/000077500000000000000000000000001354546467100160225ustar00rootroot00000000000000image-4.0.1/docker/reference/README.md000066400000000000000000000003531354546467100173020ustar00rootroot00000000000000This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.image-4.0.1/docker/reference/helpers.go000066400000000000000000000021741354546467100200170ustar00rootroot00000000000000package reference import "path" // IsNameOnly returns true if reference only contains a repo name. func IsNameOnly(ref Named) bool { if _, ok := ref.(NamedTagged); ok { return false } if _, ok := ref.(Canonical); ok { return false } return true } // FamiliarName returns the familiar name string // for the given named, familiarizing if needed. func FamiliarName(ref Named) string { if nn, ok := ref.(normalizedNamed); ok { return nn.Familiar().Name() } return ref.Name() } // FamiliarString returns the familiar string representation // for the given reference, familiarizing if needed. func FamiliarString(ref Reference) string { if nn, ok := ref.(normalizedNamed); ok { return nn.Familiar().String() } return ref.String() } // FamiliarMatch reports whether ref matches the specified pattern. // See https://godoc.org/path#Match for supported patterns. func FamiliarMatch(pattern string, ref Reference) (bool, error) { matched, err := path.Match(pattern, FamiliarString(ref)) if namedRef, isNamed := ref.(Named); isNamed && !matched { matched, _ = path.Match(pattern, FamiliarName(namedRef)) } return matched, err } image-4.0.1/docker/reference/normalize.go000066400000000000000000000125261354546467100203570ustar00rootroot00000000000000package reference import ( "errors" "fmt" "strings" "github.com/opencontainers/go-digest" ) var ( legacyDefaultDomain = "index.docker.io" defaultDomain = "docker.io" officialRepoName = "library" defaultTag = "latest" ) // normalizedNamed represents a name which has been // normalized and has a familiar form. A familiar name // is what is used in Docker UI. An example normalized // name is "docker.io/library/ubuntu" and corresponding // familiar name of "ubuntu". type normalizedNamed interface { Named Familiar() Named } // ParseNormalizedNamed parses a string into a named reference // transforming a familiar name from Docker UI to a fully // qualified reference. If the value may be an identifier // use ParseAnyReference. 
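// For example (an illustrative sketch consistent with the tests in this package;
// the exact inputs here are hypothetical):
//
//	ParseNormalizedNamed("ubuntu")        // yields "docker.io/library/ubuntu"
//	ParseNormalizedNamed("myorg/app")     // yields "docker.io/myorg/app"
//	ParseNormalizedNamed("localhost/app") // yields "localhost/app"; "localhost" counts as a domain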
func ParseNormalizedNamed(s string) (Named, error) { if ok := anchoredIdentifierRegexp.MatchString(s); ok { return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) } domain, remainder := splitDockerDomain(s) var remoteName string if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { remoteName = remainder[:tagSep] } else { remoteName = remainder } if strings.ToLower(remoteName) != remoteName { return nil, errors.New("invalid reference format: repository name must be lowercase") } ref, err := Parse(domain + "/" + remainder) if err != nil { return nil, err } named, isNamed := ref.(Named) if !isNamed { return nil, fmt.Errorf("reference %s has no name", ref.String()) } return named, nil } // ParseDockerRef normalizes the image reference following the docker convention. This is added // mainly for backward compatibility. // The reference returned can only be either tagged or digested. For a reference that contains both a tag // and a digest, the function returns the digested reference; e.g. docker.io/library/busybox:latest@ // sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as // docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. func ParseDockerRef(ref string) (Named, error) { named, err := ParseNormalizedNamed(ref) if err != nil { return nil, err } if _, ok := named.(NamedTagged); ok { if canonical, ok := named.(Canonical); ok { // The reference is both tagged and digested, only // return digested. newNamed, err := WithName(canonical.Name()) if err != nil { return nil, err } newCanonical, err := WithDigest(newNamed, canonical.Digest()) if err != nil { return nil, err } return newCanonical, nil } } return TagNameOnly(named), nil } // splitDockerDomain splits a repository name into domain and remote-name strings. // If no valid domain is found, the default domain is used. The repository name // needs to be validated beforehand. func splitDockerDomain(name string) (domain, remainder string) { i := strings.IndexRune(name, '/') if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { domain, remainder = defaultDomain, name } else { domain, remainder = name[:i], name[i+1:] } if domain == legacyDefaultDomain { domain = defaultDomain } if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { remainder = officialRepoName + "/" + remainder } return } // familiarizeName returns a shortened version of the name familiar // to the Docker UI. Familiar names have the default domain // "docker.io" and "library/" repository prefix removed. // For example, "docker.io/library/redis" will have the familiar // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". // Returns a familiarized named only reference.
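// A further illustration (added commentary, not upstream text): only the exact
// "library/<component>" layout under the default domain collapses to a bare name, so
//
//	docker.io/library/redis   -> redis
//	docker.io/other/app       -> other/app
//	quay.io/library/busybox   -> quay.io/library/busybox (non-default domain is kept)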
func familiarizeName(named namedRepository) repository { repo := repository{ domain: named.Domain(), path: named.Path(), } if repo.domain == defaultDomain { repo.domain = "" // Handle official repositories which have the pattern "library/" if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { repo.path = split[1] } } return repo } func (r reference) Familiar() Named { return reference{ namedRepository: familiarizeName(r.namedRepository), tag: r.tag, digest: r.digest, } } func (r repository) Familiar() Named { return familiarizeName(r) } func (t taggedReference) Familiar() Named { return taggedReference{ namedRepository: familiarizeName(t.namedRepository), tag: t.tag, } } func (c canonicalReference) Familiar() Named { return canonicalReference{ namedRepository: familiarizeName(c.namedRepository), digest: c.digest, } } // TagNameOnly adds the default tag "latest" to a reference if it only has // a repo name. func TagNameOnly(ref Named) Named { if IsNameOnly(ref) { namedTagged, err := WithTag(ref, defaultTag) if err != nil { // Default tag must be valid, to create a NamedTagged // type with non-validated input the WithTag function // should be used instead panic(err) } return namedTagged } return ref } // ParseAnyReference parses a reference string as a possible identifier, // full digest, or familiar name. func ParseAnyReference(ref string) (Reference, error) { if ok := anchoredIdentifierRegexp.MatchString(ref); ok { return digestReference("sha256:" + ref), nil } if dgst, err := digest.Parse(ref); err == nil { return digestReference(dgst), nil } return ParseNormalizedNamed(ref) } image-4.0.1/docker/reference/normalize_test.go000066400000000000000000000423041354546467100214130ustar00rootroot00000000000000package reference import ( "strconv" "testing" "github.com/opencontainers/go-digest" ) func TestValidateReferenceName(t *testing.T) { validRepoNames := []string{ "docker/docker", "library/debian", "debian", "docker.io/docker/docker", "docker.io/library/debian", "docker.io/debian", "index.docker.io/docker/docker", "index.docker.io/library/debian", "index.docker.io/debian", "127.0.0.1:5000/docker/docker", "127.0.0.1:5000/library/debian", "127.0.0.1:5000/debian", "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", // This test case was moved from invalid to valid since it is valid input // when specified with a hostname; it removes the ambiguity about // whether the value is an identifier or a repository name "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", } invalidRepoNames := []string{ "https://github.com/docker/docker", "docker/Docker", "-docker", "-docker/docker", "-docker.io/docker/docker", "docker///docker", "docker.io/docker/Docker", "docker.io/docker///docker", "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", } for _, name := range invalidRepoNames { _, err := ParseNormalizedNamed(name) if err == nil { t.Fatalf("Expected invalid repo name for %q", name) } } for _, name := range validRepoNames { _, err := ParseNormalizedNamed(name) if err != nil { t.Fatalf("Error parsing repo name %s, got: %q", name, err) } } } func TestValidateRemoteName(t *testing.T) { validRepositoryNames := []string{ // Sanity check. "docker/docker", // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", // Allow embedded hyphens. "docker-rules/docker", // Allow multiple hyphens as well.
"docker---rules/docker", //Username doc and image name docker being tested. "doc/docker", // single character names are now allowed. "d/docker", "jess/t", // Consecutive underscores. "dock__er/docker", } for _, repositoryName := range validRepositoryNames { _, err := ParseNormalizedNamed(repositoryName) if err != nil { t.Errorf("Repository name should be valid: %v. Error: %v", repositoryName, err) } } invalidRepositoryNames := []string{ // Disallow capital letters. "docker/Docker", // Only allow one slash. "docker///docker", // Disallow 64-character hexadecimal. "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", // Disallow leading and trailing hyphens in namespace. "-docker/docker", "docker-/docker", "-docker-/docker", // Don't allow underscores everywhere (as opposed to hyphens). "____/____", "_docker/_docker", // Disallow consecutive periods. "dock..er/docker", "dock_.er/docker", "dock-.er/docker", // No repository. "docker/", //namespace too long "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", } for _, repositoryName := range invalidRepositoryNames { if _, err := ParseNormalizedNamed(repositoryName); err == nil { t.Errorf("Repository name should be invalid: %v", repositoryName) } } } func TestParseRepositoryInfo(t *testing.T) { type tcase struct { RemoteName, FamiliarName, FullName, AmbiguousName, Domain string } tcases := []tcase{ { RemoteName: "fooo/bar", FamiliarName: "fooo/bar", FullName: "docker.io/fooo/bar", AmbiguousName: "index.docker.io/fooo/bar", Domain: "docker.io", }, { RemoteName: "library/ubuntu", FamiliarName: "ubuntu", FullName: "docker.io/library/ubuntu", AmbiguousName: "library/ubuntu", Domain: "docker.io", }, { RemoteName: "nonlibrary/ubuntu", FamiliarName: "nonlibrary/ubuntu", FullName: "docker.io/nonlibrary/ubuntu", AmbiguousName: "", Domain: "docker.io", }, { RemoteName: "other/library", FamiliarName: "other/library", FullName: "docker.io/other/library", AmbiguousName: "", Domain: "docker.io", }, { RemoteName: "private/moonbase", FamiliarName: "127.0.0.1:8000/private/moonbase", FullName: "127.0.0.1:8000/private/moonbase", AmbiguousName: "", Domain: "127.0.0.1:8000", }, { RemoteName: "privatebase", FamiliarName: "127.0.0.1:8000/privatebase", FullName: "127.0.0.1:8000/privatebase", AmbiguousName: "", Domain: "127.0.0.1:8000", }, { RemoteName: "private/moonbase", FamiliarName: "example.com/private/moonbase", FullName: "example.com/private/moonbase", AmbiguousName: "", Domain: "example.com", }, { RemoteName: "privatebase", FamiliarName: "example.com/privatebase", FullName: "example.com/privatebase", AmbiguousName: "", Domain: "example.com", }, { RemoteName: "private/moonbase", FamiliarName: "example.com:8000/private/moonbase", FullName: "example.com:8000/private/moonbase", AmbiguousName: "", Domain: "example.com:8000", }, { RemoteName: "privatebasee", FamiliarName: "example.com:8000/privatebasee", FullName: "example.com:8000/privatebasee", AmbiguousName: "", Domain: "example.com:8000", }, { RemoteName: "library/ubuntu-12.04-base", FamiliarName: "ubuntu-12.04-base", FullName: "docker.io/library/ubuntu-12.04-base", AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", Domain: "docker.io", }, { RemoteName: "library/foo", FamiliarName: "foo", FullName: "docker.io/library/foo", AmbiguousName: "docker.io/foo", Domain: 
"docker.io", }, { RemoteName: "library/foo/bar", FamiliarName: "library/foo/bar", FullName: "docker.io/library/foo/bar", AmbiguousName: "", Domain: "docker.io", }, { RemoteName: "store/foo/bar", FamiliarName: "store/foo/bar", FullName: "docker.io/store/foo/bar", AmbiguousName: "", Domain: "docker.io", }, } for _, tcase := range tcases { refStrings := []string{tcase.FamiliarName, tcase.FullName} if tcase.AmbiguousName != "" { refStrings = append(refStrings, tcase.AmbiguousName) } var refs []Named for _, r := range refStrings { named, err := ParseNormalizedNamed(r) if err != nil { t.Fatal(err) } refs = append(refs, named) } for _, r := range refs { if expected, actual := tcase.FamiliarName, FamiliarName(r); expected != actual { t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual) } if expected, actual := tcase.FullName, r.String(); expected != actual { t.Fatalf("Invalid canonical reference for %q. Expected %q, got %q", r, expected, actual) } if expected, actual := tcase.Domain, Domain(r); expected != actual { t.Fatalf("Invalid domain for %q. Expected %q, got %q", r, expected, actual) } if expected, actual := tcase.RemoteName, Path(r); expected != actual { t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual) } } } } func TestParseReferenceWithTagAndDigest(t *testing.T) { shortRef := "busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa" ref, err := ParseNormalizedNamed(shortRef) if err != nil { t.Fatal(err) } if expected, actual := "docker.io/library/"+shortRef, ref.String(); actual != expected { t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) } if _, isTagged := ref.(NamedTagged); !isTagged { t.Fatalf("Reference from %q should support tag", ref) } if _, isCanonical := ref.(Canonical); !isCanonical { t.Fatalf("Reference from %q should support digest", ref) } if expected, actual := shortRef, FamiliarString(ref); actual != expected { t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual) } } func TestInvalidReferenceComponents(t *testing.T) { if _, err := ParseNormalizedNamed("-foo"); err == nil { t.Fatal("Expected WithName to detect invalid name") } ref, err := ParseNormalizedNamed("busybox") if err != nil { t.Fatal(err) } if _, err := WithTag(ref, "-foo"); err == nil { t.Fatal("Expected WithName to detect invalid tag") } if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { t.Fatal("Expected WithDigest to detect invalid digest") } } func equalReference(r1, r2 Reference) bool { switch v1 := r1.(type) { case digestReference: if v2, ok := r2.(digestReference); ok { return v1 == v2 } case repository: if v2, ok := r2.(repository); ok { return v1 == v2 } case taggedReference: if v2, ok := r2.(taggedReference); ok { return v1 == v2 } case canonicalReference: if v2, ok := r2.(canonicalReference); ok { return v1 == v2 } case reference: if v2, ok := r2.(reference); ok { return v1 == v2 } } return false } func TestParseAnyReference(t *testing.T) { tcases := []struct { Reference string Equivalent string Expected Reference }{ { Reference: "redis", Equivalent: "docker.io/library/redis", }, { Reference: "redis:latest", Equivalent: "docker.io/library/redis:latest", }, { Reference: "docker.io/library/redis:latest", Equivalent: "docker.io/library/redis:latest", }, { Reference: "redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", Equivalent: 
"docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, { Reference: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", Equivalent: "docker.io/library/redis@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, { Reference: "dmcgowan/myapp", Equivalent: "docker.io/dmcgowan/myapp", }, { Reference: "dmcgowan/myapp:latest", Equivalent: "docker.io/dmcgowan/myapp:latest", }, { Reference: "docker.io/mcgowan/myapp:latest", Equivalent: "docker.io/mcgowan/myapp:latest", }, { Reference: "dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, { Reference: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", Equivalent: "docker.io/dmcgowan/myapp@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, { Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, { Reference: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", Expected: digestReference("sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c"), Equivalent: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, { Reference: "dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", Equivalent: "docker.io/library/dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9", }, } for _, tcase := range tcases { var ref Reference var err error ref, err = ParseAnyReference(tcase.Reference) if err != nil { t.Fatalf("Error parsing reference %s: %v", tcase.Reference, err) } if ref.String() != tcase.Equivalent { t.Fatalf("Unexpected string: %s, expected %s", ref.String(), tcase.Equivalent) } expected := tcase.Expected if expected == nil { expected, err = Parse(tcase.Equivalent) if err != nil { t.Fatalf("Error parsing reference %s: %v", tcase.Equivalent, err) } } if !equalReference(ref, expected) { t.Errorf("Unexpected reference %#v, expected %#v", ref, expected) } } } func TestNormalizedSplitHostname(t *testing.T) { testcases := []struct { input string domain string name string }{ { input: "test.com/foo", domain: "test.com", name: "foo", }, { input: "test_com/foo", domain: "docker.io", name: "test_com/foo", }, { input: "docker/migrator", domain: "docker.io", name: "docker/migrator", }, { input: "test.com:8080/foo", domain: "test.com:8080", name: "foo", }, { input: "test-com:8080/foo", domain: "test-com:8080", name: "foo", }, { input: "foo", domain: "docker.io", name: "library/foo", }, { input: "xn--n3h.com/foo", domain: "xn--n3h.com", name: "foo", }, { input: "xn--n3h.com:18080/foo", domain: "xn--n3h.com:18080", name: "foo", }, { input: "docker.io/foo", domain: "docker.io", name: "library/foo", }, { input: "docker.io/library/foo", domain: "docker.io", name: "library/foo", }, { input: "docker.io/library/foo/bar", domain: "docker.io", name: "library/foo/bar", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
t.Fail() } named, err := ParseNormalizedNamed(testcase.input) if err != nil { failf("error parsing name: %s", err) } domain, name := SplitHostname(named) if domain != testcase.domain { failf("unexpected domain: got %q, expected %q", domain, testcase.domain) } if name != testcase.name { failf("unexpected name: got %q, expected %q", name, testcase.name) } } } func TestMatchError(t *testing.T) { named, err := ParseAnyReference("foo") if err != nil { t.Fatal(err) } _, err = FamiliarMatch("[-x]", named) if err == nil { t.Fatalf("expected an error, got nothing") } } func TestMatch(t *testing.T) { matchCases := []struct { reference string pattern string expected bool }{ { reference: "foo", pattern: "foo/**/ba[rz]", expected: false, }, { reference: "foo/any/bat", pattern: "foo/**/ba[rz]", expected: false, }, { reference: "foo/a/bar", pattern: "foo/**/ba[rz]", expected: true, }, { reference: "foo/b/baz", pattern: "foo/**/ba[rz]", expected: true, }, { reference: "foo/c/baz:tag", pattern: "foo/**/ba[rz]", expected: true, }, { reference: "foo/c/baz:tag", pattern: "foo/*/baz:tag", expected: true, }, { reference: "foo/c/baz:tag", pattern: "foo/c/baz:tag", expected: true, }, { reference: "example.com/foo/c/baz:tag", pattern: "*/foo/c/baz", expected: true, }, { reference: "example.com/foo/c/baz:tag", pattern: "example.com/foo/c/baz", expected: true, }, } for _, c := range matchCases { named, err := ParseAnyReference(c.reference) if err != nil { t.Fatal(err) } actual, err := FamiliarMatch(c.pattern, named) if err != nil { t.Fatal(err) } if actual != c.expected { t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual) } } } func TestParseDockerRef(t *testing.T) { testcases := []struct { name string input string expected string }{ { name: "nothing", input: "busybox", expected: "docker.io/library/busybox:latest", }, { name: "tag only", input: "busybox:latest", expected: "docker.io/library/busybox:latest", }, { name: "digest only", input: "busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", expected: "docker.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", }, { name: "path only", input: "library/busybox", expected: "docker.io/library/busybox:latest", }, { name: "hostname only", input: "docker.io/busybox", expected: "docker.io/library/busybox:latest", }, { name: "no tag", input: "docker.io/library/busybox", expected: "docker.io/library/busybox:latest", }, { name: "no path", input: "docker.io/busybox:latest", expected: "docker.io/library/busybox:latest", }, { name: "no hostname", input: "library/busybox:latest", expected: "docker.io/library/busybox:latest", }, { name: "full reference with tag", input: "docker.io/library/busybox:latest", expected: "docker.io/library/busybox:latest", }, { name: "gcr reference without tag", input: "gcr.io/library/busybox", expected: "gcr.io/library/busybox:latest", }, { name: "both tag and digest", input: "gcr.io/library/busybox:latest@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", expected: "gcr.io/library/busybox@sha256:e6693c20186f837fc393390135d8a598a96a833917917789d63766cab6c59582", }, } for _, test := range testcases { t.Run(test.name, func(t *testing.T) { normalized, err := ParseDockerRef(test.input) if err != nil { t.Fatal(err) } output := normalized.String() if output != test.expected { t.Fatalf("expected %q to be parsed as %v, got %v", test.input, test.expected, output) } _, err = Parse(output) if err != nil { t.Fatalf("%q should be a 
valid reference, but got an error: %v", output, err) } }) } } image-4.0.1/docker/reference/reference.go000066400000000000000000000256161354546467100203170ustar00rootroot00000000000000// Package reference provides a general type to represent any way of referencing images within the registry. // Its main purpose is to abstract tags and digests (content-addressable hash). // // Grammar // // reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* // domain := domain-component ['.' domain-component]* [':' port-number] // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* // alpha-numeric := /[a-z0-9]+/ // separator := /[_.]|__|[-]*/ // // tag := /[\w][\w.-]{0,127}/ // // digest := digest-algorithm ":" digest-hex // digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ // digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value // // identifier := /[a-f0-9]{64}/ // short-identifier := /[a-f0-9]{6,64}/ package reference import ( "errors" "fmt" "strings" "github.com/opencontainers/go-digest" ) const ( // NameTotalLengthMax is the maximum total number of characters in a repository name. NameTotalLengthMax = 255 ) var ( // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. ErrReferenceInvalidFormat = errors.New("invalid reference format") // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. ErrTagInvalidFormat = errors.New("invalid tag format") // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. ErrDigestInvalidFormat = errors.New("invalid digest format") // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. ErrNameContainsUppercase = errors.New("repository name must be lowercase") // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) // ErrNameNotCanonical is returned when a name is not canonical. ErrNameNotCanonical = errors.New("repository name must be canonical") ) // Reference is an opaque object reference identifier that may include // modifiers such as a hostname, name, tag, and digest. type Reference interface { // String returns the full reference String() string } // Field provides a wrapper type for resolving correct reference types when // working with encoding. type Field struct { reference Reference } // AsField wraps a reference in a Field for encoding. func AsField(reference Reference) Field { return Field{reference} } // Reference unwraps the reference type from the field to // return the Reference object. This object should be // of the appropriate type to further check for different // reference types. func (f Field) Reference() Reference { return f.reference } // MarshalText serializes the field to byte text which // is the string of the reference.
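// As an illustrative sketch (added commentary; the wrapper type below is hypothetical):
// because Field implements encoding.TextMarshaler and encoding.TextUnmarshaler,
// encoding/json round-trips it as a plain string:
//
//	type pinned struct{ Ref Field }
//	// json.Marshal(pinned{AsField(ref)}) encodes Ref as e.g. "docker.io/library/redis:latest",
//	// and json.Unmarshal re-parses that string via Parse.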
func (f Field) MarshalText() (p []byte, err error) { return []byte(f.reference.String()), nil } // UnmarshalText parses text bytes by invoking the // reference parser to ensure the appropriately // typed reference object is wrapped by field. func (f *Field) UnmarshalText(p []byte) error { r, err := Parse(string(p)) if err != nil { return err } f.reference = r return nil } // Named is an object with a full name type Named interface { Reference Name() string } // Tagged is an object which has a tag type Tagged interface { Reference Tag() string } // NamedTagged is an object including a name and tag. type NamedTagged interface { Named Tag() string } // Digested is an object which has a digest // by which it can be referenced type Digested interface { Reference Digest() digest.Digest } // Canonical reference is an object with a fully unique // name, including a name with domain and digest type Canonical interface { Named Digest() digest.Digest } // namedRepository is a reference to a repository with a name. // A namedRepository has both domain and path components. type namedRepository interface { Named Domain() string Path() string } // Domain returns the domain part of the Named reference func Domain(named Named) string { if r, ok := named.(namedRepository); ok { return r.Domain() } domain, _ := splitDomain(named.Name()) return domain } // Path returns the name without the domain part of the Named reference func Path(named Named) (name string) { if r, ok := named.(namedRepository); ok { return r.Path() } _, path := splitDomain(named.Name()) return path } func splitDomain(name string) (string, string) { match := anchoredNameRegexp.FindStringSubmatch(name) if len(match) != 3 { return "", name } return match[1], match[2] } // SplitHostname splits a named reference into a // hostname and name string. If no valid hostname is // found, the hostname is empty and the full value // is returned as the name. // DEPRECATED: Use Domain or Path func SplitHostname(named Named) (string, string) { if r, ok := named.(namedRepository); ok { return r.Domain(), r.Path() } return splitDomain(named.Name()) } // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. // NOTE: Parse will not handle short digests. func Parse(s string) (Reference, error) { matches := ReferenceRegexp.FindStringSubmatch(s) if matches == nil { if s == "" { return nil, ErrNameEmpty } if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { return nil, ErrNameContainsUppercase } return nil, ErrReferenceInvalidFormat } if len(matches[1]) > NameTotalLengthMax { return nil, ErrNameTooLong } var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) if len(nameMatch) == 3 { repo.domain = nameMatch[1] repo.path = nameMatch[2] } else { repo.domain = "" repo.path = matches[1] } ref := reference{ namedRepository: repo, tag: matches[2], } if matches[3] != "" { var err error ref.digest, err = digest.Parse(matches[3]) if err != nil { return nil, err } } r := getBestReferenceType(ref) if r == nil { return nil, ErrNameEmpty } return r, nil } // ParseNamed parses s and returns a syntactically valid reference implementing // the Named interface. The reference must have a name and be in the canonical // form, otherwise an error is returned. // If an error was encountered it is returned, along with a nil Reference. // NOTE: ParseNamed will not handle short digests.
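// For example (an illustrative sketch mirroring TestParseNamed in this package):
//
//	ParseNamed("docker.io/library/redis") // succeeds
//	ParseNamed("redis")                   // fails with ErrNameNotCanonical
//	ParseNamed("docker.io/redis")         // fails too: normalization would rewrite it to "docker.io/library/redis"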
func ParseNamed(s string) (Named, error) { named, err := ParseNormalizedNamed(s) if err != nil { return nil, err } if named.String() != s { return nil, ErrNameNotCanonical } return named, nil } // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { if len(name) > NameTotalLengthMax { return nil, ErrNameTooLong } match := anchoredNameRegexp.FindStringSubmatch(name) if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } return repository{ domain: match[1], path: match[2], }, nil } // WithTag combines the name from "name" and the tag from "tag" to form a // reference incorporating both the name and the tag. func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } var repo repository if r, ok := name.(namedRepository); ok { repo.domain = r.Domain() repo.path = r.Path() } else { repo.path = name.Name() } if canonical, ok := name.(Canonical); ok { return reference{ namedRepository: repo, tag: tag, digest: canonical.Digest(), }, nil } return taggedReference{ namedRepository: repo, tag: tag, }, nil } // WithDigest combines the name from "name" and the digest from "digest" to form // a reference incorporating both the name and the digest. func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } var repo repository if r, ok := name.(namedRepository); ok { repo.domain = r.Domain() repo.path = r.Path() } else { repo.path = name.Name() } if tagged, ok := name.(Tagged); ok { return reference{ namedRepository: repo, tag: tagged.Tag(), digest: digest, }, nil } return canonicalReference{ namedRepository: repo, digest: digest, }, nil } // TrimNamed removes any tag or digest from the named reference. 
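// For example (illustrative, added commentary): trimming
// "docker.io/library/redis:latest" yields a Named whose String() is
// "docker.io/library/redis"; the domain/path split is preserved via SplitHostname.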
func TrimNamed(ref Named) Named { domain, path := SplitHostname(ref) return repository{ domain: domain, path: path, } } func getBestReferenceType(ref reference) Reference { if ref.Name() == "" { // Allow digest only references if ref.digest != "" { return digestReference(ref.digest) } return nil } if ref.tag == "" { if ref.digest != "" { return canonicalReference{ namedRepository: ref.namedRepository, digest: ref.digest, } } return ref.namedRepository } if ref.digest == "" { return taggedReference{ namedRepository: ref.namedRepository, tag: ref.tag, } } return ref } type reference struct { namedRepository tag string digest digest.Digest } func (r reference) String() string { return r.Name() + ":" + r.tag + "@" + r.digest.String() } func (r reference) Tag() string { return r.tag } func (r reference) Digest() digest.Digest { return r.digest } type repository struct { domain string path string } func (r repository) String() string { return r.Name() } func (r repository) Name() string { if r.domain == "" { return r.path } return r.domain + "/" + r.path } func (r repository) Domain() string { return r.domain } func (r repository) Path() string { return r.path } type digestReference digest.Digest func (d digestReference) String() string { return digest.Digest(d).String() } func (d digestReference) Digest() digest.Digest { return digest.Digest(d) } type taggedReference struct { namedRepository tag string } func (t taggedReference) String() string { return t.Name() + ":" + t.tag } func (t taggedReference) Tag() string { return t.tag } type canonicalReference struct { namedRepository digest digest.Digest } func (c canonicalReference) String() string { return c.Name() + "@" + c.digest.String() } func (c canonicalReference) Digest() digest.Digest { return c.digest } image-4.0.1/docker/reference/reference_test.go000066400000000000000000000403761354546467100213600ustar00rootroot00000000000000package reference import ( _ "crypto/sha256" _ "crypto/sha512" "encoding/json" "strconv" "strings" "testing" "github.com/opencontainers/go-digest" ) func TestReferenceParse(t *testing.T) { // referenceTestcases is a unified set of testcases for // testing the parsing of references referenceTestcases := []struct { // input is the repository name or name component testcase input string // err is the error expected from Parse, or nil err error // repository is the string representation for the reference repository string // domain is the domain expected in the reference domain string // tag is the tag for the reference tag string // digest is the digest for the reference (enforces digest reference) digest string }{ { input: "test_com", repository: "test_com", }, { input: "test.com:tag", repository: "test.com", tag: "tag", }, { input: "test.com:5000", repository: "test.com", tag: "5000", }, { input: "test.com/repo:tag", domain: "test.com", repository: "test.com/repo", tag: "tag", }, { input: "test:5000/repo", domain: "test:5000", repository: "test:5000/repo", }, { input: "test:5000/repo:tag", domain: "test:5000", repository: "test:5000/repo", tag: "tag", }, { input: "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", domain: "test:5000", repository: "test:5000/repo", digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", domain: "test:5000", repository: "test:5000/repo", tag: "tag", digest: 
"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "test:5000/repo", domain: "test:5000", repository: "test:5000/repo", }, { input: "", err: ErrNameEmpty, }, { input: ":justtag", err: ErrReferenceInvalidFormat, }, { input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, { input: "repo@sha256:ffffffffffffffffffffffffffffffffff", err: digest.ErrDigestInvalidLength, }, { input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: digest.ErrDigestUnsupported, }, { input: "Uppercase:tag", err: ErrNameContainsUppercase, }, // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 //{ // input: "Uppercase/lowercase:tag", // err: ErrNameContainsUppercase, //}, { input: "test:5000/Uppercase/lowercase:tag", err: ErrNameContainsUppercase, }, { input: "lowercase:Uppercase", repository: "lowercase", tag: "Uppercase", }, { input: strings.Repeat("a/", 128) + "a:tag", err: ErrNameTooLong, }, { input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", domain: "a", repository: strings.Repeat("a/", 127) + "a", tag: "tag-puts-this-over-max", }, { input: "aa/asdf$$^/aa", err: ErrReferenceInvalidFormat, }, { input: "sub-dom1.foo.com/bar/baz/quux", domain: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", }, { input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", domain: "sub-dom1.foo.com", repository: "sub-dom1.foo.com/bar/baz/quux", tag: "some-long-tag", }, { input: "b.gcr.io/test.example.com/my-app:test.example.com", domain: "b.gcr.io", repository: "b.gcr.io/test.example.com/my-app", tag: "test.example.com", }, { input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode domain: "xn--n3h.com", repository: "xn--n3h.com/myimage", tag: "xn--n3h.com", }, { input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode domain: "xn--7o8h.com", repository: "xn--7o8h.com/myimage", tag: "xn--7o8h.com", digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "foo_bar.com:8080", repository: "foo_bar.com", tag: "8080", }, { input: "foo/foo_bar.com:8080", domain: "foo", repository: "foo/foo_bar.com", tag: "8080", }, } for _, testcase := range referenceTestcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
t.Fail() } repo, err := Parse(testcase.input) if testcase.err != nil { if err == nil { failf("missing expected error: %v", testcase.err) } else if testcase.err != err { failf("mismatched error: got %v, expected %v", err, testcase.err) } continue } else if err != nil { failf("unexpected parse error: %v", err) continue } if repo.String() != testcase.input { failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) } if named, ok := repo.(Named); ok { if named.Name() != testcase.repository { failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) } domain, _ := SplitHostname(named) if domain != testcase.domain { failf("unexpected domain: got %q, expected %q", domain, testcase.domain) } } else if testcase.repository != "" || testcase.domain != "" { failf("expected named type, got %T", repo) } tagged, ok := repo.(Tagged) if testcase.tag != "" { if ok { if tagged.Tag() != testcase.tag { failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) } } else { failf("expected tagged type, got %T", repo) } } else if ok { failf("unexpected tagged type") } digested, ok := repo.(Digested) if testcase.digest != "" { if ok { if digested.Digest().String() != testcase.digest { failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) } } else { failf("expected digested type, got %T", repo) } } else if ok { failf("unexpected digested type") } } } // TestWithNameFailure tests cases where WithName should fail. Cases where it // should succeed are covered by TestSplitHostname, below. func TestWithNameFailure(t *testing.T) { testcases := []struct { input string err error }{ { input: "", err: ErrNameEmpty, }, { input: ":justtag", err: ErrReferenceInvalidFormat, }, { input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, { input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, { input: strings.Repeat("a/", 128) + "a:tag", err: ErrNameTooLong, }, { input: "aa/asdf$$^/aa", err: ErrReferenceInvalidFormat, }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() } _, err := WithName(testcase.input) if err == nil { failf("no error parsing name. expected: %s", testcase.err) } } } func TestSplitHostname(t *testing.T) { testcases := []struct { input string domain string name string }{ { input: "test.com/foo", domain: "test.com", name: "foo", }, { input: "test_com/foo", domain: "", name: "test_com/foo", }, { input: "test:8080/foo", domain: "test:8080", name: "foo", }, { input: "test.com:8080/foo", domain: "test.com:8080", name: "foo", }, { input: "test-com:8080/foo", domain: "test-com:8080", name: "foo", }, { input: "xn--n3h.com:18080/foo", domain: "xn--n3h.com:18080", name: "foo", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
t.Fail() } named, err := WithName(testcase.input) if err != nil { failf("error parsing name: %s", err) } domain, name := SplitHostname(named) if domain != testcase.domain { failf("unexpected domain: got %q, expected %q", domain, testcase.domain) } if name != testcase.name { failf("unexpected name: got %q, expected %q", name, testcase.name) } } } type serializationType struct { Description string Field Field } func TestSerialization(t *testing.T) { testcases := []struct { description string input string name string tag string digest string err error }{ { description: "empty value", err: ErrNameEmpty, }, { description: "just a name", input: "example.com:8000/named", name: "example.com:8000/named", }, { description: "name with a tag", input: "example.com:8000/named:tagged", name: "example.com:8000/named", tag: "tagged", }, { description: "name with digest", input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", name: "other.com/named", digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) t.Fail() } m := map[string]string{ "Description": testcase.description, "Field": testcase.input, } b, err := json.Marshal(m) if err != nil { failf("error marshalling: %v", err) } t := serializationType{} if err := json.Unmarshal(b, &t); err != nil { if testcase.err == nil { failf("error unmarshalling: %v", err) } if err != testcase.err { failf("wrong error, expected %v, got %v", testcase.err, err) } continue } else if testcase.err != nil { failf("expected error unmarshalling: %v", testcase.err) } if t.Description != testcase.description { failf("wrong description, expected %q, got %q", testcase.description, t.Description) } ref := t.Field.Reference() if named, ok := ref.(Named); ok { if named.Name() != testcase.name { failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) } } else if testcase.name != "" { failf("expected named type, got %T", ref) } tagged, ok := ref.(Tagged) if testcase.tag != "" { if ok { if tagged.Tag() != testcase.tag { failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) } } else { failf("expected tagged type, got %T", ref) } } else if ok { failf("unexpected tagged type") } digested, ok := ref.(Digested) if testcase.digest != "" { if ok { if digested.Digest().String() != testcase.digest { failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) } } else { failf("expected digested type, got %T", ref) } } else if ok { failf("unexpected digested type") } t = serializationType{ Description: testcase.description, Field: AsField(ref), } b2, err := json.Marshal(t) if err != nil { failf("error marshalling serialization type: %v", err) } if string(b) != string(b2) { failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) } // Ensure t.Field is not implementing "Reference" directly, getting // around the Reference type system var fieldInterface interface{} = t.Field if _, ok := fieldInterface.(Reference); ok { failf("field should not implement Reference interface") } } } func TestWithTag(t *testing.T) { testcases := []struct { name string digest digest.Digest tag string combined string }{ { name: "test.com/foo", tag: "tag", combined: "test.com/foo:tag", }, { name: "foo", tag: "tag2", combined: "foo:tag2", }, { name: "test.com:8000/foo", tag: "tag4", combined:
"test.com:8000/foo:tag4", }, { name: "test.com:8000/foo", tag: "TAG5", combined: "test.com:8000/foo:TAG5", }, { name: "test.com:8000/foo", digest: "sha256:1234567890098765432112345667890098765", tag: "TAG5", combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.name)+": "+format, v...) t.Fail() } named, err := WithName(testcase.name) if err != nil { failf("error parsing name: %s", err) } if testcase.digest != "" { canonical, err := WithDigest(named, testcase.digest) if err != nil { failf("error adding digest") } named = canonical } tagged, err := WithTag(named, testcase.tag) if err != nil { failf("WithTag failed: %s", err) } if tagged.String() != testcase.combined { failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) } } } func TestWithDigest(t *testing.T) { testcases := []struct { name string digest digest.Digest tag string combined string }{ { name: "test.com/foo", digest: "sha256:1234567890098765432112345667890098765", combined: "test.com/foo@sha256:1234567890098765432112345667890098765", }, { name: "foo", digest: "sha256:1234567890098765432112345667890098765", combined: "foo@sha256:1234567890098765432112345667890098765", }, { name: "test.com:8000/foo", digest: "sha256:1234567890098765432112345667890098765", combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", }, { name: "test.com:8000/foo", digest: "sha256:1234567890098765432112345667890098765", tag: "latest", combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.name)+": "+format, v...) t.Fail() } named, err := WithName(testcase.name) if err != nil { failf("error parsing name: %s", err) } if testcase.tag != "" { tagged, err := WithTag(named, testcase.tag) if err != nil { failf("error adding tag") } named = tagged } digested, err := WithDigest(named, testcase.digest) if err != nil { failf("WithDigest failed: %s", err) } if digested.String() != testcase.combined { failf("unexpected: got %q, expected %q", digested.String(), testcase.combined) } } } func TestParseNamed(t *testing.T) { testcases := []struct { input string domain string name string err error }{ { input: "test.com/foo", domain: "test.com", name: "foo", }, { input: "test:8080/foo", domain: "test:8080", name: "foo", }, { input: "test_com/foo", err: ErrNameNotCanonical, }, { input: "test.com", err: ErrNameNotCanonical, }, { input: "foo", err: ErrNameNotCanonical, }, { input: "library/foo", err: ErrNameNotCanonical, }, { input: "docker.io/library/foo", domain: "docker.io", name: "library/foo", }, // Ambiguous case, parser will add "library/" to foo { input: "docker.io/foo", err: ErrNameNotCanonical, }, } for _, testcase := range testcases { failf := func(format string, v ...interface{}) { t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
t.Fail() } named, err := ParseNamed(testcase.input) if err != nil && testcase.err == nil { failf("error parsing name: %s", err) continue } else if err == nil && testcase.err != nil { failf("parsing succeeded: expected error %v", testcase.err) continue } else if err != testcase.err { failf("unexpected error %v, expected %v", err, testcase.err) continue } else if err != nil { continue } domain, name := SplitHostname(named) if domain != testcase.domain { failf("unexpected domain: got %q, expected %q", domain, testcase.domain) } if name != testcase.name { failf("unexpected name: got %q, expected %q", name, testcase.name) } } } image-4.0.1/docker/reference/regexp.go000066400000000000000000000122221354546467100176420ustar00rootroot00000000000000package reference import "regexp" var ( // alphaNumericRegexp defines the alphanumeric atom, typically a // component of names. This only allows lower case characters and digits. alphaNumericRegexp = match(`[a-z0-9]+`) // separatorRegexp defines the separators allowed to be embedded in name // components. This allows one period, one or two underscores and multiple // dashes. separatorRegexp = match(`(?:[._]|__|[-]*)`) // nameComponentRegexp restricts registry path component names to start // with at least one letter or number, with following parts able to be // separated by one period, one or two underscores and multiple dashes. nameComponentRegexp = expression( alphaNumericRegexp, optional(repeated(separatorRegexp, alphaNumericRegexp))) // domainComponentRegexp restricts the registry domain component of a // repository name to start with a component as defined by DomainRegexp // and followed by an optional port. domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) // DomainRegexp defines the structure of potential domain components // that may be part of image names. This is purposely a subset of what is // allowed by DNS to ensure backwards compatibility with Docker image // names. DomainRegexp = expression( domainComponentRegexp, optional(repeated(literal(`.`), domainComponentRegexp)), optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. TagRegexp = match(`[\w][\w.-]{0,127}`) // anchoredTagRegexp matches valid tag names, anchored at the start and // end of the matched string. anchoredTagRegexp = anchored(TagRegexp) // DigestRegexp matches valid digests. DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) // anchoredDigestRegexp matches valid digests, anchored at the start and // end of the matched string. anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the domain and name part omitting // the separating forward slash from either. NameRegexp = expression( optional(DomainRegexp, literal(`/`)), nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp))) // anchoredNameRegexp is used to parse a name value, capturing the // domain and trailing components. anchoredNameRegexp = anchored( optional(capture(DomainRegexp), literal(`/`)), capture(nameComponentRegexp, optional(repeated(literal(`/`), nameComponentRegexp)))) // ReferenceRegexp is the full supported format of a reference. The regexp // is anchored and has capturing groups for name, tag, and digest // components.
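// For illustration (this example is not from the original source): matching
// "example.com/ns/app:v1@sha256:<64 hex digits>" yields submatch 1 =
// "example.com/ns/app", submatch 2 = "v1", and submatch 3 =
// "sha256:<64 hex digits>".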
ReferenceRegexp = anchored(capture(NameRegexp), optional(literal(":"), capture(TagRegexp)), optional(literal("@"), capture(DigestRegexp))) // IdentifierRegexp is the format for string identifier used as a // content addressable identifier using sha256. These identifiers // are like digests without the algorithm, since sha256 is used. IdentifierRegexp = match(`([a-f0-9]{64})`) // ShortIdentifierRegexp is the format used to represent a prefix // of an identifier. A prefix may be used to match a sha256 identifier // within a list of trusted identifiers. ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) // anchoredIdentifierRegexp is used to check or match an // identifier value, anchored at start and end of string. anchoredIdentifierRegexp = anchored(IdentifierRegexp) // anchoredShortIdentifierRegexp is used to check if a value // is a possible identifier prefix, anchored at start and end // of string. anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) ) // match compiles the string to a regular expression. var match = regexp.MustCompile // literal compiles s into a literal regular expression, escaping any regexp // reserved characters. func literal(s string) *regexp.Regexp { re := match(regexp.QuoteMeta(s)) if _, complete := re.LiteralPrefix(); !complete { panic("must be a literal") } return re } // expression defines a full expression, where each regular expression must // follow the previous. func expression(res ...*regexp.Regexp) *regexp.Regexp { var s string for _, re := range res { s += re.String() } return match(s) } // optional wraps the expression in a non-capturing group and makes the // production optional. func optional(res ...*regexp.Regexp) *regexp.Regexp { return match(group(expression(res...)).String() + `?`) } // repeated wraps the regexp in a non-capturing group to get one or more // matches. func repeated(res ...*regexp.Regexp) *regexp.Regexp { return match(group(expression(res...)).String() + `+`) } // group wraps the regexp in a non-capturing group. func group(res ...*regexp.Regexp) *regexp.Regexp { return match(`(?:` + expression(res...).String() + `)`) } // capture wraps the expression in a capturing group. func capture(res ...*regexp.Regexp) *regexp.Regexp { return match(`(` + expression(res...).String() + `)`) } // anchored anchors the regular expression by adding start and end delimiters. 
func anchored(res ...*regexp.Regexp) *regexp.Regexp { return match(`^` + expression(res...).String() + `$`) } image-4.0.1/docker/reference/regexp_test.go000066400000000000000000000260321354546467100207050ustar00rootroot00000000000000package reference import ( "regexp" "strings" "testing" ) type regexpMatch struct { input string match bool subs []string } func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) { matches := r.FindStringSubmatch(m.input) if m.match && matches != nil { if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input { t.Fatalf("Bad match result %#v for %q", matches, m.input) } if len(matches) < (len(m.subs) + 1) { t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input) } for i := range m.subs { if m.subs[i] != matches[i+1] { t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input) } } } else if m.match { t.Errorf("Expected match for %q", m.input) } else if matches != nil { t.Errorf("Unexpected match for %q", m.input) } } func TestDomainRegexp(t *testing.T) { hostcases := []regexpMatch{ { input: "test.com", match: true, }, { input: "test.com:10304", match: true, }, { input: "test.com:http", match: false, }, { input: "localhost", match: true, }, { input: "localhost:8080", match: true, }, { input: "a", match: true, }, { input: "a.b", match: true, }, { input: "ab.cd.com", match: true, }, { input: "a-b.com", match: true, }, { input: "-ab.com", match: false, }, { input: "ab-.com", match: false, }, { input: "ab.c-om", match: true, }, { input: "ab.-com", match: false, }, { input: "ab.com-", match: false, }, { input: "0101.com", match: true, // TODO(dmcgowan): valid if this should be allowed }, { input: "001a.com", match: true, }, { input: "b.gbc.io:443", match: true, }, { input: "b.gbc.io", match: true, }, { input: "xn--n3h.com", // ☃.com in punycode match: true, }, { input: "Asdf.com", // uppercase character match: true, }, } r := regexp.MustCompile(`^` + DomainRegexp.String() + `$`) for i := range hostcases { checkRegexp(t, r, hostcases[i]) } } func TestFullNameRegexp(t *testing.T) { if anchoredNameRegexp.NumSubexp() != 2 { t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) } testcases := []regexpMatch{ { input: "", match: false, }, { input: "short", match: true, subs: []string{"", "short"}, }, { input: "simple/name", match: true, subs: []string{"simple", "name"}, }, { input: "library/ubuntu", match: true, subs: []string{"library", "ubuntu"}, }, { input: "docker/stevvooe/app", match: true, subs: []string{"docker", "stevvooe/app"}, }, { input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", match: true, subs: []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"}, }, { input: "aa/aa/bb/bb/bb", match: true, subs: []string{"aa", "aa/bb/bb/bb"}, }, { input: "a/a/a/a", match: true, subs: []string{"a", "a/a/a"}, }, { input: "a/a/a/a/", match: false, }, { input: "a//a/a", match: false, }, { input: "a", match: true, subs: []string{"", "a"}, }, { input: "a/aa", match: true, subs: []string{"a", "aa"}, }, { input: "a/aa/a", match: true, subs: []string{"a", "aa/a"}, }, { input: "foo.com", match: true, subs: []string{"", "foo.com"}, }, { input: "foo.com/", match: false, }, { input: "foo.com:8080/bar", match: true, subs: []string{"foo.com:8080", "bar"}, }, { input: "foo.com:http/bar", match: false, }, { input: "foo.com/bar", match: true, subs: []string{"foo.com", "bar"}, }, { input: "foo.com/bar/baz", match: true, 
subs: []string{"foo.com", "bar/baz"}, }, { input: "localhost:8080/bar", match: true, subs: []string{"localhost:8080", "bar"}, }, { input: "sub-dom1.foo.com/bar/baz/quux", match: true, subs: []string{"sub-dom1.foo.com", "bar/baz/quux"}, }, { input: "blog.foo.com/bar/baz", match: true, subs: []string{"blog.foo.com", "bar/baz"}, }, { input: "a^a", match: false, }, { input: "aa/asdf$$^/aa", match: false, }, { input: "asdf$$^/aa", match: false, }, { input: "aa-a/a", match: true, subs: []string{"aa-a", "a"}, }, { input: strings.Repeat("a/", 128) + "a", match: true, subs: []string{"a", strings.Repeat("a/", 127) + "a"}, }, { input: "a-/a/a/a", match: false, }, { input: "foo.com/a-/a/a", match: false, }, { input: "-foo/bar", match: false, }, { input: "foo/bar-", match: false, }, { input: "foo-/bar", match: false, }, { input: "foo/-bar", match: false, }, { input: "_foo/bar", match: false, }, { input: "foo_bar", match: true, subs: []string{"", "foo_bar"}, }, { input: "foo_bar.com", match: true, subs: []string{"", "foo_bar.com"}, }, { input: "foo_bar.com:8080", match: false, }, { input: "foo_bar.com:8080/app", match: false, }, { input: "foo.com/foo_bar", match: true, subs: []string{"foo.com", "foo_bar"}, }, { input: "____/____", match: false, }, { input: "_docker/_docker", match: false, }, { input: "docker_/docker_", match: false, }, { input: "b.gcr.io/test.example.com/my-app", match: true, subs: []string{"b.gcr.io", "test.example.com/my-app"}, }, { input: "xn--n3h.com/myimage", // ☃.com in punycode match: true, subs: []string{"xn--n3h.com", "myimage"}, }, { input: "xn--7o8h.com/myimage", // 🐳.com in punycode match: true, subs: []string{"xn--7o8h.com", "myimage"}, }, { input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode match: true, subs: []string{"example.com", "xn--7o8h.com/myimage"}, }, { input: "example.com/some_separator__underscore/myimage", match: true, subs: []string{"example.com", "some_separator__underscore/myimage"}, }, { input: "example.com/__underscore/myimage", match: false, }, { input: "example.com/..dots/myimage", match: false, }, { input: "example.com/.dots/myimage", match: false, }, { input: "example.com/nodouble..dots/myimage", match: false, }, { input: "example.com/nodouble..dots/myimage", match: false, }, { input: "docker./docker", match: false, }, { input: ".docker/docker", match: false, }, { input: "docker-/docker", match: false, }, { input: "-docker/docker", match: false, }, { input: "do..cker/docker", match: false, }, { input: "do__cker:8080/docker", match: false, }, { input: "do__cker/docker", match: true, subs: []string{"", "do__cker/docker"}, }, { input: "b.gcr.io/test.example.com/my-app", match: true, subs: []string{"b.gcr.io", "test.example.com/my-app"}, }, { input: "registry.io/foo/project--id.module--name.ver---sion--name", match: true, subs: []string{"registry.io", "foo/project--id.module--name.ver---sion--name"}, }, { input: "Asdf.com/foo/bar", // uppercase character in hostname match: true, }, { input: "Foo/FarB", // uppercase characters in remote name match: false, }, } for i := range testcases { checkRegexp(t, anchoredNameRegexp, testcases[i]) } } func TestReferenceRegexp(t *testing.T) { if ReferenceRegexp.NumSubexp() != 3 { t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", ReferenceRegexp, ReferenceRegexp.NumSubexp()) } testcases := []regexpMatch{ { input: "registry.com:8080/myapp:tag", match: true, subs: []string{"registry.com:8080/myapp", "tag", ""}, }, { input: 
"registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "registry.com:8080/myapp@sha256:badbadbadbad", match: false, }, { input: "registry.com:8080/myapp:invalid~tag", match: false, }, { input: "bad_hostname.com:8080/myapp:tag", match: false, }, { input:// localhost treated as name, missing tag with 8080 as tag "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: false, }, { // localhost will be treated as an image name without a host input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912", match: true, subs: []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"}, }, { input: "registry.com:8080/myapp@bad", match: false, }, { input: "registry.com:8080/myapp@2bad", match: false, // TODO(dmcgowan): Support this as valid }, } for i := range testcases { checkRegexp(t, ReferenceRegexp, testcases[i]) } } func TestIdentifierRegexp(t *testing.T) { fullCases := []regexpMatch{ { input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", match: true, }, { input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", match: false, }, { input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", match: false, }, { input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", match: false, }, { input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", match: false, }, } shortCases := []regexpMatch{ { input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", match: true, }, { input: "7EC43B381E5AEFE6E04EFB0B3F0693FF2A4A50652D64AEC573905F2DB5889A1C", match: false, }, { input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf", match: true, }, { input: "sha256:da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf9821", match: false, }, { input: "da304e823d8ca2b9d863a3c897baeb852ba21ea9a9f1414736394ae7fcaf98218482", match: false, }, { input: "da304", match: false, }, { input: "da304e", match: true, }, } for i := range fullCases { checkRegexp(t, anchoredIdentifierRegexp, fullCases[i]) } for i := range shortCases { checkRegexp(t, anchoredShortIdentifierRegexp, shortCases[i]) } } image-4.0.1/docker/tarfile/000077500000000000000000000000001354546467100155125ustar00rootroot00000000000000image-4.0.1/docker/tarfile/dest.go000066400000000000000000000374511354546467100170120ustar00rootroot00000000000000package tarfile import ( "archive/tar" "bytes" "context" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "time" "github.com/containers/image/v4/docker/reference" 
"github.com/containers/image/v4/internal/tmpdir" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. type Destination struct { writer io.Writer tar *tar.Writer repoTags []reference.NamedTagged // Other state. blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs config []byte } // NewDestination returns a tarfile.Destination for the specified io.Writer. func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { repoTags := []reference.NamedTagged{} if ref != nil { repoTags = append(repoTags, ref) } return &Destination{ writer: dest, tar: tar.NewWriter(dest), repoTags: repoTags, blobs: make(map[digest.Digest]types.BlobInfo), } } // AddRepoTags adds the specified tags to the destination's repoTags. func (d *Destination) AddRepoTags(tags []reference.NamedTagged) { d.repoTags = append(d.repoTags, tags...) } // SupportedManifestMIMETypes tells which manifest mime types the destination supports // If an empty slice or nil it's returned, then any mime type can be tried to upload func (d *Destination) SupportedManifestMIMETypes() []string { return []string{ manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. } } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *Destination) SupportsSignatures(ctx context.Context) error { return errors.Errorf("Storing signatures for docker tar files is not supported") } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *Destination) AcceptsForeignLayerURLs() bool { return false } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *Destination) MustMatchRuntimeOS() bool { return false } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. func (d *Destination) IgnoresEmbeddedDockerReference() bool { return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false. } // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *Destination) HasThreadSafePutBlob() bool { return false } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { // Ouch, we need to stream the blob into a temporary file just to determine the size. // When the layer is decompressed, we also have to generate the digest on the uncompressed data. if inputInfo.Size == -1 || inputInfo.Digest.String() == "" { logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") if err != nil { return types.BlobInfo{}, err } defer os.Remove(streamCopy.Name()) defer streamCopy.Close() digester := digest.Canonical.Digester() tee := io.TeeReader(stream, digester.Hash()) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(streamCopy, tee) if err != nil { return types.BlobInfo{}, err } _, err = streamCopy.Seek(0, os.SEEK_SET) if err != nil { return types.BlobInfo{}, err } inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy. if inputInfo.Digest == "" { inputInfo.Digest = digester.Digest() } stream = streamCopy logrus.Debugf("... streaming done") } // Maybe the blob has already been sent ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false) if err != nil { return types.BlobInfo{}, err } if ok { return reusedInfo, nil } if isConfig { buf, err := ioutil.ReadAll(stream) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream") } d.config = buf if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file") } } else { // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way // writeLegacyLayerMetadata constructs layer IDs differently from inputinfo.Digest values (as described // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load) // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers // in the root of the tarball. if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil { return types.BlobInfo{}, err } } d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size} return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest") } if blob, ok := d.blobs[info.Digest]; ok { return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil } return false, types.BlobInfo{}, nil } func (d *Destination) createRepositoriesFile(rootLayerID string) error { repositories := map[string]map[string]string{} for _, repoTag := range d.repoTags { if val, ok := repositories[repoTag.Name()]; ok { val[repoTag.Tag()] = rootLayerID } else { repositories[repoTag.Name()] = map[string]string{repoTag.Tag(): rootLayerID} } } b, err := json.Marshal(repositories) if err != nil { return errors.Wrap(err, "Error marshaling repositories") } if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil { return errors.Wrap(err, "Error writing repositories file") } return nil } // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. func (d *Destination) PutManifest(ctx context.Context, m []byte) error { // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, // so the caller trying a different manifest kind would be pointless. var man manifest.Schema2 if err := json.Unmarshal(m, &man); err != nil { return errors.Wrap(err, "Error parsing manifest") } if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") } layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors) if err != nil { return err } if len(man.LayersDescriptors) > 0 { if err := d.createRepositoriesFile(lastLayerID); err != nil { return err } } repoTags := []string{} for _, tag := range d.repoTags { // For github.com/docker/docker consumers, this works just as well as // refString := ref.String() // because when reading the RepoTags strings, github.com/docker/docker/reference // normalizes both of them to the same value. // // Doing it this way to include the normalized-out `docker.io[/library]` does make // a difference for github.com/projectatomic/docker consumers, with the // “Add --add-registry and --block-registry options to docker daemon” patch. // These consumers treat reference strings which include a hostname and reference // strings without a hostname differently. // // Using the host name here is more explicit about the intent, and it has the same // effect as (docker pull) in projectatomic/docker, which tags the result using // a hostname-qualified reference. // See https://github.com/containers/image/issues/72 for a more detailed // analysis and explanation. refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag()) repoTags = append(repoTags, refString) } items := []ManifestItem{{ Config: man.ConfigDescriptor.Digest.Hex() + ".json", RepoTags: repoTags, Layers: layerPaths, Parent: "", LayerSources: nil, }} itemsBytes, err := json.Marshal(&items) if err != nil { return err } // FIXME? Do we also need to support the legacy format?
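// For reference, the itemsBytes marshalled above serialize to a
// single-element array of this shape (values are illustrative, not taken
// from a real image):
//
//	[{"Config":"<config digest hex>.json",
//	  "RepoTags":["docker.io/library/busybox:latest"],
//	  "Layers":["<layer digest hex>.tar"]}]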
return d.sendBytes(manifestFileName, itemsBytes) } // writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) { var chainID digest.Digest lastLayerID = "" for i, l := range layerDescriptors { // This chainID value matches the computation in docker/docker/layer.CreateChainID … if chainID == "" { chainID = l.Digest } else { chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String()) } // … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent // versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop. // // Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID // (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more // times in layersDescriptors. The ChainID values are sufficient for this, the v1.CreateID computation // which also mixes in the full image configuration seems unnecessary, at least as long as we are storing // only a single image per tarball, i.e. all DiffID prefixes are unique (can’t differ only with // configuration). layerID := chainID.Hex() physicalLayerPath := l.Digest.Hex() + ".tar" // The layer itself has been stored into physicalLayerPath in PutBlob. // So, use that path for layerPaths used in the non-legacy manifest layerPaths = append(layerPaths, physicalLayerPath) // ... and create a symlink for the legacy format; if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil { return nil, "", errors.Wrap(err, "Error creating layer symbolic link") } b := []byte("1.0") if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil { return nil, "", errors.Wrap(err, "Error writing VERSION file") } // The legacy format requires a config file per layer layerConfig := make(map[string]interface{}) layerConfig["id"] = layerID // The root layer doesn't have any parent if lastLayerID != "" { layerConfig["parent"] = lastLayerID } // The top layer configuration file is generated by using a subpart of the image configuration if i == len(layerDescriptors)-1 { var config map[string]*json.RawMessage err := json.Unmarshal(d.config, &config) if err != nil { return nil, "", errors.Wrap(err, "Error unmarshaling config") } for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} { layerConfig[attr] = config[attr] } } b, err := json.Marshal(layerConfig) if err != nil { return nil, "", errors.Wrap(err, "Error marshaling layer config") } if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil { return nil, "", errors.Wrap(err, "Error writing config json file") } lastLayerID = layerID } return layerPaths, lastLayerID, nil } type tarFI struct { path string size int64 isSymlink bool } func (t *tarFI) Name() string { return t.path } func (t *tarFI) Size() int64 { return t.size } func (t *tarFI) Mode() os.FileMode { if t.isSymlink { return os.ModeSymlink } return 0444 } func (t *tarFI) ModTime() time.Time { return time.Unix(0, 0) } func (t *tarFI) IsDir() bool { return false } func (t *tarFI) Sys() interface{} { return nil } // sendSymlink sends a symlink into the tar stream.
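// The header is synthesized via the tarFI shim above: tar.FileInfoHeader sees
// os.ModeSymlink in the file mode and records target as the header's Linkname.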
func (d *Destination) sendSymlink(path string, target string) error { hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target) if err != nil { return err } logrus.Debugf("Sending as tar link %s -> %s", path, target) return d.tar.WriteHeader(hdr) } // sendBytes sends the given bytes as a file at path into the tar stream. func (d *Destination) sendBytes(path string, b []byte) error { return d.sendFile(path, int64(len(b)), bytes.NewReader(b)) } // sendFile sends a file into the tar stream. func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error { hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "") if err != nil { return err } logrus.Debugf("Sending as tar file %s", path) if err := d.tar.WriteHeader(hdr); err != nil { return err } // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. size, err := io.Copy(d.tar, stream) if err != nil { return err } if size != expectedSize { return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) } return nil } // PutSignatures adds the given signatures to the docker tarfile (currently not // supported). MUST be called after PutManifest (signatures reference manifest // contents) func (d *Destination) PutSignatures(ctx context.Context, signatures [][]byte) error { if len(signatures) != 0 { return errors.Errorf("Storing signatures for docker tar files is not supported") } return nil } // Commit finishes writing data to the underlying io.Writer. // It is the caller's responsibility to close it, if necessary. func (d *Destination) Commit(ctx context.Context) error { return d.tar.Close() } image-4.0.1/docker/tarfile/doc.go000066400000000000000000000002331354546467100166040ustar00rootroot00000000000000// Package tarfile is an internal implementation detail of some transports. // Do not use outside of the github.com/containers/image repo! package tarfile image-4.0.1/docker/tarfile/src.go000066400000000000000000000405021354546467100166310ustar00rootroot00000000000000package tarfile import ( "archive/tar" "bytes" "context" "encoding/json" "io" "io/ioutil" "os" "path" "sync" "github.com/containers/image/v4/internal/tmpdir" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/compression" "github.com/containers/image/v4/types" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) // Source is a partial implementation of types.ImageSource for reading from tarPath. type Source struct { tarPath string removeTarPathOnClose bool // Remove temp file on close if true // The following data is only available after ensureCachedDataIsPresent() succeeds tarManifest *ManifestItem // nil if not available yet. configBytes []byte configDigest digest.Digest orderedDiffIDList []digest.Digest knownLayers map[digest.Digest]*layerInfo // Other state generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe cacheDataResult error // Private state for ensureCachedDataIsPresent } type layerInfo struct { path string size int64 } // TODO: We could add support for multiple images in a single archive, so // that people could use docker-archive:opensuse.tar:opensuse:leap as // the source of an image. // To do for both the NewSourceFromFile and NewSourceFromStream functions // NewSourceFromFile returns a tarfile.Source for the specified path.
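// A hypothetical use (illustrative only; ctx is a context.Context, and error
// handling is elided):
//
//	src, err := NewSourceFromFile("/tmp/app.tar")
//	defer src.Close()
//	manifestBytes, mimeType, err := src.GetManifest(ctx, nil)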
func NewSourceFromFile(path string) (*Source, error) { file, err := os.Open(path) if err != nil { return nil, errors.Wrapf(err, "error opening file %q", path) } defer file.Close() // If the file is not compressed, we can just return the file itself // as a source. Otherwise we pass the stream to NewSourceFromStream. stream, isCompressed, err := compression.AutoDecompress(file) if err != nil { return nil, errors.Wrapf(err, "Error detecting compression for file %q", path) } defer stream.Close() if !isCompressed { return &Source{ tarPath: path, }, nil } return NewSourceFromStream(stream) } // NewSourceFromStream returns a tarfile.Source for the specified inputStream, // which can be either compressed or uncompressed. The caller can close the // inputStream immediately after NewSourceFromStream returns. func NewSourceFromStream(inputStream io.Reader) (*Source, error) { // FIXME: use SystemContext here. // Save inputStream to a temporary file tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar") if err != nil { return nil, errors.Wrap(err, "error creating temporary file") } defer tarCopyFile.Close() succeeded := false defer func() { if !succeeded { os.Remove(tarCopyFile.Name()) } }() // In order to be compatible with docker-load, we need to support // auto-decompression (it's also a nice quality-of-life thing to avoid // giving users really confusing "invalid tar header" errors). uncompressedStream, _, err := compression.AutoDecompress(inputStream) if err != nil { return nil, errors.Wrap(err, "Error auto-decompressing input") } defer uncompressedStream.Close() // Copy the plain archive to the temporary file. // // TODO: This can take quite some time, and should ideally be cancellable // using a context.Context. if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil { return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name()) } succeeded = true return &Source{ tarPath: tarCopyFile.Name(), removeTarPathOnClose: true, }, nil } // tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. type tarReadCloser struct { *tar.Reader backingFile *os.File } func (t *tarReadCloser) Close() error { return t.backingFile.Close() } // openTarComponent returns a ReadCloser for the specific file within the archive. // This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers), // and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. // The caller should call .Close() on the returned stream. func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) { f, err := os.Open(s.tarPath) if err != nil { return nil, err } succeeded := false defer func() { if !succeeded { f.Close() } }() tarReader, header, err := findTarComponent(f, componentPath) if err != nil { return nil, err } if header == nil { return nil, os.ErrNotExist } if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested // We follow only one symlink; so no loops are possible. if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, err } // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, // so we don't care.
tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) if err != nil { return nil, err } if header == nil { return nil, os.ErrNotExist } } if !header.FileInfo().Mode().IsRegular() { return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) } succeeded = true return &tarReadCloser{Reader: tarReader, backingFile: f}, nil } // findTarComponent returns a header and a reader matching path within inputFile, // or (nil, nil, nil) if not found. func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { t := tar.NewReader(inputFile) for { h, err := t.Next() if err == io.EOF { break } if err != nil { return nil, nil, err } if h.Name == path { return t, h, nil } } return nil, nil, nil } // readTarComponent returns full contents of componentPath. func (s *Source) readTarComponent(path string) ([]byte, error) { file, err := s.openTarComponent(path) if err != nil { return nil, errors.Wrapf(err, "Error loading tar component %s", path) } defer file.Close() bytes, err := ioutil.ReadAll(file) if err != nil { return nil, err } return bytes, nil } // ensureCachedDataIsPresent loads data necessary for any of the public accessors. // It is safe to call this from multi-threaded code. func (s *Source) ensureCachedDataIsPresent() error { s.cacheDataLock.Do(func() { s.cacheDataResult = s.ensureCachedDataIsPresentPrivate() }) return s.cacheDataResult } // ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent. // Call ensureCachedDataIsPresent instead. func (s *Source) ensureCachedDataIsPresentPrivate() error { // Read and parse manifest.json tarManifest, err := s.loadTarManifest() if err != nil { return err } // Check to make sure length is 1 if len(tarManifest) != 1 { return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) } // Read and parse config. configBytes, err := s.readTarComponent(tarManifest[0].Config) if err != nil { return err } var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) } knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) if err != nil { return err } // Success; commit. s.tarManifest = &tarManifest[0] s.configBytes = configBytes s.configDigest = digest.FromBytes(configBytes) s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs s.knownLayers = knownLayers return nil } // loadTarManifest loads and decodes the manifest.json. func (s *Source) loadTarManifest() ([]ManifestItem, error) { // FIXME? Do we need to deal with the legacy format? bytes, err := s.readTarComponent(manifestFileName) if err != nil { return nil, err } var items []ManifestItem if err := json.Unmarshal(bytes, &items); err != nil { return nil, errors.Wrap(err, "Error decoding tar manifest.json") } return items, nil } // Close removes resources associated with an initialized Source, if any. 
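// (Concretely, it deletes the private temporary copy of the tarball created by
// NewSourceFromStream; a Source created by NewSourceFromFile on an
// uncompressed file owns no such copy, and Close is then a no-op.)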
func (s *Source) Close() error { if s.removeTarPathOnClose { return os.Remove(s.tarPath) } return nil } // LoadTarManifest loads and decodes the manifest.json func (s *Source) LoadTarManifest() ([]ManifestItem, error) { return s.loadTarManifest() } func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { // Collect layer data available in manifest and config. if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) } knownLayers := map[digest.Digest]*layerInfo{} unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. for i, diffID := range parsedConfig.RootFS.DiffIDs { if _, ok := knownLayers[diffID]; ok { // Apparently it really can happen that a single image contains the same layer diff more than once. // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. continue } layerPath := tarManifest.Layers[i] if _, ok := unknownLayerSizes[layerPath]; ok { return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) } li := &layerInfo{ // A new element in each iteration path: layerPath, size: -1, } knownLayers[diffID] = li unknownLayerSizes[layerPath] = li } // Scan the tar file to collect layer sizes. file, err := os.Open(s.tarPath) if err != nil { return nil, err } defer file.Close() t := tar.NewReader(file) for { h, err := t.Next() if err == io.EOF { break } if err != nil { return nil, err } if li, ok := unknownLayerSizes[h.Name]; ok { // Since GetBlob will decompress layers that are compressed we need // to do the decompression here as well, otherwise we will // incorrectly report the size. Pretty critical, since tools like // umoci always compress layer blobs. Obviously we only bother with // the slower method of checking if it's compressed. uncompressedStream, isCompressed, err := compression.AutoDecompress(t) if err != nil { return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name) } defer uncompressedStream.Close() uncompressedSize := h.Size if isCompressed { uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream) if err != nil { return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name) } } li.size = uncompressedSize delete(unknownLayerSizes, h.Name) } } if len(unknownLayerSizes) != 0 { return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. } return knownLayers, nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if instanceDigest != nil { // How did we even get here? 
GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) } if s.generatedManifest == nil { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, "", err } m := manifest.Schema2{ SchemaVersion: 2, MediaType: manifest.DockerV2Schema2MediaType, ConfigDescriptor: manifest.Schema2Descriptor{ MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(s.configBytes)), Digest: s.configDigest, }, LayersDescriptors: []manifest.Schema2Descriptor{}, } for _, diffID := range s.orderedDiffIDList { li, ok := s.knownLayers[diffID] if !ok { return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) } m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ Digest: diffID, // diffID is a digest of the uncompressed tarball MediaType: manifest.DockerV2Schema2LayerMediaType, Size: li.size, }) } manifestBytes, err := json.Marshal(&m) if err != nil { return nil, "", err } s.generatedManifest = manifestBytes } return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil } // uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input. type uncompressedReadCloser struct { io.Reader underlyingCloser func() error uncompressedCloser func() error } func (r uncompressedReadCloser) Close() error { var res error if err := r.uncompressedCloser(); err != nil { res = err } if err := r.underlyingCloser(); err != nil && res == nil { res = err } return res } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *Source) HasThreadSafeGetBlob() bool { return true } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, 0, err } if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, underlyingStream, err := s.openTarComponent(li.path) if err != nil { return nil, 0, err } closeUnderlyingStream := true defer func() { if closeUnderlyingStream { underlyingStream.Close() } }() // In order to handle the fact that digests != diffIDs (and thus that a // caller which is trying to verify the blob will run into problems), // we need to decompress blobs. This is a bit ugly, but it's a // consequence of making everything addressable by their DiffID rather // than by their digest... // // In particular, because the v2s2 manifest being generated uses // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of // layers, not their _actual_ digest. The result is that copy/... will // be verifying a "digest" which is not the actual layer's digest (but // is instead the DiffID).
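// Illustrative example (hypothetical digests): for a gzip-compressed layer,
// the manifest synthesized in GetManifest advertises the DiffID, i.e.
// sha256(<uncompressed layer.tar>), while the docker-save tarball may store
// the bytes of <gzipped layer.tar>; decompressing here keeps the returned
// stream consistent with the digest the caller will verify.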
uncompressedStream, _, err := compression.AutoDecompress(underlyingStream) if err != nil { return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest) } newStream := uncompressedReadCloser{ Reader: uncompressedStream, underlyingCloser: underlyingStream.Close, uncompressedCloser: uncompressedStream.Close, } closeUnderlyingStream = false return newStream, li.size, nil } return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if instanceDigest != nil { // How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType. return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) } return [][]byte{}, nil } image-4.0.1/docker/tarfile/types.go000066400000000000000000000014251354546467100172070ustar00rootroot00000000000000package tarfile import ( "github.com/containers/image/v4/manifest" "github.com/opencontainers/go-digest" ) // Various data structures. // Based on github.com/docker/docker/image/tarexport/tarexport.go const ( manifestFileName = "manifest.json" legacyLayerFileName = "layer.tar" legacyConfigFileName = "json" legacyVersionFileName = "VERSION" legacyRepositoriesFileName = "repositories" ) // ManifestItem is an element of the array stored in the top-level manifest.json file. type ManifestItem struct { Config string RepoTags []string Layers []string Parent imageID `json:",omitempty"` LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` } type imageID string image-4.0.1/docker/wwwauthenticate.go000066400000000000000000000073301354546467100176410ustar00rootroot00000000000000package docker // Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. import ( "net/http" "strings" ) // challenge carries information from a WWW-Authenticate response header. // See RFC 7235. type challenge struct { // Scheme is the auth-scheme according to RFC 7235 Scheme string // Parameters are the auth-params according to RFC 7235 Parameters map[string]string } // Octet types from RFC 7230. type octetType byte var octetTypes [256]octetType const ( isToken octetType = 1 << iota isSpace ) func init() { // OCTET = <any 8-bit sequence of data> // CHAR = <any US-ASCII character (octets 0 - 127)> // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> // CR = <US-ASCII CR, carriage return (13)> // LF = <US-ASCII LF, linefeed (10)> // SP = <US-ASCII SP, space (32)> // HT = <US-ASCII HT, horizontal-tab (9)> // <"> = <US-ASCII double-quote mark (34)> // CRLF = CR LF // LWS = [CRLF] 1*( SP | HT ) // TEXT = <any OCTET except CTLs, but including LWS> // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT // token = 1*<any CHAR except CTLs or separators> // qdtext = <any TEXT except <">> for c := 0; c < 256; c++ { var t octetType isCtl := c <= 31 || c == 127 isChar := 0 <= c && c <= 127 isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { t |= isSpace } if isChar && !isCtl && !isSeparator { t |= isToken } octetTypes[c] = t } } func parseAuthHeader(header http.Header) []challenge { challenges := []challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) if v != "" { challenges = append(challenges, challenge{Scheme: v, Parameters: p}) } } return challenges } // NOTE: This is not a fully compliant parser per RFC 7235: // Most notably it does not support more than one challenge within a single header // Some of the whitespace parsing also seems noncompliant. // But it is clearly better than what we used to have… func parseValueAndParams(header string) (value string, params map[string]string) { params = make(map[string]string) value, s := expectToken(header) if value == "" { return } value = strings.ToLower(value) s = "," + skipSpace(s) for strings.HasPrefix(s, ",") { var pkey string pkey, s = expectToken(skipSpace(s[1:])) if pkey == "" { return } if !strings.HasPrefix(s, "=") { return } var pvalue string pvalue, s = expectTokenOrQuoted(s[1:]) if pvalue == "" { return } pkey = strings.ToLower(pkey) params[pkey] = pvalue s = skipSpace(s) } return } func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isSpace == 0 { break } } return s[i:] } func expectToken(s string) (token, rest string) { i := 0 for ; i < len(s); i++ { if octetTypes[s[i]]&isToken == 0 { break } } return s[:i], s[i:] } func expectTokenOrQuoted(s string) (value string, rest string) { if !strings.HasPrefix(s, "\"") { return expectToken(s) } s = s[1:] for i := 0; i < len(s); i++ { switch s[i] { case '"': return s[:i], s[i+1:] case '\\': p := make([]byte, len(s)-1) j := copy(p, s[:i]) escape := true for i = i + 1; i < len(s); i++ { b := s[i] switch { case escape: escape = false p[j] = b j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b j++ } } return "", "" } } return "", "" } image-4.0.1/docker/wwwauthenticate_test.go000066400000000000000000000023161354546467100206770ustar00rootroot00000000000000package docker import ( "testing" "github.com/stretchr/testify/assert" ) // This is just a smoke test for the common expected header formats, // by no means comprehensive.
func TestParseValueAndParams(t *testing.T) { for _, c := range []struct { input string scope string params map[string]string }{ { `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull"`, "bearer", map[string]string{ "realm": "https://auth.docker.io/token", "service": "registry.docker.io", "scope": "repository:library/busybox:pull", }, }, { `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/busybox:pull,push"`, "bearer", map[string]string{ "realm": "https://auth.docker.io/token", "service": "registry.docker.io", "scope": "repository:library/busybox:pull,push", }, }, { `Bearer realm="http://127.0.0.1:5000/openshift/token"`, "bearer", map[string]string{"realm": "http://127.0.0.1:5000/openshift/token"}, }, } { scope, params := parseValueAndParams(c.input) assert.Equal(t, c.scope, scope, c.input) assert.Equal(t, c.params, params, c.input) } } image-4.0.1/docs/000077500000000000000000000000001354546467100135455ustar00rootroot00000000000000image-4.0.1/docs/atomic-signature-embedded-json.json000066400000000000000000000051131354546467100224110ustar00rootroot00000000000000{ "title": "JSON embedded in an atomic container signature", "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containers/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n", "type": "object", "required": [ "critical", "optional" ], "additionalProperties": false, "properties": { "critical": { "type": "object", "required": [ "type", "image", "identity" ], "additionalProperties": false, "properties": { "type": { "type": "string", "enum": [ "atomic container signature" ] }, "image": { "type": "object", "required": [ "docker-manifest-digest" ], "additionalProperties": false, "properties": { "docker-manifest-digest": { "type": "string" } } }, "identity": { "type": "object", "required": [ "docker-reference" ], "additionalProperties": false, "properties": { "docker-reference": { "type": "string" } } } } }, "optional": { "type": "object", "description": "All members are optional, but if they are included, they must be valid.", "additionalProperties": true, "properties": { "creator": { "type": "string" }, "timestamp": { "type": "integer" } } } } }image-4.0.1/docs/containers-certs.d.5.md000066400000000000000000000024211354546467100177360ustar00rootroot00000000000000% containers-certs.d(5) # NAME containers-certs.d - Directory for storing custom container-registry TLS configurations # DESCRIPTION A custom TLS configuration for a container registry can be set up by creating a directory under `/etc/containers/certs.d`. The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`).
## Directory Structure A certs directory can contain one or more files with the following extensions: * `*.crt` files with this extension will be interpreted as CA certificates * `*.cert` files with this extension will be interpreted as client certificates * `*.key` files with this extension will be interpreted as client keys Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`). An example setup for a registry running at `my-registry.com:5000` may look as follows: ``` /etc/containers/certs.d/ <- Certificate directory └── my-registry.com:5000 <- Hostname:port ├── client.cert <- Client certificate ├── client.key <- Client key └── ca.crt <- Certificate authority that signed the registry certificate ``` # HISTORY Feb 2019, Originally compiled by Valentin Rothberg image-4.0.1/docs/containers-policy.json.5.md000066400000000000000000000260131354546467100206460ustar00rootroot00000000000000% CONTAINERS-POLICY.JSON(5) policy.json Man Page % Miloslav Trmač % September 2016 # NAME containers-policy.json - syntax for the signature verification policy file ## DESCRIPTION Signature verification policy files are used to specify policy, e.g. trusted keys, applicable when deciding whether to accept an image, or individual signatures of that image, as valid. The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`; applications performing verification may allow using a different policy instead. ## FORMAT The signature verification policy file, usually called `policy.json`, uses a JSON format. Unlike some other JSON files, its parsing is fairly strict: unrecognized, duplicated or otherwise invalid fields cause the entire file, and usually the entire operation, to be rejected. The purpose of the policy file is to define a set of *policy requirements* for a container image, usually depending on its location (where it is being pulled from) or otherwise defined identity. Policy requirements can be defined for: - An individual *scope* in a *transport*. The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`), and *scope* values are defined by each transport; see below for more details. Usually, a scope can be defined to match a single image, and various prefixes of such a most specific scope define namespaces of matching images. - A default policy for a single transport, expressed using an empty string as a scope - A global default policy. If multiple policy requirements match a given image, only the requirements from the most specific match apply, the more general policy requirements definitions are ignored. This is expressed in JSON using the top-level syntax ```js { "default": [/* policy requirements: global default */] "transports": { transport_name: { "": [/* policy requirements: default for transport $transport_name */], scope_1: [/* policy requirements: default for $scope_1 in $transport_name */], scope_2: [/*…*/] /*…*/ }, transport_name_2: {/*…*/} /*…*/ } } ``` The global `default` set of policy requirements is mandatory; all of the other fields (`transports` itself, any specific transport, the transport-specific default, etc.) are optional. ## Supported transports and their scopes ### `atomic:` The `atomic:` transport refers to images in an Atomic Registry. Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]], i.e.
either specifying a complete name of a tagged image, or a prefix denoting a host/namespace/image stream. *Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used e.g. for `docker pull`), _not_ to the OpenShift API host and port. ### `dir:` The `dir:` transport refers to images stored in local directories. Supported scopes are paths of directories (either containing a single image or subdirectories possibly containing images). *Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored. The top-level scope `"/"` is forbidden; use the transport default scope `""`, for consistency with other transports. ### `docker:` The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2". Scopes matching individual images are named Docker references *in the fully expanded form*, either using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name). ### `oci:` The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification". Supported scopes use the form _directory_`:`_tag_, _directory_ (referring to a directory containing one or more tags), or any of the parent directories. *Note:* See `dir:` above for semantics and restrictions on the directory paths; they apply to `oci:` equivalently. ### `tarball:` The `tarball:` transport refers to tarred-up container root filesystems. Scopes are ignored. ## Policy Requirements Using the mechanisms above, a set of policy requirements is looked up. The policy requirements are represented as a JSON array of individual requirement objects. For an image to be accepted, *all* of the requirements must be satisfied simultaneously. The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author); in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object. The following requirement objects are supported: ### `insecureAcceptAnything` A simple requirement with the following syntax: ```json {"type":"insecureAcceptAnything"} ``` This requirement accepts any image (but note that other requirements in the array still apply). When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though. This is useful primarily for policy scopes where no signature verification is required; because the array of policy requirements must not be empty, this requirement is used to represent the lack of requirements explicitly. ### `reject` A simple requirement with the following syntax: ```json {"type":"reject"} ``` This requirement rejects every image, and every signature. ### `signedBy` This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key. ```js { "type": "signedBy", "keyType": "GPGKeys", /* The only currently supported value */ "keyPath": "/path/to/local/keyring/file", "keyData": "base64-encoded-keyring-data", "signedIdentity": identity_requirement } ``` Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys.
Only signatures made by these keys are accepted. The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image. One of the following alternatives is supported: - The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail. ```json {"type":"matchExact"} ``` - If the image identity carries a tag, the identity in the signature must exactly match; if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag). (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.) ```json {"type":"matchRepoDigestOrExact"} ``` - The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version. ```json {"type":"matchRepository"} ``` - The identity in the signature must exactly match a specified identity. This is useful e.g. when locally mirroring images signed using their public identity. ```js { "type": "exactReference", "dockerReference": docker_reference_value } ``` - The identity in the signature must be in the same repository as a specified identity. This combines the properties of `matchRepository` and `exactReference`. ```js { "type": "exactRepository", "dockerRepository": docker_repository_value } ``` If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`. *Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can only be used if a Docker-like image identity is provided by the transport. In particular, the `dir:` and `oci:` transports can only be used with `exactReference` or `exactRepository`. ## Examples It is *strongly* recommended to set the `default` policy to `reject`, and then selectively allow individual transports and scopes as desired. ### A reasonably locked-down system (Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.) ```js { "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */ "transports": { "docker": { /* Allow installing images from a specific repository namespace, without cryptographic verification. This namespace includes images like openshift/hello-openshift and openshift/origin. */ "docker.io/openshift": [{"type": "insecureAcceptAnything"}], /* Similarly, allow installing the “official” busybox images. Note how the fully expanded form, with the explicit /library/, must be used. */ "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}] /* Other docker: images use the global default policy and are rejected */ }, "dir": { "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */ }, "atomic": { /* The common case: using a known key for a repository or set of repositories */ "hostname:5000/myns/official": [ { "type": "signedBy", "keyType": "GPGKeys", "keyPath": "/path/to/official-pubkey.gpg" } ], /* A more complex example, for a repository which contains a mirror of a third-party product, which must be signed-off by local IT */ "hostname:5000/vendor/product": [ { /* Require the image to be signed by the original vendor, using the vendor's repository location.
*/ "type": "signedBy", "keyType": "GPGKeys", "keyPath": "/path/to/vendor-pubkey.gpg", "signedIdentity": { "type": "exactRepository", "dockerRepository": "vendor-hostname/product/repository" } }, { /* Require the image to _also_ be signed by a local reviewer. */ "type": "signedBy", "keyType": "GPGKeys", "keyPath": "/path/to/reviewer-pubkey.gpg" } ] } } } ``` ### Completely disable security, allow all images, do not trust any signatures ```json { "default": [{"type": "insecureAcceptAnything"}] } ``` ## SEE ALSO atomic(1) ## HISTORY August 2018, Rename to containers-policy.json(5) by Valentin Rothberg September 2016, Originally compiled by Miloslav Trmač image-4.0.1/docs/containers-registries.conf.5.md000066400000000000000000000151311354546467100215020ustar00rootroot00000000000000% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file % Brent Baude % Aug 2017 # NAME containers-registries.conf - Syntax of System Registry Configuration File # DESCRIPTION The CONTAINERS-REGISTRIES configuration file is a system-wide configuration file for container image registries. The file format is TOML. By default, the configuration file is located at `/etc/containers/registries.conf`. # FORMATS ## VERSION 2 VERSION 2 is the latest format of `registries.conf` and is currently in beta. This means that, in general, VERSION 1 should be used in production environments for now. ### GLOBAL SETTINGS `unqualified-search-registries` : An array of _host_[`:`_port_] registries to try when pulling an unqualified image, in order. ### NAMESPACED `[[registry]]` SETTINGS The bulk of the configuration is represented as an array of `[[registry]]` TOML tables; the settings may therefore differ among different registries as well as among different namespaces/repositories within a registry. #### Choosing a `[[registry]]` TOML table Given an image name, a single `[[registry]]` TOML table is chosen based on its `prefix` field. `prefix` : A prefix of the user-specified image name, i.e. using one of the following formats: - _host_[`:`_port_] - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…] - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_ - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_(`:`_tag_|`@`_digest_) The user-specified image name must start with the specified `prefix` (and continue with the appropriate separator) for a particular `[[registry]]` TOML table to be considered; (only) the TOML table with the longest match is used. As a special case, the `prefix` field can be missing; if so, it defaults to the value of the `location` field (described below). #### Per-namespace settings `insecure` : `true` or `false`. By default, container runtimes require TLS when retrieving images from a registry. If `insecure` is set to `true`, unencrypted HTTP as well as TLS connections with untrusted certificates are allowed. `blocked` : `true` or `false`. If `true`, pulling images with matching names is forbidden. #### Remapping and mirroring registries The user-specified image reference is, primarily, a "logical" image name, always used for naming the image. By default, the image reference also directly specifies the registry and repository to use, but the following options can be used to redirect the underlying accesses to different registry servers or locations (e.g. to support configurations with no access to the internet without having to change `Dockerfile`s, or to add redundancy).
`location` : Accepts the same format as the `prefix` field, and specifies the physical location of the `prefix`-rooted namespace. By default, this is equal to `prefix` (in which case `prefix` can be omitted and the `[[registry]]` TOML table can only specify `location`). Example: Given ``` prefix = "example.com/foo" location = "internal-registry-for-example.net/bar" ``` requests for the image `example.com/foo/myimage:latest` will actually work with the `internal-registry-for-example.net/bar/myimage:latest` image. `mirror` : An array of TOML tables specifying (possibly-partial) mirrors for the `prefix`-rooted namespace. The mirrors are attempted in the specified order; the first one that can be contacted and contains the image will be used (and if none of the mirrors contains the image, the primary location specified by the `registry.location` field, or using the unmodified user-specified reference, is tried last). Each TOML table in the `mirror` array can contain the following fields, with the same semantics as if specified in the `[[registry]]` TOML table directly: - `location` - `insecure` `mirror-by-digest-only` : `true` or `false`. If `true`, mirrors will only be used during pulling if the image reference includes a digest. Referencing an image by digest ensures that the same image is always used (whereas referencing an image by a tag may cause different registries to return different images if the tag mapping is out of sync). Note that if this is `true`, images referenced by a tag will only use the primary registry, failing if that registry is not accessible. *Note*: Redirection and mirrors are currently processed only when reading images, not when pushing to a registry; that may change in the future. ### EXAMPLE ``` unqualified-search-registries = ["example.com"] [[registry]] prefix = "example.com/foo" insecure = false blocked = false location = "internal-registry-for-example.com/bar" [[registry.mirror]] location = "example-mirror-0.local/mirror-for-foo" [[registry.mirror]] location = "example-mirror-1.local/mirrors/foo" insecure = true ``` Given the above, a pull of `example.com/foo/image:latest` will try: 1. `example-mirror-0.local/mirror-for-foo/image:latest` 2. `example-mirror-1.local/mirrors/foo/image:latest` 3. `internal-registry-for-example.com/bar/image:latest` in order, and use the first one that exists. ## VERSION 1 VERSION 1 can be used as an alternative to VERSION 2, but it does not support using registry mirrors, longest-prefix matches, or location rewriting. The TOML format is used to build a simple list of registries under three categories: `registries.search`, `registries.insecure`, and `registries.block`. You can list multiple registries using a comma-separated list. Search registries are used when the caller of a container runtime does not fully specify the container image that they want to execute. These registries are prepended, in turn, onto the front of the specified container image name until the named image is found at a registry. Note that `registries.insecure` can include any registry, not just the registries listed under `registries.search`. The `registries.insecure` and `registries.block` lists have the same meaning as the `insecure` and `blocked` fields in VERSION 2. ### EXAMPLE The following example configuration defines two searchable registries, one insecure registry, and two blocked registries.
``` [registries.search] registries = ['registry1.com', 'registry2.com'] [registries.insecure] registries = ['registry3.com'] [registries.block] registries = ['registry.untrusted.com', 'registry.unsafe.com'] ``` # HISTORY Mar 2019, Added additional configuration format by Sascha Grunert Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg Jun 2018, Updated by Tom Sweeney Aug 2017, Originally compiled by Brent Baude image-4.0.1/docs/containers-registries.d.5.md000066400000000000000000000131071354546467100210010ustar00rootroot00000000000000% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page % Miloslav Trmač % August 2016 # NAME containers-registries.d - Directory for various registries configurations # DESCRIPTION The registries configuration directory contains configuration for various registries (servers storing remote container images), and for content stored in them, so that the configuration does not have to be provided in command-line options over and over for every command, and so that it can be shared by all users of containers/image. By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`; applications may allow using a different directory instead. ## Directory Structure The directory may contain any number of files with the extension `.yaml`, each using the YAML format. Other than the mandatory extension, names of the files don’t matter. The contents of these files are merged together; to have a well-defined and easy-to-understand behavior, there can be only one configuration section describing a single namespace within a registry (in particular there can be at most one `default-docker` section across all files, and there can be at most one instance of any key under the `docker` section; these sections are documented later). Thus, it is forbidden to have two conflicting configurations for a single registry or scope, and it is also forbidden to split a configuration for a single registry or scope across more than one file (even if they are not semantically in conflict). ## Registries, Scopes and Search Order Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined: - `default-docker` is the _configuration section_ (as documented below) for registries implementing "Docker Registry HTTP API V2". This key is optional. - `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2", or namespaces and individual images within these registries, as keys; the value assigned to any such key is a _configuration section_. This key is optional. Scopes matching individual images are named Docker references *in the fully expanded form*, either using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (and a port if it differs from the default). Note that if a registry is accessed using a hostname+port configuration, the port-less hostname is _not_ used as parent scope. When searching for a configuration to apply for an individual container image, only the configuration for the most-precisely matching scope is used; configuration using more general scopes is ignored.
For example, if _any_ configuration exists for `docker.io/library/busybox`, the configuration for `docker.io` is ignored (even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`). ## Individual Configuration Sections A single configuration section is selected for a container image using the process described above. The configuration section is a YAML mapping, with the following keys: - `sigstore-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures). This key is optional; if it is missing, `sigstore` below is used. - `sigstore` defines a URL of the signature storage. This URL is used for reading existing signatures, and if `sigstore-staging` does not exist, also for adding or removing them. This key is optional; if it is missing, no signature storage is defined (no signatures are downloaded along with images, adding new signatures is possible only if `sigstore-staging` is defined). ## Examples ### Using Containers from Various Origins The following demonstrates how to consume and run images from various registries and namespaces: ```yaml docker: registry.database-supplier.com: sigstore: https://sigstore.database-supplier.com distribution.great-middleware.org: sigstore: https://security-team.great-middleware.org/sigstore docker.io/web-framework: sigstore: https://sigstore.web-framework.io:8080 ``` ### Developing and Signing Containers, Staging Signatures For developers in `example.com`: - Consume most container images using the public servers also used by clients. - Use a separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures. - Craft an individual exception for a single branch a specific developer is working on locally. ```yaml docker: registry.example.com: sigstore: https://registry-sigstore.example.com registry.example.com/mydepartment: sigstore: https://sigstore.mydepartment.example.com sigstore-staging: file:///mnt/mydepartment/sigstore-staging registry.example.com/mydepartment/myproject:mybranch: sigstore: http://localhost:4242/sigstore sigstore-staging: file:///home/useraccount/webroot/sigstore ``` ### A Global Default If a company publishes its products using a different domain and a different registry hostname for each of them, it is still possible to use a single signature storage server without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures. ```yaml default-docker: sigstore-staging: file:///mnt/company/common-sigstore-staging ``` # AUTHORS Miloslav Trmač image-4.0.1/docs/containers-signature.5.md000066400000000000000000000350701354546467100204030ustar00rootroot00000000000000% container-signature(5) Container signature format % Miloslav Trmač % March 2017 # Container signature format This document describes the format of container signatures, as implemented by the `github.com/containers/image/signature` package. Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package (preferably through the higher-level `signature.PolicyContext` interface) without having to care about the details of the format described below. This documentation exists primarily for maintainers of the package and to allow independent reimplementations.
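As a brief illustration of the recommended consumer path, the following is a minimal sketch (not part of the specification) of verifying an image against the system policy via `signature.PolicyContext`, assuming the v4 Go API of this library; the image reference is illustrative and error handling is simplified:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/image/v4/signature"
	"github.com/containers/image/v4/transports/alltransports"
)

func main() {
	ctx := context.Background()

	// Load the default policy (usually /etc/containers/policy.json).
	policy, err := signature.DefaultPolicy(nil)
	if err != nil {
		log.Fatal(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Destroy()

	// The image reference below is illustrative.
	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	img, err := ref.NewImage(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer img.Close()

	// Evaluates the configured policy requirements (including signature
	// verification) for the image, without the caller having to parse
	// untrusted signature payloads directly.
	allowed, err := pc.IsRunningImageAllowed(ctx, img)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("image allowed:", allowed)
}
```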
## High-level overview The signature provides an end-to-end authenticated claim that a container image has been approved by a specific party (e.g. the creator of the image as their work, an automated build system as a result of an automated build, a company IT department approving the image for production) under a specified _identity_ (e.g. an OS base image / specific application, with a specific version). A container signature consists of a cryptographic signature which identifies and authenticates who signed the image, and carries as a signed payload a JSON document. The JSON document identifies the image being signed, claims a specific identity of the image and, if applicable, contains other information about the image. The signatures do not modify the container image (the layers, configuration, manifest, …); e.g. their presence does not change the manifest digest used to identify the image in docker/distribution servers; rather, the signatures are associated with an immutable image. An image can have any number of signatures, so signature distribution systems SHOULD support associating more than one signature with an image. ## The cryptographic signature As distributed, the container signature is a blob which contains a cryptographic signature in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the JSON document and a signature of the JSON document; it is not a “detached signature” with independent blobs containing the JSON document and a cryptographic signature). Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880), but others may be added in the future. (The blob does not contain metadata identifying the cryptographic signature format. It is expected that most formats are sufficiently self-describing that this is not necessary and the configured expected public key provides another indication of the expected cryptographic signature format. Such metadata may be added in the future for newly added cryptographic signature formats, if necessary.) Consumers of container signatures SHOULD verify the cryptographic signature against one or more trusted public keys (e.g. defined in a [policy.json signature verification policy file](containers-policy.json.5.md)) before parsing or processing the JSON payload in _any_ way; in particular, they SHOULD stop processing the container signature if the cryptographic signature verification fails, without even starting to process the JSON payload. (Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature, and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs, e.g. to list the authors and image identities of signatures associated with a single container image; if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted or in any way usable for making policy decisions about the image.) ### OpenPGP signature verification When verifying a cryptographic signature in the OpenPGP format, the consumer MUST verify at least the following aspects of the signature (like the `github.com/containers/image/signature` package does): - The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3. (e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format). - The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
- The signature MUST be correctly formed and pass the cryptographic validation. - The signature MUST correctly authenticate the included JSON payload (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated). - The signature MUST NOT be expired. The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected. ## JSON processing and forward compatibility The payload of the cryptographic signature is a JSON document (RFC 7159). Consumers SHOULD parse it very strictly, refusing any signature which violates the expected format (e.g. missing members, incorrect member types) or can be interpreted ambiguously (e.g. a duplicated member in a JSON object). Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below) and if the semantics of the invalid document, as created by such an implementation, is clear. The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`, each a JSON object. The `critical` object MUST contain a `type` member identifying the document as a container signature (as defined [below](#criticaltype)) and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value. To ensure forward compatibility (allowing older signature consumers to correctly accept or reject signatures created at a later date, with possible extensions to this format), consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects, contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected. At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members. For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object, and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported. Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid. ## JSON data format An example of the full format follows, with detailed description below. To reiterate, consumers of the signature SHOULD perform successful cryptographic verification, and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above. ```json { "critical": { "type": "atomic container signature", "image": { "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e" }, "identity": { "docker-reference": "docker.io/library/busybox:latest" } }, "optional": { "creator": "some software package v1.0.1-35", "timestamp": 1483228800 } } ``` ### `critical` This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature. Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. ### `critical.type` This MUST be a string with a string value exactly equal to `atomic container signature` (three words, including the spaces).
Signature consumers MUST reject signatures which do not have this member or in which this member does not have exactly the expected value. (The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future; if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`, not by this document.) ### `critical.image` This MUST be a JSON object which identifies the container image this signature applies to. Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. (Currently only the `docker-manifest-digest` way of identifying a container image is defined; alternatives to this may be defined in the future, but existing consumers are required to reject signatures which use formats they do not support.) ### `critical.image.docker-manifest-digest` This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format. The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system. The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way (e.g. parsing the manifest further or downloading layers of the image). Implementation notes: * A single container image manifest may have several valid manifest digest values, using different algorithms. * For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests, the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob. ### `critical.identity` This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with version information), as asserted by the author of the signature. Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. (Currently only the `docker-reference` way of claiming an image identity/purpose is defined; alternatives to this may be defined in the future, but existing consumers are required to reject signatures which use formats they do not support.) ### `critical.identity.docker-reference` This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format, and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`). If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning, the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces). The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image (again, accounting for the `docker/distribution/reference` normalization semantics). In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image. However, depending on the specific application, users or system administrators may accept less specific matches (e.g.
ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest), or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image (e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images). The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](containers-policy.json.5.md). The `critical.identity.docker-reference` value SHOULD contain either a tag or digest; in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](containers-policy.json.5.md#signedby).) ### `optional` This MUST be a JSON object. Consumers SHOULD accept any members with unrecognized names in the `optional` object, and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported. Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid. ### `optional.creator` If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature. The contents of this string are not defined in detail; however, each implementation creating container signatures: - SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number) - SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number) changes whenever the format or semantics of the generated signature changes in any way; it SHOULD NOT be possible for two implementations which use a different format or semantics to have the same `optional.creator` value - SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp), and which makes it easy enough to recognize a range of versions of a specific implementation (e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering; the string should contain a version number, or at least a date of the commit). Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator` (perhaps augmented with `optional.timestamp`), and MAY change their processing of the signature based on these values (usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively), as long as the semantics of the invalid document, as created by such an implementation, is clear. If consumers of signatures do change their behavior based on the `optional.creator` value, they SHOULD take care that the way they process the signatures is not inconsistent with strictly validating signature consumers. (I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value if other implementations would completely reject the signature, but it would be very undesirable for the two kinds of implementations to accept the signature in different and inconsistent situations.)
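For illustration only, a consumer might recognize a version range of a specific implementation roughly as follows; the `example-signer` creator string and the version cutoff below are hypothetical, not part of this specification:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Matches e.g. "example-signer v1.0.35"; this creator format is hypothetical.
var exampleCreator = regexp.MustCompile(`^example-signer v([0-9]+)\.([0-9]+)\.([0-9]+)$`)

// needsWorkaround reports whether an optional.creator value identifies a
// (hypothetical) example-signer release older than v1.2.0, which a consumer
// might accommodate with relaxed processing as described above.
func needsWorkaround(creator string) bool {
	m := exampleCreator.FindStringSubmatch(creator)
	if m == nil {
		return false // unrecognized creator: apply strict validation only
	}
	major, _ := strconv.Atoi(m[1])
	minor, _ := strconv.Atoi(m[2])
	return major < 1 || (major == 1 && minor < 2)
}

func main() {
	fmt.Println(needsWorkaround("example-signer v1.1.9")) // true
	fmt.Println(needsWorkaround("example-signer v1.2.0")) // false
	fmt.Println(needsWorkaround("unknown tool v9.9.9"))   // false
}
```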
### `optional.timestamp` If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC). image-4.0.1/docs/containers-transports.5.md000066400000000000000000000121521354546467100206150ustar00rootroot00000000000000% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page % Valentin Rothberg % April 2019 ## NAME containers-transports - description of supported transports for copying and storing container images ## DESCRIPTION Tools which use the containers/image library, including skopeo(1), buildah(1), podman(1), all share a common syntax for referring to container images in various locations. The general form of the syntax is _transport:details_, where the details are dependent on the specified transport and are documented below. ### **containers-storage**:[**[**storage-specifier**]**]{image-id|docker-reference[@image-id]} An image located in a local containers storage. The format of _docker-reference_ is described in detail in the **docker** transport. The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory. The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored. The optional `options` are a comma-separated list of driver-specific options. Please refer to containers-storage.conf(5) for further information on the drivers and supported options. ### **dir:**_path_ An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection. ### **docker://**_docker-reference_ An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1). If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1). The containers-registries.conf(5) further allows for configuring various settings of a registry. Note that a _docker-reference_ has the following format: `name[:tag|@digest]`. While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do. Digests can also be used in an image destination as long as the manifest matches the provided digest. The digest of images can be explored with skopeo-inspect(1). If `name` does not contain a slash, it is treated as `docker.io/library/name`. Otherwise, the component before the first slash is checked if it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost). If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`. ### **docker-archive:**_path[:docker-reference]_ An image stored in a docker-save(1) formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest. It is further possible to copy data from stdin by specifying `docker-archive:/dev/stdin`, but note that the file used must be seekable.
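For example, such an archive could be created with skopeo-copy(1) (the output file name and the embedded reference are illustrative):

```
$ skopeo copy docker://docker.io/library/alpine:latest docker-archive:alpine.tar:alpine:latest
```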
### **docker-daemon:**_docker-reference|algo:digest_ An image stored in the docker daemon's internal storage. The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source. The _algo:digest_ refers to the image ID reported by docker-inspect(1). ### **oci:**_path[:tag]_ An image compliant with the "Open Container Image Layout Specification" at _path_. Using a _tag_ is optional and allows for storing multiple images at the same _path_. ### **oci-archive:**_path[:tag]_ An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_. ### **ostree:**_docker-reference[@/absolute/repo/path]_ An image in the local ostree(1) repository. _/absolute/repo/path_ defaults to _/ostree/repo_. ## Examples The following examples demonstrate how some of the containers transports can be used. The examples use skopeo-copy(1) for copying container images. **Copying an image from one registry to another**: ``` $ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest ``` **Copying an image from a running Docker daemon to a directory in the OCI layout**: ``` $ mkdir alpine-oci $ skopeo copy docker-daemon:alpine:latest oci:alpine-oci $ tree alpine-oci alpine-oci/ ├── blobs │   └── sha256 │   ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7 │   ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc │   └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7 ├── index.json └── oci-layout 2 directories, 5 files ``` **Copying an image from a registry to the local storage**: ``` $ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest ``` ## SEE ALSO docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5) ## AUTHORS Miloslav Trmač Valentin Rothberg image-4.0.1/docs/signature-protocols.md000066400000000000000000000204441354546467100201160ustar00rootroot00000000000000# Signature access protocols The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image. Some image transports (local storage formats and remote protocols) implement these signatures natively or trivially; for others, the protocol extensions described below are necessary. ## docker/distribution registries—separate storage ### Usage Any existing docker/distribution registry, whether or not it natively supports signatures, can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md). `registries.d` can be configured to use one storage URL for a whole docker/distribution server, or also separate URLs for smaller namespaces or individual repositories within the server (which e.g. allows image authors to manage their own signature storage while publishing the images on the public `docker.io` server). The signature storage URL defines a root of a path hierarchy. It can be either a `file:///…` URL, pointing to a local directory structure, or an `http`/`https` URL, pointing to a remote server. `file:///` signature storage can be both read and written, `http`/`https` only supports reading. The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
(This of course does not prevent other server implementations, e.g. an HTTP server reading signatures from a database.) The usual workflow for producing and distributing images using the separate storage mechanism is to configure the repository in `registries.d` with a `sigstore-staging` URL pointing to a private `file:///` staging area, and a `sigstore` URL pointing to a public web server. To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`), and then copy the created directory structure from the `file:///` staging area to a subdirectory of a webroot of the public web server so that the signatures are accessible using the public `sigstore` URL. The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to, set up a `sigstore` URL pointing to the public web server. ### Path structure Given a _base_ signature storage URL configured in `registries.d` as mentioned above, and a container image stored in a docker/distribution registry using the _fully-expanded_ name _hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`, _namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`), signatures are accessed using URLs of the form > _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_ where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest (i.e. even if the user referenced the image using a tag, the signature storage is always disambiguated using digest references). Note that in the URLs used for signatures, _digest-algo_ and _digest-value_ are separated using the `=` character, not `:` like when accessing the manifest using the docker/distribution API. Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1. Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1, and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist. Similarly, to add one more signature to an image, find the first _index_ which does not exist, and then store the new signature using that _index_ value. There is no way to list existing signatures other than iterating through the successive _index_ values, and no way to download all of the signatures at once. ### Examples For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e` (or as `busybox:latest` if the `latest` tag points to a manifest with the same digest), and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image, the following URLs would be accessed to download all signatures: > - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1` > - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2` > - … For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same `sigstore` URL, the signatures would be available at > `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1` and so on.
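A `registries.d` configuration implementing the staging workflow described above might look like this (the host names and paths are illustrative):

```yaml
docker:
  example.com/myproject:
    sigstore: https://example.com/sigstore
    sigstore-staging: file:///srv/sigstore-staging
```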
## (OpenShift) docker/distribution API extension As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides an extension of the docker/distribution API which allows simpler access to the signatures, using only the docker/distribution API endpoint. This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint, and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well), and it is the preferred way to implement signature storage in registries. See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API. To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…` path to read an array of signatures. Use only the signature objects which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`; ignore the other fields of the signature object. To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`, and `content` set to the signature. Also set `name` to a unique name with the form _digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL), and _per-image-name_ is any unique identifier. To add more than one signature, add them one at a time. This API does not allow deleting signatures. Note that because signatures are stored within the cluster-wide image objects, i.e. different namespaces cannot associate different sets of signatures to the same image, updating signatures requires cluster-wide access to the `imagesignatures` resource (by default available to the `system:image-signer` role). ## OpenShift-embedded registries The OpenShift-embedded registry implements the ordinary docker/distribution API, and it also exposes images through the OpenShift REST API (available through the “API master” servers). Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension), which is easier to set up and should usually be preferred. Continue reading for details on using older versions of OpenShift. As of https://github.com/openshift/origin/pull/9181, signatures are exposed through the OpenShift API (i.e. to access the complete image, it is necessary to use both APIs, in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints). To read the signature, any user with access to an image can use the `imagestreamimages` namespaced resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of the `ImageSignature` object. To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource, with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form _digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”), and _per-image-name_ is any unique identifier. Note that because signatures are stored within the cluster-wide image objects, i.e.
different namespaces cannot associate different sets of signatures to the same image, updating signatures requires cluster-wide access to the `imagesignatures` resource (by default available to the `system:image-signer` role), and deleting signatures is strongly discouraged (it deletes the signature from all namespaces which contain the same image). image-4.0.1/go.mod000066400000000000000000000046321354546467100137300ustar00rootroot00000000000000module github.com/containers/image/v4 go 1.11 require ( github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 // indirect github.com/BurntSushi/toml v0.3.1 github.com/VividCortex/ewma v1.1.1 // indirect github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8 // indirect github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b github.com/containers/storage v1.13.4 github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 github.com/docker/docker-credential-helpers v0.6.0 github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969 github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect github.com/etcd-io/bbolt v1.3.3 github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f // indirect github.com/gorilla/context v1.1.1 // indirect github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285 // indirect github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/imdario/mergo v0.3.5 github.com/klauspost/compress v1.8.1 github.com/klauspost/pgzip v1.2.1 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/kr/pretty v0.1.0 // indirect github.com/mattn/go-isatty v0.0.4 // indirect github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 github.com/opencontainers/selinux v1.2.2 github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 github.com/pkg/errors v0.8.1 github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 // indirect github.com/sirupsen/logrus v1.4.2 github.com/stretchr/testify v1.4.0 github.com/ulikunitz/xz v0.5.6 github.com/vbatts/tar-split v0.11.1 github.com/vbauerster/mpb v3.4.0+incompatible github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66 go.etcd.io/bbolt v1.3.3 // indirect golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 golang.org/x/net v0.0.0-20190628185345-da137c7871d7 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect k8s.io/client-go v0.0.0-20170217214107-bcde30fb7eae ) image-4.0.1/go.sum000066400000000000000000000336421354546467100137560ustar00rootroot00000000000000github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg= github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8 h1:ZZOFPzvZO3N0f4LIQvZi68F2XDAMl/gqBfFMVjY6B3Y= github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE= github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/storage v1.13.4 h1:j0bBaJDKbUHtAW1MXPFnwXJtqcH+foWeuXK1YaBV5GA= github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65 h1:4zlOyrJUbYnrvlzChJ+jP2J3i77Jbhm336NEuCv7kZo= github.com/docker/distribution v0.0.0-20170817175659-5f6282db7d65/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.0.0-20180522102801-da99009bbb11 h1:p8hSDXZgVhyh/C9bPlG8QMY64VeXtVfjmjIlzaQok5Q= github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.0 h1:5bhDRLn1roGiNjz8IezRngHxMfoeaXGyr0BeMHq4rD8= github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969 h1:p2WzwcFof6KwsloLgCiAKkU5DJSVgOKGdevswAmskvY= github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd h1:U3yHrYB7NWH2o3UFzJ1J+TknZqM9QQtF8KVIE6Qzrfs= github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f h1:r/AdTzqktq9nQpFlFePWcp+scVi+oFRajfjRJ3UnETg= github.com/gogo/protobuf v0.0.0-20170815085658-fcdc5011193f/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285 h1:pBGAMRKP7Tpv4mOq+RgzKz+jAj+ylo9O8PiNoMmCuu8= github.com/gorilla/mux v0.0.0-20170217192616-94e7d24fd285/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y= github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI= github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4= github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= 
github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0= github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg= github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 h1:kyf9snWXHvQc+yxE9imhdI8YAm4oKeZISlaAR+x73zs= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE= github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tLAD8IceomUCNw= github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b h1:6cLsL+2FW6dRAdl5iMtHgRogVCff0QpRi9653YmdcJA= github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66 h1:F6RPtD6im1kY4bmLByRlOLOZwsPP7mw7cxR1v2CotL0= github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v0.0.0-20190624233834-05ebafbffc79 h1:C+K4iPg1rIvmCf4JjelkbWv2jeWevEwp05Lz8XfTYgE= gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= k8s.io/client-go v0.0.0-20170217214107-bcde30fb7eae h1:B3EgNIqpnsZRu7Tms/u6i23BcsxtEKAqXrHt45OqNuw= k8s.io/client-go v0.0.0-20170217214107-bcde30fb7eae/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= image-4.0.1/image/000077500000000000000000000000001354546467100136775ustar00rootroot00000000000000image-4.0.1/image/docker_list.go000066400000000000000000000061361354546467100165360ustar00rootroot00000000000000package image import ( "context" "encoding/json" "fmt" "runtime" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) type platformSpec struct { Architecture string 
`json:"architecture"` OS string `json:"os"` OSVersion string `json:"os.version,omitempty"` OSFeatures []string `json:"os.features,omitempty"` Variant string `json:"variant,omitempty"` Features []string `json:"features,omitempty"` // removed in OCI } // A manifestDescriptor references a platform-specific manifest. type manifestDescriptor struct { manifest.Schema2Descriptor Platform platformSpec `json:"platform"` } type manifestList struct { SchemaVersion int `json:"schemaVersion"` MediaType string `json:"mediaType"` Manifests []manifestDescriptor `json:"manifests"` } // chooseDigestFromManifestList parses blob as a schema2 manifest list, // and returns the digest of the image appropriate for the current environment. func chooseDigestFromManifestList(sys *types.SystemContext, blob []byte) (digest.Digest, error) { wantedArch := runtime.GOARCH if sys != nil && sys.ArchitectureChoice != "" { wantedArch = sys.ArchitectureChoice } wantedOS := runtime.GOOS if sys != nil && sys.OSChoice != "" { wantedOS = sys.OSChoice } list := manifestList{} if err := json.Unmarshal(blob, &list); err != nil { return "", err } for _, d := range list.Manifests { if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { return d.Digest, nil } } return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) } func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { targetManifestDigest, err := chooseDigestFromManifestList(sys, manblob) if err != nil { return nil, err } manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) if err != nil { return nil, err } matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) if err != nil { return nil, errors.Wrap(err, "Error computing manifest digest") } if !matches { return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) } return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) } // ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate // for the current system from the manifest available from src. func ChooseManifestInstanceFromManifestList(ctx context.Context, sys *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, // probably along with manifest list editing. 
blob, mt, err := src.Manifest(ctx) if err != nil { return "", err } if mt != manifest.DockerV2ListMediaType { return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) } return chooseDigestFromManifestList(sys, blob) } image-4.0.1/image/docker_list_test.go000066400000000000000000000026361354546467100175760ustar00rootroot00000000000000package image import ( "bytes" "io/ioutil" "path/filepath" "testing" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestChooseDigestFromManifestList(t *testing.T) { manifest, err := ioutil.ReadFile(filepath.Join("fixtures", "schema2list.json")) require.NoError(t, err) // Match found for arch, expected := range map[string]digest.Digest{ "amd64": "sha256:030fcb92e1487b18c974784dcc110a93147c9fc402188370fbfd17efabffc6af", "s390x": "sha256:e5aa1b0a24620228b75382997a0977f609b3ca3a95533dafdef84c74cc8df642", // There are several "arm" images with different variants; // the current code returns the first match. NOTE: This is NOT an API promise. "arm": "sha256:9142d97ef280a7953cf1a85716de49a24cc1dd62776352afad67e635331ff77a", } { digest, err := chooseDigestFromManifestList(&types.SystemContext{ ArchitectureChoice: arch, OSChoice: "linux", }, manifest) require.NoError(t, err, arch) assert.Equal(t, expected, digest) } // Invalid manifest list _, err = chooseDigestFromManifestList(&types.SystemContext{ ArchitectureChoice: "amd64", OSChoice: "linux", }, bytes.Join([][]byte{manifest, []byte("!INVALID")}, nil)) assert.Error(t, err) // Not found _, err = chooseDigestFromManifestList(&types.SystemContext{OSChoice: "Unmatched"}, manifest) assert.Error(t, err) } image-4.0.1/image/docker_schema1.go000066400000000000000000000214131354546467100170770ustar00rootroot00000000000000package image import ( "context" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) type manifestSchema1 struct { m *manifest.Schema1 } func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { m, err := manifest.Schema1FromManifest(manifestBlob) if err != nil { return nil, err } return &manifestSchema1{m: m}, nil } // manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) { m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture) if err != nil { return nil, err } return &manifestSchema1{m: m}, nil } func (m *manifestSchema1) serialize() ([]byte, error) { return m.m.Serialize() } func (m *manifestSchema1) manifestMIMEType() string { return manifest.DockerV2Schema1SignedMediaType } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema1) ConfigInfo() types.BlobInfo { return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. 
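// For schema1 there is no separate config object (ConfigInfo above returns a BlobInfo with
// an empty digest), so this implementation always returns (nil, nil); callers that need a
// configuration can use OCIConfig below, which synthesizes one via a schema2 conversion.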
func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) { return nil, nil } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about // layers in the resulting configuration isn't guaranteed to be returned due to how // old image manifests work (docker v2s1 especially). func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { v2s2, err := m.convertToManifestSchema2(nil, nil) if err != nil { return nil, err } return v2s2.OCIConfig(ctx) } // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema1) LayerInfos() []types.BlobInfo { return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) } // EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. // It returns false if the manifest does not embed a Docker reference. // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { // This is a bit convoluted: We can’t just have a "get embedded docker reference" method // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually // embed a full docker/distribution reference, but only the repo name and tag (without the host name). // So we would have to provide a “return repo without host name, and tag” getter for the generic code, // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the // generic copy code needs to know about is reference.Named and that a manifest may need updating // for some destinations. name := reference.Path(ref) var tag string if tagged, isTagged := ref.(reference.NamedTagged); isTagged { tag = tagged.Tag() } else { tag = "" } return m.m.Name != name || m.m.Tag != tag } // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { return m.m.Inspect(nil) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive // (most importantly it forces us to download the full layers even if they are already present at the destination). func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) } // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object.
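// For illustration, converting a schema1 image to schema2 might look like this (a sketch
// mirroring the tests in docker_schema1_test.go; uploadedLayerInfos and layerDiffIDs are
// hypothetical caller-side values, and error handling is elided):
//
//	updated, _ := img.UpdatedImage(ctx, types.ManifestUpdateOptions{
//		ManifestMIMEType: manifest.DockerV2Schema2MediaType,
//		InformationOnly: types.ManifestUpdateInformation{
//			LayerInfos:   uploadedLayerInfos, // sizes for the schema2 layer descriptors
//			LayerDiffIDs: layerDiffIDs,       // uncompressed digests for the config rootfs
//		},
//	})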
func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} if options.LayerInfos != nil { if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { return nil, err } } if options.EmbeddedDockerReference != nil { copy.m.Name = reference.Path(options.EmbeddedDockerReference) if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { copy.m.Tag = tagged.Tag() } else { copy.m.Tag = "" } } switch options.ManifestMIMEType { case "": // No conversion, OK case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so, // handle conversions between them by doing nothing. case manifest.DockerV2Schema2MediaType: m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) if err != nil { return nil, err } return memoryImageFromManifest(m2), nil case imgspecv1.MediaTypeImageManifest: // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) if err != nil { return nil, err } return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ ManifestMIMEType: imgspecv1.MediaTypeImageManifest, InformationOnly: options.InformationOnly, }) default: return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) } return memoryImageFromManifest(&copy), nil } // Based on github.com/docker/docker/distribution/pull_v2.go func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { if len(m.m.ExtractedV1Compatibility) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) } if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) } if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } // Build a list of the diffIDs for the non-empty layers.
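// Note the index arithmetic in the loop below: schema1 orders FSLayers/history
// newest-first while schema2 orders layers oldest-first, so v1Index maps to
// v2Index = (len-1) - v1Index, and entries marked ThrowAway (empty layers) are skipped.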
diffIDs := []digest.Digest{} var layers []manifest.Schema2Descriptor for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { var size int64 if uploadedLayerInfos != nil { size = uploadedLayerInfos[v2Index].Size } var d digest.Digest if layerDiffIDs != nil { d = layerDiffIDs[v2Index] } layers = append(layers, manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: size, Digest: m.m.FSLayers[v1Index].BlobSum, }) diffIDs = append(diffIDs, d) } } configJSON, err := m.m.ToSchema2Config(diffIDs) if err != nil { return nil, err } configDescriptor := manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.container.image.v1+json", Size: int64(len(configJSON)), Digest: digest.FromBytes(configJSON), } return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil } image-4.0.1/image/docker_schema1_test.go000066400000000000000000000516021354546467100201410ustar00rootroot00000000000000package image import ( "context" "encoding/json" "io/ioutil" "path/filepath" "testing" "time" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var schema1FixtureLayerInfos = []types.BlobInfo{ { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 74876245, Digest: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4", }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 1239, Digest: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a", }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 78339724, Digest: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e", }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 76857203, Digest: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6", }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 25923380, Digest: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788", }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 23511300, Digest: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d", }, } var schema1FixtureLayerDiffIDs = []digest.Digest{ "sha256:e1d829eddb62dc49f1c56dbf8acd0c71299b3996115399de853a9d66d81b822f", "sha256:02404b4d7e5d89b1383ca346b4462b199128aa4b238c5a2b2c186004ac148ba8", "sha256:45fad80a4b1cec165c421eb570dec312d825bd8fac362e255028fa3f2169148d", "sha256:7ddef8efd44586e54880ec4797458eac87b368544c438d7e7c63fbc0d9a7ae97", "sha256:b56b16b6407ba1b86252e7e50f98f142cf6844fab42e4495d56ebb7ce559e2af", "sha256:9bd63850e406167b4751f5050f6dc0ebd789bb5ef5e5c6c31ed062bda8c063e8", } func manifestSchema1FromFixture(t *testing.T, fixture string) genericManifest { manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture)) require.NoError(t, err) m, err := manifestSchema1FromManifest(manifest) require.NoError(t, err) return m } func manifestSchema1FromComponentsLikeFixture(t *testing.T) genericManifest { ref, err := reference.ParseNormalizedNamed("rhosp12/openstack-nova-api:latest") require.NoError(t, err) m, err := manifestSchema1FromComponents(ref, 
[]manifest.Schema1FSLayers{ {BlobSum: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"}, {BlobSum: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"}, {BlobSum: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"}, {BlobSum: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"}, {BlobSum: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"}, {BlobSum: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"}, }, []manifest.Schema1History{ {V1Compatibility: "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"kolla_start\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"container_config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"USER 
[nova]\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"created\":\"2018-01-25T00:37:48.268558Z\",\"docker_version\":\"1.12.6\",\"id\":\"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf\",\"os\":\"linux\",\"parent\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\"}"}, {V1Compatibility: "{\"id\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\",\"parent\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"created\":\"2018-01-24T23:08:25.300741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"}, {V1Compatibility: "{\"id\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"parent\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"created\":\"2018-01-24T22:00:57.807862Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"}, {V1Compatibility: "{\"id\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"parent\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"created\":\"2018-01-24T21:40:32.494686Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"}, {V1Compatibility: "{\"id\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"parent\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"created\":\"2017-11-21T16:49:37.292899Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'\"]},\"author\":\"Red Hat, Inc.\"}"}, {V1Compatibility: "{\"id\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"comment\":\"Imported from -\",\"created\":\"2017-11-21T16:47:27.755341705Z\",\"container_config\":{\"Cmd\":[\"\"]}}"}, }, "amd64") require.NoError(t, err) return m } func TestManifestSchema1FromManifest(t *testing.T) { // This just tests that the JSON can be loaded; we 
test that the parsed // values are correctly returned in tests for the individual getter methods. _ = manifestSchema1FromFixture(t, "schema1.json") // FIXME: Detailed coverage of manifest.Schema1FromManifest failures _, err := manifestSchema1FromManifest([]byte{}) assert.Error(t, err) } func TestManifestSchema1FromComponents(t *testing.T) { // This just smoke-tests that the manifest can be created; we test that the parsed // values are correctly returned in tests for the individual getter methods. _ = manifestSchema1FromComponentsLikeFixture(t) // Error on invalid input _, err := manifestSchema1FromComponents(nil, []manifest.Schema1FSLayers{}, []manifest.Schema1History{}, "amd64") assert.Error(t, err) } func TestManifestSchema1Serialize(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { serialized, err := m.serialize() require.NoError(t, err) var contents map[string]interface{} err = json.Unmarshal(serialized, &contents) require.NoError(t, err) original, err := ioutil.ReadFile("fixtures/schema1.json") require.NoError(t, err) var originalContents map[string]interface{} err = json.Unmarshal(original, &originalContents) require.NoError(t, err) // Drop the signature which is generated by AddDummyV2S1Signature delete(contents, "signatures") delete(originalContents, "signatures") // We would ideally like to compare “serialized” with some transformation of // “original”, but the ordering of fields in JSON maps is undefined, so this is // easier. assert.Equal(t, originalContents, contents) } } func TestManifestSchema1ManifestMIMEType(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, m.manifestMIMEType()) } } func TestManifestSchema1ConfigInfo(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { assert.Equal(t, types.BlobInfo{Digest: ""}, m.ConfigInfo()) } } func TestManifestSchema1ConfigBlob(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { blob, err := m.ConfigBlob(context.Background()) require.NoError(t, err) assert.Nil(t, blob) } } func TestManifestSchema1OCIConfig(t *testing.T) { m := manifestSchema1FromFixture(t, "schema1-to-oci-config.json") configOCI, err := m.OCIConfig(context.Background()) require.NoError(t, err) // FIXME: A more comprehensive test? 
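// One possible shape for that (hypothetical; the expected value would have to be
// hand-built from fixtures/schema1-to-oci-config.json):
//
//	var expected imgspecv1.Image // filled in from the fixture
//	assert.Equal(t, expected, *configOCI)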
assert.Equal(t, "/pause", configOCI.Config.Entrypoint[0]) } func TestManifestSchema1LayerInfo(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { assert.Equal(t, []types.BlobInfo{ { Digest: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4", Size: -1, }, { Digest: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a", Size: -1, }, { Digest: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e", Size: -1, }, { Digest: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6", Size: -1, }, { Digest: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788", Size: -1, }, { Digest: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d", Size: -1, }, }, m.LayerInfos()) } } func TestManifestSchema1EmbeddedDockerReferenceConflicts(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { for name, expected := range map[string]bool{ "rhosp12/openstack-nova-api:latest": false, // Exactly the embedded reference "example.com/rhosp12/openstack-nova-api:latest": false, // A different host name, but path and tag match "docker.io:3333/rhosp12/openstack-nova-api:latest": false, // A different port, but path and tag match "busybox": true, // Entirely different, minimal "example.com:5555/ns/repo:tag": true, // Entirely different, maximal "rhosp12/openstack-nova-api": true, // Missing tag "rhosp12/openstack-nova-api:notlatest": true, // Different tag "notrhosp12/openstack-nova-api:latest": true, // Different namespace "rhosp12/notopenstack-nova-api:latest": true, // Different repo } { ref, err := reference.ParseNormalizedNamed(name) require.NoError(t, err, name) conflicts := m.EmbeddedDockerReferenceConflicts(ref) assert.Equal(t, expected, conflicts, name) } } } func TestManifestSchema1Inspect(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { ii, err := m.Inspect(context.Background()) require.NoError(t, err) created := time.Date(2018, 1, 25, 0, 37, 48, 268558000, time.UTC) assert.Equal(t, types.ImageInspectInfo{ Tag: "latest", Created: &created, DockerVersion: "1.12.6", Labels: map[string]string{ "Kolla-SHA": "5.0.0-39-g6f1b947b", "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "build-date": "2018-01-25T00:32:27.807261", "com.redhat.build-host": "ip-10-29-120-186.ec2.internal", "com.redhat.component": "openstack-nova-api-docker", "description": "Red Hat OpenStack Platform 12.0 nova-api", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api", "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api", "io.openshift.tags": "rhosp osp openstack osp-12.0", "kolla_version": "stable/pike", "name": "rhosp12/openstack-nova-api", "release": "20180124.1", "summary": "Red Hat OpenStack Platform 12.0 nova-api", "tripleo-common_version": "7.6.3-23-g4891cfe", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1", "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "12.0", "version-release": "12.0-20180124.1", }, Architecture: "amd64", Os: "linux", Layers: []string{ 
"sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4", "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a", "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e", "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6", "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788", "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d", }, Env: []string{ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ ", }, }, *ii) } } func TestManifestSchema1UpdatedImageNeedsLayerDiffIDs(t *testing.T) { for _, m := range []genericManifest{ manifestSchema1FromFixture(t, "schema1.json"), manifestSchema1FromComponentsLikeFixture(t), } { for mt, expected := range map[string]bool{ "": false, manifest.DockerV2Schema1MediaType: false, manifest.DockerV2Schema1SignedMediaType: false, manifest.DockerV2Schema2MediaType: true, imgspecv1.MediaTypeImageManifest: true, } { needsDiffIDs := m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{ ManifestMIMEType: mt, }) assert.Equal(t, expected, needsDiffIDs, mt) } } } func TestManifestSchema1UpdatedImage(t *testing.T) { original := manifestSchema1FromFixture(t, "schema1.json") // LayerInfos: layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: layerInfos, }) require.NoError(t, err) assert.Equal(t, layerInfos, res.LayerInfos()) _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: append(layerInfos, layerInfos[0]), }) assert.Error(t, err) // EmbeddedDockerReference: for _, refName := range []string{ "busybox", "busybox:notlatest", "rhosp12/openstack-nova-api:latest", } { embeddedRef, err := reference.ParseNormalizedNamed(refName) require.NoError(t, err) res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ EmbeddedDockerReference: embeddedRef, }) require.NoError(t, err) // The previous embedded docker reference now does not match. nonEmbeddedRef, err := reference.ParseNormalizedNamed("rhosp12/openstack-nova-api:latest") require.NoError(t, err) conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef) assert.Equal(t, refName != "rhosp12/openstack-nova-api:latest", conflicts) } // ManifestMIMEType: // Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.) 
for _, mime := range []string{ manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageManifest, } { _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: mime, InformationOnly: types.ManifestUpdateInformation{ LayerInfos: schema1FixtureLayerInfos, LayerDiffIDs: schema1FixtureLayerDiffIDs, }, }) assert.NoError(t, err, mime) } for _, mime := range []string{ "this is invalid", } { _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: mime, }) assert.Error(t, err, mime) } // m hasn’t been changed: m2 := manifestSchema1FromFixture(t, "schema1.json") typedOriginal, ok := original.(*manifestSchema1) require.True(t, ok) typedM2, ok := m2.(*manifestSchema1) require.True(t, ok) assert.Equal(t, *typedM2, *typedOriginal) } func TestManifestSchema1ConvertToSchema2(t *testing.T) { original := manifestSchema1FromFixture(t, "schema1.json") res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema2MediaType, InformationOnly: types.ManifestUpdateInformation{ LayerInfos: schema1FixtureLayerInfos, LayerDiffIDs: schema1FixtureLayerDiffIDs, }, }) require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) assert.Equal(t, manifest.DockerV2Schema2MediaType, mt) byHandJSON, err := ioutil.ReadFile("fixtures/schema1-to-schema2.json") require.NoError(t, err) var converted, byHand map[string]interface{} err = json.Unmarshal(byHandJSON, &byHand) require.NoError(t, err) err = json.Unmarshal(convertedJSON, &converted) delete(converted, "config") delete(byHand, "config") require.NoError(t, err) assert.Equal(t, byHand, converted) convertedConfig, err := res.ConfigBlob(context.Background()) require.NoError(t, err) byHandConfig, err := ioutil.ReadFile("fixtures/schema1-to-schema2-config.json") require.NoError(t, err) converted = map[string]interface{}{} byHand = map[string]interface{}{} err = json.Unmarshal(byHandConfig, &byHand) require.NoError(t, err) err = json.Unmarshal(convertedConfig, &converted) require.NoError(t, err) assert.Equal(t, byHand, converted) // FIXME? Test also the various failure cases, if only to see that we don't crash? } // FIXME: Schema1→OCI conversion untested image-4.0.1/image/docker_schema2.go000066400000000000000000000337341354546467100171110ustar00rootroot00000000000000package image import ( "bytes" "context" "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io/ioutil" "strings" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/blobinfocache/none" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is // a non-zero embedded timestamp; we could zero that, but that would just waste storage space // in registries, so let’s use the same values. 
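// For reference, an equivalent payload could be regenerated roughly like this (a sketch;
// the exact bytes match only if the gzip header's timestamp and OS fields are reproduced):
//
//	var buf bytes.Buffer
//	zw := gzip.NewWriter(&buf)
//	zw.Write(make([]byte, 1024)) // an empty tar stream is 1024 NUL bytes
//	zw.Close()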
var GzippedEmptyLayer = []byte{ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, } // GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") type manifestSchema2 struct { src types.ImageSource // May be nil if configBlob is not nil configBlob []byte // If set, corresponds to contents of ConfigDescriptor. m *manifest.Schema2 } func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { m, err := manifest.Schema2FromManifest(manifestBlob) if err != nil { return nil, err } return &manifestSchema2{ src: src, m: m, }, nil } // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { return &manifestSchema2{ src: src, configBlob: configBlob, m: manifest.Schema2FromComponents(config, layers), } } func (m *manifestSchema2) serialize() ([]byte, error) { return m.m.Serialize() } func (m *manifestSchema2) manifestMIMEType() string { return m.m.MediaType } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema2) ConfigInfo() types.BlobInfo { return m.m.ConfigInfo() } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about // layers in the resulting configuration isn't guaranteed to be returned due to how // old image manifests work (docker v2s1 especially). func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { configBlob, err := m.ConfigBlob(ctx) if err != nil { return nil, err } // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields // than OCI v1. This unmarshal makes sure we drop docker v2s2 // fields that aren't needed in OCI v1. configOCI := &imgspecv1.Image{} if err := json.Unmarshal(configBlob, configOCI); err != nil { return nil, err } return configOCI, nil } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { if m.configBlob == nil { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache) if err != nil { return nil, err } defer stream.Close() blob, err := ioutil.ReadAll(stream) if err != nil { return nil, err } computedDigest := digest.FromBytes(blob) if computedDigest != m.m.ConfigDescriptor.Digest { return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) } m.configBlob = blob } return m.configBlob, nil } // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant.
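// (Duplicates can occur legitimately, e.g. when two build steps produce bit-identical
// layers; deduplicating them here would change the image, hence the warning.)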
func (m *manifestSchema2) LayerInfos() []types.BlobInfo { return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) } // EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. // It returns false if the manifest does not embed a Docker reference. // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { return false } // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { getter := func(info types.BlobInfo) ([]byte, error) { if info.Digest != m.ConfigInfo().Digest { // Shouldn't ever happen return nil, errors.New("asked for a different config blob") } config, err := m.ConfigBlob(ctx) if err != nil { return nil, err } return config, nil } return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive // (most importantly it forces us to download the full layers even if they are already present at the destination). func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { return false } // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. src: m.src, configBlob: m.configBlob, m: manifest.Schema2Clone(m.m), } if options.LayerInfos != nil { if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
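// Routing in the switch below: schema1 targets go through convertToManifestSchema1, which
// needs InformationOnly.Destination so it can upload GzippedEmptyLayer for empty history
// entries; the OCI target goes through convertToManifestOCI1, which only rewrites the
// config descriptor and layer media types; anything else is rejected.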
switch options.ManifestMIMEType { case "": // No conversion, OK case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination) case imgspecv1.MediaTypeImageManifest: return copy.convertToManifestOCI1(ctx) default: return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) } return memoryImageFromManifest(&copy), nil } func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { return imgspecv1.Descriptor{ MediaType: d.MediaType, Size: d.Size, Digest: d.Digest, URLs: d.URLs, } } func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) { configOCI, err := m.OCIConfig(ctx) if err != nil { return nil, err } configOCIBytes, err := json.Marshal(configOCI) if err != nil { return nil, err } config := imgspecv1.Descriptor{ MediaType: imgspecv1.MediaTypeImageConfig, Size: int64(len(configOCIBytes)), Digest: digest.FromBytes(configOCIBytes), } layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) for idx := range layers { layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) switch m.m.LayersDescriptors[idx].MediaType { case manifest.DockerV2Schema2ForeignLayerMediaType: layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip case manifest.DockerV2SchemaLayerMediaTypeUncompressed: layers[idx].MediaType = imgspecv1.MediaTypeImageLayer case manifest.DockerV2Schema2LayerMediaType: layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) } } m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) return memoryImageFromManifest(m1), nil } // Based on docker/distribution/manifest/schema1/config_builder.go func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) { configBytes, err := m.ConfigBlob(ctx) if err != nil { return nil, err } imageConfig := &manifest.Schema2Image{} if err := json.Unmarshal(configBytes, imageConfig); err != nil { return nil, err } // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) history := make([]manifest.Schema1History, len(imageConfig.History)) nonemptyLayerIndex := 0 var parentV1ID string // Set in the loop v1ID := "" haveGzippedEmptyLayer := false if len(imageConfig.History) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) } for v2Index, historyEntry := range imageConfig.History { parentV1ID = v1ID v1Index := len(imageConfig.History) - 1 - v2Index var blobDigest digest.Digest if historyEntry.EmptyLayer { if !haveGzippedEmptyLayer { logrus.Debugf("Uploading empty layer during conversion to schema 1") // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, none.NoCache, false) if err != nil { return nil, errors.Wrap(err, "Error uploading empty layer") } if info.Digest != GzippedEmptyLayerDigest { return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest) } haveGzippedEmptyLayer = true } blobDigest = GzippedEmptyLayerDigest } else { if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) } blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest nonemptyLayerIndex++ } // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) if err != nil { return nil, err } v1ID = v fakeImage := manifest.Schema1V1Compatibility{ ID: v1ID, Parent: parentV1ID, Comment: historyEntry.Comment, Created: historyEntry.Created, Author: historyEntry.Author, ThrowAway: historyEntry.EmptyLayer, } fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} v1CompatibilityBytes, err := json.Marshal(&fakeImage) if err != nil { return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) } fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} // Note that parentV1ID of the top layer is preserved when exiting this loop } // Now patch in real configuration for the top layer (v1Index == 0) v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. if err != nil { return nil, err } v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) if err != nil { return nil, err } history[0].V1Compatibility = string(v1Config) m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) if err != nil { return nil, err // This should never happen, we should have created all the components correctly. } return memoryImageFromManifest(m1), nil } func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { if err := blobDigest.Validate(); err != nil { return "", err } parts := append([]string{blobDigest.Hex()}, others...) 
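// i.e. roughly v1ID = hex(SHA-256(strings.Join(parts, " "))) over the blob digest hex,
// the parent ID, and (for the top layer) the config JSON; this matches Docker's
// derivation, so converted images get stable, reproducible v1 IDs.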
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) return hex.EncodeToString(v1IDHash[:]), nil } func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { // Preserve everything we don't specifically know about. // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) rawContents := map[string]*json.RawMessage{} if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! return nil, err } delete(rawContents, "rootfs") delete(rawContents, "history") updates := map[string]interface{}{"id": v1ID} if parentV1ID != "" { updates["parent"] = parentV1ID } if throwaway { updates["throwaway"] = throwaway } for field, value := range updates { encoded, err := json.Marshal(value) if err != nil { return nil, err } rawContents[field] = (*json.RawMessage)(&encoded) } return json.Marshal(rawContents) } image-4.0.1/image/docker_schema2_test.go000066400000000000000000000524051354546467100201440ustar00rootroot00000000000000package image import ( "bytes" "context" "encoding/json" "io" "io/ioutil" "path/filepath" "testing" "time" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // unusedImageSource is used when we don't expect the ImageSource to be used in our tests. type unusedImageSource struct{} func (f unusedImageSource) Reference() types.ImageReference { panic("Unexpected call to a mock function") } func (f unusedImageSource) Close() error { panic("Unexpected call to a mock function") } func (f unusedImageSource) GetManifest(context.Context, *digest.Digest) ([]byte, string, error) { panic("Unexpected call to a mock function") } func (f unusedImageSource) HasThreadSafeGetBlob() bool { panic("Unexpected call to a mock function") } func (f unusedImageSource) GetBlob(context.Context, types.BlobInfo, types.BlobInfoCache) (io.ReadCloser, int64, error) { panic("Unexpected call to a mock function") } func (f unusedImageSource) GetSignatures(context.Context, *digest.Digest) ([][]byte, error) { panic("Unexpected call to a mock function") } func (f unusedImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { panic("Unexpected call to a mock function") } func manifestSchema2FromFixture(t *testing.T, src types.ImageSource, fixture string, mustFail bool) genericManifest { manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture)) require.NoError(t, err) m, err := manifestSchema2FromManifest(src, manifest) if mustFail { require.Error(t, err) } else { require.NoError(t, err) } return m } func manifestSchema2FromComponentsLikeFixture(configBlob []byte) genericManifest { return manifestSchema2FromComponents(manifest.Schema2Descriptor{ MediaType: "application/octet-stream", Size: 5940, Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", }, nil, configBlob, []manifest.Schema2Descriptor{ { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: 51354364, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: 
"sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: 150, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: 11739507, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291, }, }) } func TestManifestSchema2FromManifest(t *testing.T) { // This just tests that the JSON can be loaded; we test that the parsed // values are correctly returned in tests for the individual getter methods. _ = manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false) _, err := manifestSchema2FromManifest(nil, []byte{}) assert.Error(t, err) } func TestManifestSchema2FromComponents(t *testing.T) { // This just smoke-tests that the manifest can be created; we test that the parsed // values are correctly returned in tests for the individual getter methods. _ = manifestSchema2FromComponentsLikeFixture(nil) } func TestManifestSchema2Serialize(t *testing.T) { for _, m := range []genericManifest{ manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false), manifestSchema2FromComponentsLikeFixture(nil), } { serialized, err := m.serialize() require.NoError(t, err) var contents map[string]interface{} err = json.Unmarshal(serialized, &contents) require.NoError(t, err) original, err := ioutil.ReadFile("fixtures/schema2.json") require.NoError(t, err) var originalContents map[string]interface{} err = json.Unmarshal(original, &originalContents) require.NoError(t, err) // We would ideally like to compare “serialized” with some transformation of // “original”, but the ordering of fields in JSON maps is undefined, so this is // easier. 
assert.Equal(t, originalContents, contents) } } func TestManifestSchema2ManifestMIMEType(t *testing.T) { for _, m := range []genericManifest{ manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false), manifestSchema2FromComponentsLikeFixture(nil), } { assert.Equal(t, manifest.DockerV2Schema2MediaType, m.manifestMIMEType()) } } func TestManifestSchema2ConfigInfo(t *testing.T) { for _, m := range []genericManifest{ manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false), manifestSchema2FromComponentsLikeFixture(nil), } { assert.Equal(t, types.BlobInfo{ Size: 5940, Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", MediaType: "application/octet-stream", }, m.ConfigInfo()) } } // configBlobImageSource allows testing various GetBlob behaviors in .ConfigBlob() type configBlobImageSource struct { unusedImageSource // We inherit almost all of the methods, which just panic() f func(digest digest.Digest) (io.ReadCloser, int64, error) } func (f configBlobImageSource) GetBlob(ctx context.Context, info types.BlobInfo, _ types.BlobInfoCache) (io.ReadCloser, int64, error) { if info.Digest.String() != "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" { panic("Unexpected digest in GetBlob") } return f.f(info.Digest) } func TestManifestSchema2ConfigBlob(t *testing.T) { realConfigJSON, err := ioutil.ReadFile("fixtures/schema2-config.json") require.NoError(t, err) for _, c := range []struct { cbISfn func(digest digest.Digest) (io.ReadCloser, int64, error) blob []byte }{ // Success {func(digest digest.Digest) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil }, realConfigJSON}, // Various kinds of failures {nil, nil}, {func(digest digest.Digest) (io.ReadCloser, int64, error) { return nil, -1, errors.New("Error returned from GetBlob") }, nil}, {func(digest digest.Digest) (io.ReadCloser, int64, error) { reader, writer := io.Pipe() writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob")) return reader, 1, nil }, nil}, {func(digest digest.Digest) (io.ReadCloser, int64, error) { nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest") return ioutil.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil }, nil}, } { var src types.ImageSource if c.cbISfn != nil { src = configBlobImageSource{unusedImageSource{}, c.cbISfn} } else { src = nil } m := manifestSchema2FromFixture(t, src, "schema2.json", false) blob, err := m.ConfigBlob(context.Background()) if c.blob != nil { assert.NoError(t, err) assert.Equal(t, c.blob, blob) } else { assert.Error(t, err) } } // Generally configBlob should match ConfigInfo; we don’t quite need it to, and this mismatch // verifies that ConfigBlob() returns the original contents instead // of reading an object from elsewhere. configBlob := []byte("config blob which does not match ConfigInfo") // This just tests that the manifest can be created; we test that the parsed // values are correctly returned in tests for the individual getter methods.
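	// (Sketch of the expectation that follows: because this manifest is built from in-memory components, ConfigBlob() should hand back exactly the bytes supplied above without consulting any ImageSource; the deliberately mismatched digest would expose any such lookup.)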
m := manifestSchema2FromComponentsLikeFixture(configBlob) cb, err := m.ConfigBlob(context.Background()) require.NoError(t, err) assert.Equal(t, configBlob, cb) } func TestManifestSchema2LayerInfo(t *testing.T) { for _, m := range []genericManifest{ manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false), manifestSchema2FromComponentsLikeFixture(nil), } { assert.Equal(t, []types.BlobInfo{ { Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: 51354364, MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", }, { Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: 150, MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", }, { Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: 11739507, MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", }, { Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833, MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", }, { Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291, MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", }, }, m.LayerInfos()) } } func TestManifestSchema2EmbeddedDockerReferenceConflicts(t *testing.T) { for _, m := range []genericManifest{ manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false), manifestSchema2FromComponentsLikeFixture(nil), } { for _, name := range []string{"busybox", "example.com:5555/ns/repo:tag"} { ref, err := reference.ParseNormalizedNamed(name) require.NoError(t, err) conflicts := m.EmbeddedDockerReferenceConflicts(ref) assert.False(t, conflicts) } } } func TestManifestSchema2Inspect(t *testing.T) { configJSON, err := ioutil.ReadFile("fixtures/schema2-config.json") require.NoError(t, err) m := manifestSchema2FromComponentsLikeFixture(configJSON) ii, err := m.Inspect(context.Background()) require.NoError(t, err) created := time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC) assert.Equal(t, types.ImageInspectInfo{ Tag: "", Created: &created, DockerVersion: "1.12.1", Labels: map[string]string{}, Architecture: "amd64", Os: "linux", Layers: []string{ "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", }, Env: []string{ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HTTPD_PREFIX=/usr/local/apache2", "HTTPD_VERSION=2.4.23", "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=httpd/httpd-2.4.23.tar.bz2", "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", }, }, *ii) // nil configBlob will trigger an error in m.ConfigBlob() m = manifestSchema2FromComponentsLikeFixture(nil) _, err = m.Inspect(context.Background()) assert.Error(t, err) m = manifestSchema2FromComponentsLikeFixture([]byte("invalid JSON")) _, err = m.Inspect(context.Background()) assert.Error(t, err) } func TestManifestSchema2UpdatedImageNeedsLayerDiffIDs(t *testing.T) { for _, m := range []genericManifest{ manifestSchema2FromFixture(t, unusedImageSource{}, "schema2.json", false), 
manifestSchema2FromComponentsLikeFixture(nil), } { assert.False(t, m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, })) } } // schema2ImageSource is plausible enough for schema conversions in manifestSchema2.UpdatedImage() to work. type schema2ImageSource struct { configBlobImageSource ref reference.Named } func (s2is *schema2ImageSource) Reference() types.ImageReference { return refImageReferenceMock{s2is.ref} } // refImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference. type refImageReferenceMock struct{ reference.Named } func (ref refImageReferenceMock) Transport() types.ImageTransport { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) StringWithinTransport() string { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) DockerReference() reference.Named { return ref.Named } func (ref refImageReferenceMock) PolicyConfigurationIdentity() string { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) PolicyConfigurationNamespaces() []string { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) DeleteImage(ctx context.Context, sys *types.SystemContext) error { panic("unexpected call to a mock function") } func newSchema2ImageSource(t *testing.T, dockerRef string) *schema2ImageSource { realConfigJSON, err := ioutil.ReadFile("fixtures/schema2-config.json") require.NoError(t, err) ref, err := reference.ParseNormalizedNamed(dockerRef) require.NoError(t, err) return &schema2ImageSource{ configBlobImageSource: configBlobImageSource{ f: func(digest digest.Digest) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil }, }, ref: ref, } } type memoryImageDest struct { ref reference.Named storedBlobs map[digest.Digest][]byte } func (d *memoryImageDest) Reference() types.ImageReference { return refImageReferenceMock{d.ref} } func (d *memoryImageDest) Close() error { panic("Unexpected call to a mock function") } func (d *memoryImageDest) SupportedManifestMIMETypes() []string { panic("Unexpected call to a mock function") } func (d *memoryImageDest) SupportsSignatures(ctx context.Context) error { panic("Unexpected call to a mock function") } func (d *memoryImageDest) DesiredLayerCompression() types.LayerCompression { panic("Unexpected call to a mock function") } func (d *memoryImageDest) AcceptsForeignLayerURLs() bool { panic("Unexpected call to a mock function") } func (d *memoryImageDest) MustMatchRuntimeOS() bool { panic("Unexpected call to a mock function") } func (d *memoryImageDest) IgnoresEmbeddedDockerReference() bool { panic("Unexpected call to a mock function") } func (d *memoryImageDest) HasThreadSafePutBlob() bool { panic("Unexpected call to a mock function") } func (d *memoryImageDest) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) 
(types.BlobInfo, error) { if d.storedBlobs == nil { d.storedBlobs = make(map[digest.Digest][]byte) } if inputInfo.Digest.String() == "" { panic("inputInfo.Digest unexpectedly empty") } contents, err := ioutil.ReadAll(stream) if err != nil { return types.BlobInfo{}, err } d.storedBlobs[inputInfo.Digest] = contents return types.BlobInfo{Digest: inputInfo.Digest, Size: int64(len(contents))}, nil } func (d *memoryImageDest) TryReusingBlob(context.Context, types.BlobInfo, types.BlobInfoCache, bool) (bool, types.BlobInfo, error) { panic("Unexpected call to a mock function") } func (d *memoryImageDest) PutManifest(ctx context.Context, m []byte) error { panic("Unexpected call to a mock function") } func (d *memoryImageDest) PutSignatures(ctx context.Context, signatures [][]byte) error { panic("Unexpected call to a mock function") } func (d *memoryImageDest) Commit(ctx context.Context) error { panic("Unexpected call to a mock function") } func TestManifestSchema2UpdatedImage(t *testing.T) { originalSrc := newSchema2ImageSource(t, "httpd:latest") original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false) // LayerInfos: layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: layerInfos, }) require.NoError(t, err) assert.Equal(t, layerInfos, res.LayerInfos()) _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: append(layerInfos, layerInfos[0]), }) assert.Error(t, err) // EmbeddedDockerReference: // … is ignored embeddedRef, err := reference.ParseNormalizedNamed("busybox") require.NoError(t, err) res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ EmbeddedDockerReference: embeddedRef, }) require.NoError(t, err) nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest") require.NoError(t, err) conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef) assert.False(t, conflicts) // ManifestMIMEType: // Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.) 
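	// (Note: InformationOnly.Destination is supplied for the schema1 conversions below because the converter may need somewhere to upload GzippedEmptyLayer for empty-layer history entries; TestConvertToManifestSchema1 asserts exactly that via memoryDest.storedBlobs.)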
for _, mime := range []string{ manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, } { _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: mime, InformationOnly: types.ManifestUpdateInformation{ Destination: &memoryImageDest{ref: originalSrc.ref}, }, }) assert.NoError(t, err, mime) } for _, mime := range []string{ manifest.DockerV2Schema2MediaType, // This indicates a confused caller, not a no-op "this is invalid", } { _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: mime, }) assert.Error(t, err, mime) } // original hasn’t been changed: m2 := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false) typedOriginal, ok := original.(*manifestSchema2) require.True(t, ok) typedM2, ok := m2.(*manifestSchema2) require.True(t, ok) assert.Equal(t, *typedM2, *typedOriginal) } func TestConvertToManifestOCI(t *testing.T) { originalSrc := newSchema2ImageSource(t, "httpd-copy:latest") original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: imgspecv1.MediaTypeImageManifest, }) require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt) byHandJSON, err := ioutil.ReadFile("fixtures/schema2-to-oci1.json") require.NoError(t, err) var converted, byHand map[string]interface{} err = json.Unmarshal(byHandJSON, &byHand) require.NoError(t, err) err = json.Unmarshal(convertedJSON, &converted) require.NoError(t, err) assert.Equal(t, byHand, converted) } func TestConvertToManifestOCIAllMediaTypes(t *testing.T) { originalSrc := newSchema2ImageSource(t, "httpd-copy:latest") original := manifestSchema2FromFixture(t, originalSrc, "schema2-all-media-types.json", false) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: imgspecv1.MediaTypeImageManifest, }) require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) assert.Equal(t, imgspecv1.MediaTypeImageManifest, mt) byHandJSON, err := ioutil.ReadFile("fixtures/schema2-all-media-types-to-oci1.json") require.NoError(t, err) var converted, byHand map[string]interface{} err = json.Unmarshal(byHandJSON, &byHand) require.NoError(t, err) err = json.Unmarshal(convertedJSON, &converted) require.NoError(t, err) assert.Equal(t, byHand, converted) } func TestConvertToOCIWithInvalidMIMEType(t *testing.T) { originalSrc := newSchema2ImageSource(t, "httpd-copy:latest") manifestSchema2FromFixture(t, originalSrc, "schema2-invalid-media-type.json", true) } func TestConvertToManifestSchema1(t *testing.T) { originalSrc := newSchema2ImageSource(t, "httpd-copy:latest") original := manifestSchema2FromFixture(t, originalSrc, "schema2.json", false) memoryDest := &memoryImageDest{ref: originalSrc.ref} res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType, InformationOnly: types.ManifestUpdateInformation{ Destination: memoryDest, }, }) require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) assert.Equal(t, manifest.DockerV2Schema1SignedMediaType, mt) // byDockerJSON is the result of asking the Docker Hub for a schema1 manifest, // except that we have replaced "name" to verify that the ref from // memoryDest, not
from originalSrc, is used. byDockerJSON, err := ioutil.ReadFile("fixtures/schema2-to-schema1-by-docker.json") require.NoError(t, err) var converted, byDocker map[string]interface{} err = json.Unmarshal(byDockerJSON, &byDocker) require.NoError(t, err) err = json.Unmarshal(convertedJSON, &converted) require.NoError(t, err) delete(byDocker, "signatures") delete(converted, "signatures") assert.Equal(t, byDocker, converted) assert.Equal(t, GzippedEmptyLayer, memoryDest.storedBlobs[GzippedEmptyLayerDigest]) // FIXME? Test also the various failure cases, if only to see that we don't crash? } image-4.0.1/image/fixtures/000077500000000000000000000000001354546467100155505ustar00rootroot00000000000000image-4.0.1/image/fixtures/oci1-all-media-types-to-schema2.json000066400000000000000000000032121354546467100242210ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": 4651, "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.zstd", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 152, "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] }image-4.0.1/image/fixtures/oci1-all-media-types.json000066400000000000000000000030651354546467100222670ustar00rootroot00000000000000{ "schemaVersion": 2, "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 4651, "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745" }, "layers": [ { "mediaType": "application/vnd.oci.image.layer.v1.tar", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+zstd", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 152, "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] 
}image-4.0.1/image/fixtures/oci1-config.json000066400000000000000000000134641354546467100205510ustar00rootroot00000000000000{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV 
HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}}image-4.0.1/image/fixtures/oci1-invalid-media-type.json000066400000000000000000000007371354546467100227650ustar00rootroot00000000000000{ "schemaVersion": 2, "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 5940, "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" }, "layers": [ { "mediaType": "application/vnd.oci.image.layer.v1.tar+invalid-suffix", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" } ] }image-4.0.1/image/fixtures/oci1-to-schema2.json000066400000000000000000000025761354546467100212500ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": 5940, "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 51354364, "digest": 
"sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", "urls": ["https://layer.url"] }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] }image-4.0.1/image/fixtures/oci1.json000066400000000000000000000026441354546467100173040ustar00rootroot00000000000000{ "schemaVersion": 2, "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 5940, "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", "annotations": { "test-annotation-1": "one" } }, "layers": [ { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", "urls": ["https://layer.url"] }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", "annotations": { "test-annotation-2": "two" } }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] } image-4.0.1/image/fixtures/schema1-to-oci-config.json000066400000000000000000000107561354546467100224300ustar00rootroot00000000000000{ "schemaVersion": 1, "name": "google_containers/pause-amd64", "tag": "3.0", "architecture": "amd64", "fsLayers": [ { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:f112334343777b75be77ec1f835e3bbbe7d7bd46e27b6a2ae35c6b3cfea0987c" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" } ], "history": [ { "v1Compatibility": "{\"id\":\"bb497e16a2d55195649174d1fadac52b00fa2c14124d73009712606909286bc5\",\"parent\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"created\":\"2016-05-04T06:26:41.522308365Z\",\"container\":\"a9873535145fe72b464d3055efbac36aab70d059914e221cbbd7fe3cac53ef6b\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
\\u0026{[\\\"/pause\\\"]}\"],\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":[\"/pause\"],\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\"}" }, { "v1Compatibility": "{\"id\":\"f8e2eec424cf985b4e41d6423991433fb7a93c90f9acc73a5e7bee213b789c52\",\"parent\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:41.091672218Z\",\"container\":\"e1b38778b023f25642273ed9e7f4846b4bf38b22a8b55755880b2e6ab6019811\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:b7eb6a5df9d5fbe509cac16ed89f8d6513a4362017184b14c6a5fae151eee5c5 in /pause\"],\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":746888}" }, { "v1Compatibility": "{\"id\":\"bdb43c586e887b513a056722b50553727b255e3a3d9166f318632d4209963464\",\"created\":\"2016-05-04T06:26:40.628395649Z\",\"container\":\"95722352e41d57660259fbede4413d06889a28eb07a7302d2a7b3f9c71ceaa46\",\"container_config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ARG ARCH\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.9.1\",\"config\":{\"Hostname\":\"95722352e41d\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\"}" } ],"signatures":[{"header":{"alg":"ES256","jwk":{"crv":"P-256","kid":"ORN4:M47W:3KP3:TZRZ:C3UF:5MFQ:INZV:TCMY:LHNV:EYQU:IRGJ:IJLJ","kty":"EC","x":"yJ0ZQ19NBZUQn8LV60sFEabhlgky9svozfK0VGVou7Y","y":"gOJScOkkLVY1f8aAx-6XXpVM5rJaDYLkCNJ1dvcQGMs"}},"protected":"eyJmb3JtYXRMZW5ndGgiOjQxMzMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNi0wNS0wNFQwNjoyODo1MVoifQ","signature":"77_7DVx1IZ3PiKNnO7QnvoF7Sgik4GI4bnlVJdtQW461dSyYzd-nSdBmky8Jew3InEW8Cuv_t5w4GmOSwXvL7g"}] } 
image-4.0.1/image/fixtures/schema1-to-schema2-config.json000066400000000000000000000152461354546467100231770ustar00rootroot00000000000000{ "architecture": "amd64", "config": { "Hostname": "9428cdea83ba", "Domainname": "", "User": "nova", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "kolla_start" ], "Healthcheck": { "Test": [ "CMD-SHELL", "/openstack/healthcheck" ] }, "ArgsEscaped": true, "Image": "3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33", "Volumes": null, "WorkingDir": "", "Entrypoint": null, "OnBuild": [], "Labels": { "Kolla-SHA": "5.0.0-39-g6f1b947b", "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "build-date": "2018-01-25T00:32:27.807261", "com.redhat.build-host": "ip-10-29-120-186.ec2.internal", "com.redhat.component": "openstack-nova-api-docker", "description": "Red Hat OpenStack Platform 12.0 nova-api", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api", "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api", "io.openshift.tags": "rhosp osp openstack osp-12.0", "kolla_version": "stable/pike", "name": "rhosp12/openstack-nova-api", "release": "20180124.1", "summary": "Red Hat OpenStack Platform 12.0 nova-api", "tripleo-common_version": "7.6.3-23-g4891cfe", "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1", "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "12.0", "version-release": "12.0-20180124.1" } }, "container_config": { "Hostname": "9428cdea83ba", "Domainname": "", "User": "nova", "AttachStdin": false, "AttachStdout": false, "AttachStderr": false, "Tty": false, "OpenStdin": false, "StdinOnce": false, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "container=oci", "KOLLA_BASE_DISTRO=rhel", "KOLLA_INSTALL_TYPE=binary", "KOLLA_INSTALL_METATYPE=rhos", "PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ " ], "Cmd": [ "/bin/sh", "-c", "#(nop) ", "USER [nova]" ], "Healthcheck": { "Test": [ "CMD-SHELL", "/openstack/healthcheck" ] }, "ArgsEscaped": true, "Image": "sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf", "Volumes": null, "WorkingDir": "", "Entrypoint": null, "OnBuild": [], "Labels": { "Kolla-SHA": "5.0.0-39-g6f1b947b", "architecture": "x86_64", "authoritative-source-url": "registry.access.redhat.com", "build-date": "2018-01-25T00:32:27.807261", "com.redhat.build-host": "ip-10-29-120-186.ec2.internal", "com.redhat.component": "openstack-nova-api-docker", "description": "Red Hat OpenStack Platform 12.0 nova-api", "distribution-scope": "public", "io.k8s.description": "Red Hat OpenStack Platform 12.0 nova-api", "io.k8s.display-name": "Red Hat OpenStack Platform 12.0 nova-api", "io.openshift.tags": "rhosp osp openstack osp-12.0", "kolla_version": "stable/pike", "name": "rhosp12/openstack-nova-api", "release": "20180124.1", "summary": "Red Hat OpenStack Platform 12.0 nova-api", "tripleo-common_version": "7.6.3-23-g4891cfe", "url": 
"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1", "vcs-ref": "9b31243b7b448eb2fc3b6e2c96935b948f806e98", "vcs-type": "git", "vendor": "Red Hat, Inc.", "version": "12.0", "version-release": "12.0-20180124.1" } }, "created": "2018-01-25T00:37:48.268558Z", "docker_version": "1.12.6", "os": "linux", "history": [ { "comment": "Imported from -", "created": "2017-11-21T16:47:27.755341705Z" }, { "author": "Red Hat, Inc.", "created": "2017-11-21T16:49:37.292899Z", "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'" }, { "created": "2018-01-24T21:40:32.494686Z", "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'" }, { "created": "2018-01-24T22:00:57.807862Z", "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'" }, { "created": "2018-01-24T23:08:25.300741Z", "created_by": "/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'" }, { "created": "2018-01-25T00:37:48.268558Z", "created_by": "/bin/sh -c #(nop) USER [nova]" } ], "rootfs": { "type": "layers", "diff_ids": [ "sha256:e1d829eddb62dc49f1c56dbf8acd0c71299b3996115399de853a9d66d81b822f", "sha256:02404b4d7e5d89b1383ca346b4462b199128aa4b238c5a2b2c186004ac148ba8", "sha256:45fad80a4b1cec165c421eb570dec312d825bd8fac362e255028fa3f2169148d", "sha256:7ddef8efd44586e54880ec4797458eac87b368544c438d7e7c63fbc0d9a7ae97", "sha256:b56b16b6407ba1b86252e7e50f98f142cf6844fab42e4495d56ebb7ce559e2af", "sha256:9bd63850e406167b4751f5050f6dc0ebd789bb5ef5e5c6c31ed062bda8c063e8" ] } }image-4.0.1/image/fixtures/schema1-to-schema2.json000066400000000000000000000031631354546467100217270ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/octet-stream", "size": -1, "digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 74876245, "digest": "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 1239, "digest": "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 78339724, "digest": "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 76857203, "digest": "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 25923380, "digest": "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 23511300, "digest": "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d" } ] }image-4.0.1/image/fixtures/schema1.json000066400000000000000000000164441354546467100177750ustar00rootroot00000000000000{ "schemaVersion": 1, "name": "rhosp12/openstack-nova-api", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": 
"sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d" }, { "blobSum": "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788" }, { "blobSum": "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6" }, { "blobSum": "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e" }, { "blobSum": "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a" }, { "blobSum": "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4" } ], "history": [ { "v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"kolla_start\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"container_config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"USER 
[nova]\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"created\":\"2018-01-25T00:37:48.268558Z\",\"docker_version\":\"1.12.6\",\"id\":\"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf\",\"os\":\"linux\",\"parent\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\"}" }, { "v1Compatibility": "{\"id\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\",\"parent\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"created\":\"2018-01-24T23:08:25.300741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}" }, { "v1Compatibility": "{\"id\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"parent\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"created\":\"2018-01-24T22:00:57.807862Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}" }, { "v1Compatibility": "{\"id\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"parent\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"created\":\"2018-01-24T21:40:32.494686Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}" }, { "v1Compatibility": "{\"id\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"parent\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"created\":\"2017-11-21T16:49:37.292899Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'\"]},\"author\":\"Red Hat, Inc.\"}" }, { "v1Compatibility": "{\"id\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"comment\":\"Imported from -\",\"created\":\"2017-11-21T16:47:27.755341705Z\",\"container_config\":{\"Cmd\":[\"\"]}}" } ], "signatures": [ { "header": { "jwk": { "crv": "P-256", "kid": "DB2X:GSG2:72H3:AE3R:KCMI:Y77E:W7TF:ERHK:V5HR:JJ2Y:YMS6:HFGJ", 
"kty": "EC", "x": "jyr9-xZBorSC9fhqNsmfU_Ud31wbaZ-bVGz0HmySvbQ", "y": "vkE6qZCCvYRWjSUwgAOvibQx_s8FipYkAiHS0VnAFNs" }, "alg": "ES256" }, "signature": "jBBsnocfxw77LzmM_VeN6Nb031BtqPgx-DbppYOEnhZfGLRcyYwGUPW--3JrkeEX6AlEGzPI57R0tlu5bZvrnQ", "protected": "eyJmb3JtYXRMZW5ndGgiOjY4MTMsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxOC0wMS0zMFQxOToyNToxMloifQ" } ] }image-4.0.1/image/fixtures/schema2-all-media-types-to-oci1.json000066400000000000000000000025371354546467100242320ustar00rootroot00000000000000{ "schemaVersion": 2, "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 4651, "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745" }, "layers": [ { "mediaType": "application/vnd.oci.image.layer.v1.tar", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 152, "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] }image-4.0.1/image/fixtures/schema2-all-media-types.json000066400000000000000000000026561354546467100227630ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": 4651, "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 152, "digest": "sha256:2bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] 
}image-4.0.1/image/fixtures/schema2-config.json000066400000000000000000000134641354546467100212400ustar00rootroot00000000000000{"architecture":"amd64","config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["httpd-foreground"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"container":"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69","container_config":{"Hostname":"383850eeb47b","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":{"80/tcp":{}},"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","HTTPD_PREFIX=/usr/local/apache2","HTTPD_VERSION=2.4.23","HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc"],"Cmd":["/bin/sh","-c","#(nop) ","CMD [\"httpd-foreground\"]"],"ArgsEscaped":true,"Image":"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd","Volumes":null,"WorkingDir":"/usr/local/apache2","Entrypoint":null,"OnBuild":[],"Labels":{}},"created":"2016-09-23T23:20:45.78976459Z","docker_version":"1.12.1","history":[{"created":"2016-09-23T18:08:50.537223822Z","created_by":"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / "},{"created":"2016-09-23T18:08:51.133779867Z","created_by":"/bin/sh -c #(nop) CMD [\"/bin/bash\"]","empty_layer":true},{"created":"2016-09-23T19:16:40.725768956Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:41.037788416Z","created_by":"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","empty_layer":true},{"created":"2016-09-23T19:16:41.990121202Z","created_by":"/bin/sh -c mkdir -p \"$HTTPD_PREFIX\" \t\u0026\u0026 chown www-data:www-data \"$HTTPD_PREFIX\""},{"created":"2016-09-23T19:16:42.339911155Z","created_by":"/bin/sh -c #(nop) WORKDIR /usr/local/apache2","empty_layer":true},{"created":"2016-09-23T19:16:54.948461741Z","created_by":"/bin/sh -c apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends \t\tlibapr1 \t\tlibaprutil1 \t\tlibaprutil1-ldap \t\tlibapr1-dev \t\tlibaprutil1-dev \t\tlibpcre++0 \t\tlibssl1.0.0 \t\u0026\u0026 rm -r /var/lib/apt/lists/*"},{"created":"2016-09-23T19:16:55.321573403Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23","empty_layer":true},{"created":"2016-09-23T19:16:55.629947307Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f","empty_layer":true},{"created":"2016-09-23T23:19:03.705796801Z","created_by":"/bin/sh -c #(nop) ENV 
HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\u0026filename=httpd/httpd-2.4.23.tar.bz2","empty_layer":true},{"created":"2016-09-23T23:19:04.009782822Z","created_by":"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc","empty_layer":true},{"created":"2016-09-23T23:20:44.585743332Z","created_by":"/bin/sh -c set -x \t\u0026\u0026 buildDeps=' \t\tbzip2 \t\tca-certificates \t\tgcc \t\tlibpcre++-dev \t\tlibssl-dev \t\tmake \t\twget \t' \t\u0026\u0026 apt-get update \t\u0026\u0026 apt-get install -y --no-install-recommends $buildDeps \t\u0026\u0026 rm -r /var/lib/apt/lists/* \t\t\u0026\u0026 wget -O httpd.tar.bz2 \"$HTTPD_BZ2_URL\" \t\u0026\u0026 echo \"$HTTPD_SHA1 *httpd.tar.bz2\" | sha1sum -c - \t\u0026\u0026 wget -O httpd.tar.bz2.asc \"$HTTPD_ASC_URL\" \t\u0026\u0026 export GNUPGHOME=\"$(mktemp -d)\" \t\u0026\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \t\u0026\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \t\u0026\u0026 rm -r \"$GNUPGHOME\" httpd.tar.bz2.asc \t\t\u0026\u0026 mkdir -p src \t\u0026\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \t\u0026\u0026 rm httpd.tar.bz2 \t\u0026\u0026 cd src \t\t\u0026\u0026 ./configure \t\t--prefix=\"$HTTPD_PREFIX\" \t\t--enable-mods-shared=reallyall \t\u0026\u0026 make -j\"$(nproc)\" \t\u0026\u0026 make install \t\t\u0026\u0026 cd .. \t\u0026\u0026 rm -r src \t\t\u0026\u0026 sed -ri \t\t-e 's!^(\\s*CustomLog)\\s+\\S+!\\1 /proc/self/fd/1!g' \t\t-e 's!^(\\s*ErrorLog)\\s+\\S+!\\1 /proc/self/fd/2!g' \t\t\"$HTTPD_PREFIX/conf/httpd.conf\" \t\t\u0026\u0026 apt-get purge -y --auto-remove $buildDeps"},{"created":"2016-09-23T23:20:45.127455562Z","created_by":"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ "},{"created":"2016-09-23T23:20:45.453934921Z","created_by":"/bin/sh -c #(nop) EXPOSE 80/tcp","empty_layer":true},{"created":"2016-09-23T23:20:45.78976459Z","created_by":"/bin/sh -c #(nop) CMD [\"httpd-foreground\"]","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:142a601d97936307e75220c35dde0348971a9584c21e7cb42e1f7004005432ab","sha256:90fcc66ad3be9f1757f954b750deb37032f208428aa12599fcb02182b9065a9c","sha256:5a8624bb7e76d1e6829f9c64c43185e02bc07f97a2189eb048609a8914e72c56","sha256:d349ff6b3afc6a2800054768c82bfbf4289c9aa5da55c1290f802943dcd4d1e9","sha256:8c064bb1f60e84fa8cc6079b6d2e76e0423389fd6aeb7e497dfdae5e05b2b25b"]}}image-4.0.1/image/fixtures/schema2-invalid-media-type.json000066400000000000000000000025431354546467100234510ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/octet-stream", "size": 5940, "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.zstd", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 8841833, "digest": 
"sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] }image-4.0.1/image/fixtures/schema2-to-oci1.json000066400000000000000000000020621354546467100212360ustar00rootroot00000000000000{ "schemaVersion": 2, "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "size": 4651, "digest": "sha256:a13a0762ab7bed51a1b49adec0a702b1cd99294fd460a025b465bcfb7b152745" }, "layers": [{ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" }] } image-4.0.1/image/fixtures/schema2-to-schema1-by-docker.json000066400000000000000000000264351354546467100236130ustar00rootroot00000000000000{ "schemaVersion": 1, "name": "library/httpd-copy", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" }, { "blobSum": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" }, { "blobSum": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" } ], "history": [ { "v1Compatibility": 
"{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"httpd-foreground\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"container\":\"8825acde1b009729807e4b70a65a89399dd8da8e53be9216b9aaabaff4339f69\",\"container_config\":{\"Hostname\":\"383850eeb47b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":{\"80/tcp\":{}},\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"HTTPD_PREFIX=/usr/local/apache2\",\"HTTPD_VERSION=2.4.23\",\"HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\",\"HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\",\"HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"CMD [\\\"httpd-foreground\\\"]\"],\"ArgsEscaped\":true,\"Image\":\"sha256:4f83530449c67c1ed8fca72583c5b92fdf446010990028c362a381e55dd84afd\",\"Volumes\":null,\"WorkingDir\":\"/usr/local/apache2\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{}},\"created\":\"2016-09-23T23:20:45.78976459Z\",\"docker_version\":\"1.12.1\",\"id\":\"dca7323f9c839837493199d63263083d94f5eb1796d7bd04ca8374c4e9d3749a\",\"os\":\"linux\",\"parent\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"1b750729af47c9a802c8d14b0d327d3ad5ecdce5ae773ac728a0263315b914f4\",\"parent\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"created\":\"2016-09-23T23:20:45.453934921Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) EXPOSE 80/tcp\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"3ef2f186f8b0a2fd2d95f5a1f1cd213f5fb0a6e51b0a8dfbe2ec7003a788ff9a\",\"parent\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"created\":\"2016-09-23T23:20:45.127455562Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) COPY file:761e313354b918b6cd7ea99975a4f6b53ff5381ba689bab2984aec4dab597215 in /usr/local/bin/ \"]}}" }, { "v1Compatibility": "{\"id\":\"dbbb5c772ba968f675ebdb1968a2fbcf3cf53c0c85ff4e3329619e3735c811e6\",\"parent\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"created\":\"2016-09-23T23:20:44.585743332Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c set -x \\t\\u0026\\u0026 buildDeps=' \\t\\tbzip2 \\t\\tca-certificates \\t\\tgcc \\t\\tlibpcre++-dev \\t\\tlibssl-dev \\t\\tmake \\t\\twget \\t' \\t\\u0026\\u0026 apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends $buildDeps \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/* \\t\\t\\u0026\\u0026 wget -O httpd.tar.bz2 \\\"$HTTPD_BZ2_URL\\\" 
\\t\\u0026\\u0026 echo \\\"$HTTPD_SHA1 *httpd.tar.bz2\\\" | sha1sum -c - \\t\\u0026\\u0026 wget -O httpd.tar.bz2.asc \\\"$HTTPD_ASC_URL\\\" \\t\\u0026\\u0026 export GNUPGHOME=\\\"$(mktemp -d)\\\" \\t\\u0026\\u0026 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys A93D62ECC3C8EA12DB220EC934EA76E6791485A8 \\t\\u0026\\u0026 gpg --batch --verify httpd.tar.bz2.asc httpd.tar.bz2 \\t\\u0026\\u0026 rm -r \\\"$GNUPGHOME\\\" httpd.tar.bz2.asc \\t\\t\\u0026\\u0026 mkdir -p src \\t\\u0026\\u0026 tar -xvf httpd.tar.bz2 -C src --strip-components=1 \\t\\u0026\\u0026 rm httpd.tar.bz2 \\t\\u0026\\u0026 cd src \\t\\t\\u0026\\u0026 ./configure \\t\\t--prefix=\\\"$HTTPD_PREFIX\\\" \\t\\t--enable-mods-shared=reallyall \\t\\u0026\\u0026 make -j\\\"$(nproc)\\\" \\t\\u0026\\u0026 make install \\t\\t\\u0026\\u0026 cd .. \\t\\u0026\\u0026 rm -r src \\t\\t\\u0026\\u0026 sed -ri \\t\\t-e 's!^(\\\\s*CustomLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/1!g' \\t\\t-e 's!^(\\\\s*ErrorLog)\\\\s+\\\\S+!\\\\1 /proc/self/fd/2!g' \\t\\t\\\"$HTTPD_PREFIX/conf/httpd.conf\\\" \\t\\t\\u0026\\u0026 apt-get purge -y --auto-remove $buildDeps\"]}}" }, { "v1Compatibility": "{\"id\":\"d264ded964bb52f78c8905c9e6c5f2b8526ef33f371981f0651f3fb0164ad4a7\",\"parent\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"created\":\"2016-09-23T23:19:04.009782822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"fd6f8d569a8a6d2a95f797494ab3cee7a47693dde647210b236a141f76b5c5fd\",\"parent\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"created\":\"2016-09-23T23:19:03.705796801Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download\\u0026filename=httpd/httpd-2.4.23.tar.bz2\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"5e2578d171daa47c0eeb55e592b4e3bd28a0946a75baed58e4d4dd315c5d5780\",\"parent\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"created\":\"2016-09-23T19:16:55.629947307Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"1912159ee5bea8d7fde49b85012f90c47bceb3f09e4082b112b1f06a3f339c53\",\"parent\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"created\":\"2016-09-23T19:16:55.321573403Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_VERSION=2.4.23\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"3bfb089ca9d4bb73a9016e44a2c6f908b701f97704433305c419f75e8559d8a2\",\"parent\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"created\":\"2016-09-23T19:16:54.948461741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c apt-get update \\t\\u0026\\u0026 apt-get install -y --no-install-recommends \\t\\tlibapr1 \\t\\tlibaprutil1 \\t\\tlibaprutil1-ldap \\t\\tlibapr1-dev \\t\\tlibaprutil1-dev \\t\\tlibpcre++0 \\t\\tlibssl1.0.0 \\t\\u0026\\u0026 rm -r /var/lib/apt/lists/*\"]}}" }, { "v1Compatibility": "{\"id\":\"ae1ece73de4d0365c8b8ab45ba0bf6b1efa4213c16a4903b89341b704d101c3c\",\"parent\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"created\":\"2016-09-23T19:16:42.339911155Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) WORKDIR /usr/local/apache2\"]},\"throwaway\":true}" }, { "v1Compatibility": 
"{\"id\":\"bffbcb416f40e0bd3ebae202403587bfd41829cd1e0d538b66f29adce40c6408\",\"parent\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"created\":\"2016-09-23T19:16:41.990121202Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c mkdir -p \\\"$HTTPD_PREFIX\\\" \\t\\u0026\\u0026 chown www-data:www-data \\\"$HTTPD_PREFIX\\\"\"]}}" }, { "v1Compatibility": "{\"id\":\"7b27731a3363efcb6b0520962d544471745aae15664920dffe690b4fdb410d80\",\"parent\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"created\":\"2016-09-23T19:16:41.037788416Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"57a0a421f1acbc1fe6b88b32d3d1c3c0388ff1958b97f95dd0e3a599b810499b\",\"parent\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"created\":\"2016-09-23T19:16:40.725768956Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ENV HTTPD_PREFIX=/usr/local/apache2\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"faeaf6fdfdcbb18d68c12db9683a02428bab83962a493de88b4c7b1ec941db8f\",\"parent\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:51.133779867Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) CMD [\\\"/bin/bash\\\"]\"]},\"throwaway\":true}" }, { "v1Compatibility": "{\"id\":\"d0c4f1eb7dc8f4dae2b45fe5c0cf4cfc70e5be85d933f5f5f4deb59f134fb520\",\"created\":\"2016-09-23T18:08:50.537223822Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c #(nop) ADD file:c6c23585ab140b0b320d4e99bc1b0eb544c9e96c24d90fec5e069a6d57d335ca in / \"]}}" } ], "signatures": [ { "header": { "jwk": { "crv": "P-256", "kid": "6QVR:5NTY:VIHC:W6IU:XYIN:CTKT:OG5R:XEEG:Z6XJ:2623:YCBP:36MA", "kty": "EC", "x": "NAGHj6-IdNonuFoxlqJnNMjcrCCE1CBoq2r_1NDci68", "y": "Kocqgj_Ey5J-wLXTjkuqLC-HjciAnWxsBEziAOTvSPc" }, "alg": "ES256" }, "signature": "2MN5k06i8xkJhD5ay4yxAFK7tsZk58UznAZONxDplvQ5lZwbRS162OeBDjCb0Hk0IDyrLXtAfBDlY2Gzf6jrpw", "protected": "eyJmb3JtYXRMZW5ndGgiOjEwODk1LCJmb3JtYXRUYWlsIjoiQ24wIiwidGltZSI6IjIwMTYtMTAtMTRUMTY6MTI6MDlaIn0" } ] } image-4.0.1/image/fixtures/schema2.json000066400000000000000000000025001354546467100177620ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/octet-stream", "size": 5940, "digest": "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 51354364, "digest": "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 150, "digest": "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 11739507, "digest": "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 8841833, "digest": "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 291, "digest": "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa" } ] }image-4.0.1/image/fixtures/schema2list.json000066400000000000000000000052131354546467100206620ustar00rootroot00000000000000{ 
"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", "manifests": [ { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 527, "digest": "sha256:030fcb92e1487b18c974784dcc110a93147c9fc402188370fbfd17efabffc6af", "platform": { "architecture": "amd64", "os": "linux" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 527, "digest": "sha256:9142d97ef280a7953cf1a85716de49a24cc1dd62776352afad67e635331ff77a", "platform": { "architecture": "arm", "os": "linux", "variant": "v5" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 527, "digest": "sha256:b5dbad4bdb4444d919294afe49a095c23e86782f98cdf0aa286198ddb814b50b", "platform": { "architecture": "arm", "os": "linux", "variant": "v6" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 527, "digest": "sha256:a8fe0549cac196f439de3bf2b57af14f7cd4e59915ccd524428f588628a4ef31", "platform": { "architecture": "arm", "os": "linux", "variant": "v7" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 527, "digest": "sha256:dc472a59fb006797aa2a6bfb54cc9c57959bb0a6d11fadaa608df8c16dea39cf", "platform": { "architecture": "arm64", "os": "linux", "variant": "v8" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 527, "digest": "sha256:9a33b9909e56b0a2092a65fb1b79ef6717fa160b1f084476b860418780e8d53b", "platform": { "architecture": "386", "os": "linux" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 528, "digest": "sha256:59117d7c016fba6ede7f87991204bd672a1dca444102de66db632383507ed90b", "platform": { "architecture": "ppc64le", "os": "linux" } }, { "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "size": 528, "digest": "sha256:e5aa1b0a24620228b75382997a0977f609b3ca3a95533dafdef84c74cc8df642", "platform": { "architecture": "s390x", "os": "linux" } } ] }image-4.0.1/image/manifest.go000066400000000000000000000101321354546467100160310ustar00rootroot00000000000000package image import ( "context" "fmt" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // genericManifest is an interface for parsing, modifying image manifests and related data. // Note that the public methods are intended to be a subset of types.Image // so that embedding a genericManifest into structs works. // will support v1 one day... type genericManifest interface { serialize() ([]byte, error) manifestMIMEType() string // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() types.BlobInfo // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. ConfigBlob(context.Context) ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about // layers in the resulting configuration isn't guaranteed to be returned to due how // old image manifests work (docker v2s1 especially). 
OCIConfig(context.Context) (*imgspecv1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []types.BlobInfo // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. // It returns false if the manifest does not embed a Docker reference. // (This embedding unfortunately happens for Docker schema1; please do not add support for this in any new formats.) EmbeddedDockerReferenceConflicts(ref reference.Named) bool // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. Inspect(context.Context) (*types.ImageInspectInfo, error) // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive // (most importantly it forces us to download the full layers even if they are already present at the destination). UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) } // manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. // If manblob is a manifest list, it implicitly chooses an appropriate image from the list. func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { switch manifest.NormalizedMIMEType(mt) { case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: return manifestSchema1FromManifest(manblob) case imgspecv1.MediaTypeImageManifest: return manifestOCI1FromManifest(src, manblob) case manifest.DockerV2Schema2MediaType: return manifestSchema2FromManifest(src, manblob) case manifest.DockerV2ListMediaType: return manifestSchema2FromManifestList(ctx, sys, src, manblob) default: // Note that this may not be reachable: manifest.NormalizedMIMEType has a default for unknown values. return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } } // manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { blobs := make([]types.BlobInfo, len(layers)) for i, layer := range layers { blobs[i] = layer.BlobInfo } return blobs } image-4.0.1/image/manifest_test.go000066400000000000000000000041471354546467100171010ustar00rootroot00000000000000package image import ( "testing" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" ) func TestManifestLayerInfosToBlobInfos(t *testing.T) { blobs := manifestLayerInfosToBlobInfos([]manifest.LayerInfo{}) assert.Equal(t, []types.BlobInfo{}, blobs) blobs = manifestLayerInfosToBlobInfos([]manifest.LayerInfo{ { BlobInfo: types.BlobInfo{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32, }, EmptyLayer: true, }, { BlobInfo: types.BlobInfo{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833, }, EmptyLayer: false, }, { BlobInfo: types.BlobInfo{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291, }, EmptyLayer: false, }, { BlobInfo: types.BlobInfo{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32, }, EmptyLayer: true, }, }) assert.Equal(t, []types.BlobInfo{ { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291, }, { MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32, }, }, blobs) } image-4.0.1/image/memory.go000066400000000000000000000047601354546467100155450ustar00rootroot00000000000000package image import ( "context" "github.com/pkg/errors" "github.com/containers/image/v4/types" ) // memoryImage is a mostly-implementation of types.Image assembled from data // created in memory, used primarily as a return value of types.Image.UpdatedImage // as a way to carry various structured information in a type-safe and easy-to-use way. // Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone // collection of all related information, e.g. there is no way to get layer blobs // from a memoryImage. type memoryImage struct { genericManifest serializedManifest []byte // A private cache for Manifest() } func memoryImageFromManifest(m genericManifest) types.Image { return &memoryImage{ genericManifest: m, serializedManifest: nil, } } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (i *memoryImage) Reference() types.ImageReference { // It would really be inappropriate to return the ImageReference of the image this was based on. 
return nil } // Size returns the size of the image as stored, if known, or -1 if not. func (i *memoryImage) Size() (int64, error) { return -1, nil } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { if i.serializedManifest == nil { m, err := i.genericManifest.serialize() if err != nil { return nil, "", err } i.serializedManifest = m } return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil } // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { // Modifying an image invalidates signatures; a caller asking the updated image for signatures // is probably confused. return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") } // LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } image-4.0.1/image/oci.go000066400000000000000000000204531354546467100150040ustar00rootroot00000000000000package image import ( "context" "encoding/json" "fmt" "io/ioutil" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/blobinfocache/none" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) type manifestOCI1 struct { src types.ImageSource // May be nil if configBlob is not nil configBlob []byte // If set, corresponds to contents of m.Config. m *manifest.OCI1 } func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { m, err := manifest.OCI1FromManifest(manifestBlob) if err != nil { return nil, err } return &manifestOCI1{ src: src, m: m, }, nil } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { return &manifestOCI1{ src: src, configBlob: configBlob, m: manifest.OCI1FromComponents(config, layers), } } func (m *manifestOCI1) serialize() ([]byte, error) { return m.m.Serialize() } func (m *manifestOCI1) manifestMIMEType() string { return imgspecv1.MediaTypeImageManifest } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestOCI1) ConfigInfo() types.BlobInfo { return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. 
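//
// For example (an illustrative caller-side sketch, not code from this file;
// the returned bytes are the raw config JSON, so they can be decoded the same
// way OCIConfig below decodes them):
//
//	blob, err := m.ConfigBlob(ctx)
//	if err != nil {
//		return nil, err
//	}
//	cfg := &imgspecv1.Image{}
//	err = json.Unmarshal(blob, cfg)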
func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if m.configBlob == nil { if m.src == nil { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache) if err != nil { return nil, err } defer stream.Close() blob, err := ioutil.ReadAll(stream) if err != nil { return nil, err } computedDigest := digest.FromBytes(blob) if computedDigest != m.m.Config.Digest { return nil, errors.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) } m.configBlob = blob } return m.configBlob, nil } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about // layers in the resulting configuration isn't guaranteed to be returned due to how // old image manifests work (docker v2s1 especially). func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { cb, err := m.ConfigBlob(ctx) if err != nil { return nil, err } configOCI := &imgspecv1.Image{} if err := json.Unmarshal(cb, configOCI); err != nil { return nil, err } return configOCI, nil } // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { return manifestLayerInfosToBlobInfos(m.m.LayerInfos()) } // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. // It returns false if the manifest does not embed a Docker reference. // (This embedding unfortunately happens for Docker schema1; please do not add support for this in any new formats.) func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { return false } // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) { getter := func(info types.BlobInfo) ([]byte, error) { if info.Digest != m.ConfigInfo().Digest { // Shouldn't ever happen return nil, errors.New("asked for a different config blob") } config, err := m.ConfigBlob(ctx) if err != nil { return nil, err } return config, nil } return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive // (most importantly it forces us to download the full layers even if they are already present at the destination). func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { return false } // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { copy := manifestOCI1{ // NOTE: This is not a deep copy; it still shares slices etc.
src: m.src, configBlob: m.configBlob, m: manifest.OCI1Clone(m.m), } if options.LayerInfos != nil { if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. switch options.ManifestMIMEType { case "": // No conversion, OK case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: // We can't directly convert to V1, but we can transitively convert via a V2 image m2, err := copy.convertToManifestSchema2() if err != nil { return nil, err } return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{ ManifestMIMEType: options.ManifestMIMEType, InformationOnly: options.InformationOnly, }) case manifest.DockerV2Schema2MediaType: return copy.convertToManifestSchema2() default: return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) } return memoryImageFromManifest(&copy), nil } func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { return manifest.Schema2Descriptor{ MediaType: d.MediaType, Size: d.Size, Digest: d.Digest, URLs: d.URLs, } } func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. config := schema2DescriptorFromOCI1Descriptor(m.m.Config) // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. config.MediaType = manifest.DockerV2Schema2ConfigMediaType layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) for idx := range layers { layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) switch layers[idx].MediaType { case imgspecv1.MediaTypeImageLayerNonDistributable: layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType case imgspecv1.MediaTypeImageLayerNonDistributableGzip: layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip case imgspecv1.MediaTypeImageLayerNonDistributableZstd: return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) case imgspecv1.MediaTypeImageLayer: layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed case imgspecv1.MediaTypeImageLayerGzip: layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType case imgspecv1.MediaTypeImageLayerZstd: return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) } } // Rather than copying the ConfigBlob now, we just pass m.src to the // translated manifest; since the only difference is the mediatype of // descriptors, there is no change to any blob stored in m.src.
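	// (Passing nil for the configBlob argument below defers the config fetch:
	// the translated manifest can read it from m.src on demand, much as
	// manifestOCI1.ConfigBlob above does.)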
m1 := manifestSchema2FromComponents(config, m.src, nil, layers) return memoryImageFromManifest(m1), nil } image-4.0.1/image/oci_test.go000066400000000000000000000346441354546467100160520ustar00rootroot00000000000000package image import ( "bytes" "context" "encoding/json" "io" "io/ioutil" "path/filepath" "testing" "time" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func manifestOCI1FromFixture(t *testing.T, src types.ImageSource, fixture string) genericManifest { manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture)) require.NoError(t, err) m, err := manifestOCI1FromManifest(src, manifest) require.NoError(t, err) return m } func manifestOCI1FromComponentsLikeFixture(configBlob []byte) genericManifest { return manifestOCI1FromComponents(imgspecv1.Descriptor{ MediaType: imgspecv1.MediaTypeImageConfig, Size: 5940, Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", Annotations: map[string]string{ "test-annotation-1": "one", }, }, nil, configBlob, []imgspecv1.Descriptor{ { MediaType: imgspecv1.MediaTypeImageLayerGzip, Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: 51354364, }, { MediaType: imgspecv1.MediaTypeImageLayerGzip, Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: 150, }, { MediaType: imgspecv1.MediaTypeImageLayerGzip, Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: 11739507, URLs: []string{ "https://layer.url", }, }, { MediaType: imgspecv1.MediaTypeImageLayerGzip, Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833, Annotations: map[string]string{ "test-annotation-2": "two", }, }, { MediaType: imgspecv1.MediaTypeImageLayerGzip, Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291, }, }) } func TestManifestOCI1FromManifest(t *testing.T) { // This just tests that the JSON can be loaded; we test that the parsed // values are correctly returned in tests for the individual getter methods. _ = manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json") _, err := manifestOCI1FromManifest(nil, []byte{}) assert.Error(t, err) } func TestManifestOCI1FromComponents(t *testing.T) { // This just smoke-tests that the manifest can be created; we test that the parsed // values are correctly returned in tests for the individual getter methods. _ = manifestOCI1FromComponentsLikeFixture(nil) } func TestManifestOCI1Serialize(t *testing.T) { for _, m := range []genericManifest{ manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), manifestOCI1FromComponentsLikeFixture(nil), } { serialized, err := m.serialize() require.NoError(t, err) var contents map[string]interface{} err = json.Unmarshal(serialized, &contents) require.NoError(t, err) original, err := ioutil.ReadFile("fixtures/oci1.json") require.NoError(t, err) var originalContents map[string]interface{} err = json.Unmarshal(original, &originalContents) require.NoError(t, err) // We would ideally like to compare “serialized” with some transformation of // “original”, but the ordering of fields in JSON maps is undefined, so this is // easier. 
assert.Equal(t, originalContents, contents) } } func TestManifestOCI1ManifestMIMEType(t *testing.T) { for _, m := range []genericManifest{ manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), manifestOCI1FromComponentsLikeFixture(nil), } { assert.Equal(t, imgspecv1.MediaTypeImageManifest, m.manifestMIMEType()) } } func TestManifestOCI1ConfigInfo(t *testing.T) { for _, m := range []genericManifest{ manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), manifestOCI1FromComponentsLikeFixture(nil), } { assert.Equal(t, types.BlobInfo{ Size: 5940, Digest: "sha256:9ca4bda0a6b3727a6ffcc43e981cad0f24e2ec79d338f6ba325b4dfd0756fb8f", Annotations: map[string]string{ "test-annotation-1": "one", }, MediaType: "application/vnd.oci.image.config.v1+json", }, m.ConfigInfo()) } } func TestManifestOCI1ConfigBlob(t *testing.T) { realConfigJSON, err := ioutil.ReadFile("fixtures/oci1-config.json") require.NoError(t, err) for _, c := range []struct { cbISfn func(digest digest.Digest) (io.ReadCloser, int64, error) blob []byte }{ // Success {func(digest digest.Digest) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil }, realConfigJSON}, // Various kinds of failures {nil, nil}, {func(digest digest.Digest) (io.ReadCloser, int64, error) { return nil, -1, errors.New("Error returned from GetBlob") }, nil}, {func(digest digest.Digest) (io.ReadCloser, int64, error) { reader, writer := io.Pipe() writer.CloseWithError(errors.New("Expected error reading input in ConfigBlob")) return reader, 1, nil }, nil}, {func(digest digest.Digest) (io.ReadCloser, int64, error) { nonmatchingJSON := []byte("This does not match ConfigDescriptor.Digest") return ioutil.NopCloser(bytes.NewReader(nonmatchingJSON)), int64(len(nonmatchingJSON)), nil }, nil}, } { var src types.ImageSource if c.cbISfn != nil { src = configBlobImageSource{unusedImageSource{}, c.cbISfn} } else { src = nil } m := manifestOCI1FromFixture(t, src, "oci1.json") blob, err := m.ConfigBlob(context.Background()) if c.blob != nil { assert.NoError(t, err) assert.Equal(t, c.blob, blob) } else { assert.Error(t, err) } } // Generally configBlob should match ConfigInfo; we don’t quite need it to, and the mismatch here // guarantees that the returned object carries the original contents instead // of reading an object from elsewhere. configBlob := []byte("config blob which does not match ConfigInfo") // This just tests that the manifest can be created; we test that the parsed // values are correctly returned in tests for the individual getter methods.
m := manifestOCI1FromComponentsLikeFixture(configBlob) cb, err := m.ConfigBlob(context.Background()) require.NoError(t, err) assert.Equal(t, configBlob, cb) } func TestManifestOCI1LayerInfo(t *testing.T) { for _, m := range []genericManifest{ manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), manifestOCI1FromComponentsLikeFixture(nil), } { assert.Equal(t, []types.BlobInfo{ { Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: 51354364, MediaType: imgspecv1.MediaTypeImageLayerGzip, }, { Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: 150, MediaType: imgspecv1.MediaTypeImageLayerGzip, }, { Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: 11739507, URLs: []string{ "https://layer.url", }, MediaType: imgspecv1.MediaTypeImageLayerGzip, }, { Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: 8841833, Annotations: map[string]string{ "test-annotation-2": "two", }, MediaType: imgspecv1.MediaTypeImageLayerGzip, }, { Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: 291, MediaType: imgspecv1.MediaTypeImageLayerGzip, }, }, m.LayerInfos()) } } func TestManifestOCI1EmbeddedDockerReferenceConflicts(t *testing.T) { for _, m := range []genericManifest{ manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), manifestOCI1FromComponentsLikeFixture(nil), } { for _, name := range []string{"busybox", "example.com:5555/ns/repo:tag"} { ref, err := reference.ParseNormalizedNamed(name) require.NoError(t, err) conflicts := m.EmbeddedDockerReferenceConflicts(ref) assert.False(t, conflicts) } } } func TestManifestOCI1Inspect(t *testing.T) { configJSON, err := ioutil.ReadFile("fixtures/oci1-config.json") require.NoError(t, err) m := manifestOCI1FromComponentsLikeFixture(configJSON) ii, err := m.Inspect(context.Background()) require.NoError(t, err) created := time.Date(2016, 9, 23, 23, 20, 45, 789764590, time.UTC) assert.Equal(t, types.ImageInspectInfo{ Tag: "", Created: &created, DockerVersion: "1.12.1", Labels: map[string]string{}, Architecture: "amd64", Os: "linux", Layers: []string{ "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", }, Env: []string{ "PATH=/usr/local/apache2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "HTTPD_PREFIX=/usr/local/apache2", "HTTPD_VERSION=2.4.23", "HTTPD_SHA1=5101be34ac4a509b245adb70a56690a84fcc4e7f", "HTTPD_BZ2_URL=https://www.apache.org/dyn/closer.cgi?action=download&filename=httpd/httpd-2.4.23.tar.bz2", "HTTPD_ASC_URL=https://www.apache.org/dist/httpd/httpd-2.4.23.tar.bz2.asc", }, }, *ii) // nil configBlob will trigger an error in m.ConfigBlob() m = manifestOCI1FromComponentsLikeFixture(nil) _, err = m.Inspect(context.Background()) assert.Error(t, err) m = manifestOCI1FromComponentsLikeFixture([]byte("invalid JSON")) _, err = m.Inspect(context.Background()) assert.Error(t, err) } func TestManifestOCI1UpdatedImageNeedsLayerDiffIDs(t *testing.T) { for _, m := range []genericManifest{ manifestOCI1FromFixture(t, unusedImageSource{}, "oci1.json"), manifestOCI1FromComponentsLikeFixture(nil), } { assert.False(t, 
m.UpdatedImageNeedsLayerDiffIDs(types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema2MediaType, })) } } // oci1ImageSource is plausible enough for schema conversions in manifestOCI1.UpdatedImage() to work. type oci1ImageSource struct { configBlobImageSource ref reference.Named } func (OCIis *oci1ImageSource) Reference() types.ImageReference { return refImageReferenceMock{OCIis.ref} } func newOCI1ImageSource(t *testing.T, dockerRef string) *oci1ImageSource { realConfigJSON, err := ioutil.ReadFile("fixtures/oci1-config.json") require.NoError(t, err) ref, err := reference.ParseNormalizedNamed(dockerRef) require.NoError(t, err) return &oci1ImageSource{ configBlobImageSource: configBlobImageSource{ f: func(digest digest.Digest) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(realConfigJSON)), int64(len(realConfigJSON)), nil }, }, ref: ref, } } func TestManifestOCI1UpdatedImage(t *testing.T) { originalSrc := newOCI1ImageSource(t, "httpd:latest") original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") // LayerInfos: layerInfos := append(original.LayerInfos()[1:], original.LayerInfos()[0]) res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: layerInfos, }) require.NoError(t, err) assert.Equal(t, layerInfos, res.LayerInfos()) _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ LayerInfos: append(layerInfos, layerInfos[0]), }) assert.Error(t, err) // EmbeddedDockerReference: // … is ignored embeddedRef, err := reference.ParseNormalizedNamed("busybox") require.NoError(t, err) res, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ EmbeddedDockerReference: embeddedRef, }) require.NoError(t, err) nonEmbeddedRef, err := reference.ParseNormalizedNamed("notbusybox:notlatest") require.NoError(t, err) conflicts := res.EmbeddedDockerReferenceConflicts(nonEmbeddedRef) assert.False(t, conflicts) // ManifestMIMEType: // Only smoke-test the valid conversions, detailed tests are below. (This also verifies that “original” is not affected.) for _, mime := range []string{ manifest.DockerV2Schema2MediaType, } { _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: mime, InformationOnly: types.ManifestUpdateInformation{ Destination: &memoryImageDest{ref: originalSrc.ref}, }, }) assert.NoError(t, err, mime) } for _, mime := range []string{ imgspecv1.MediaTypeImageManifest, // This indicates a confused caller, not a no-op. 
"this is invalid", } { _, err = original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: mime, }) assert.Error(t, err, mime) } // m hasn’t been changed: m2 := manifestOCI1FromFixture(t, originalSrc, "oci1.json") typedOriginal, ok := original.(*manifestOCI1) require.True(t, ok) typedM2, ok := m2.(*manifestOCI1) require.True(t, ok) assert.Equal(t, *typedM2, *typedOriginal) } func TestConvertToManifestSchema2(t *testing.T) { originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") original := manifestOCI1FromFixture(t, originalSrc, "oci1.json") res, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema2MediaType, }) require.NoError(t, err) convertedJSON, mt, err := res.Manifest(context.Background()) require.NoError(t, err) assert.Equal(t, manifest.DockerV2Schema2MediaType, mt) byHandJSON, err := ioutil.ReadFile("fixtures/oci1-to-schema2.json") require.NoError(t, err) var converted, byHand map[string]interface{} err = json.Unmarshal(byHandJSON, &byHand) require.NoError(t, err) err = json.Unmarshal(convertedJSON, &converted) require.NoError(t, err) assert.Equal(t, byHand, converted) // FIXME? Test also the various failure cases, if only to see that we don't crash? } func TestConvertToManifestSchema2AllMediaTypes(t *testing.T) { originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") original := manifestOCI1FromFixture(t, originalSrc, "oci1-all-media-types.json") _, err := original.UpdatedImage(context.Background(), types.ManifestUpdateOptions{ ManifestMIMEType: manifest.DockerV2Schema2MediaType, }) require.Error(t, err) // zstd compression is not supported for docker images } func TestConvertToV2S2WithInvalidMIMEType(t *testing.T) { originalSrc := newOCI1ImageSource(t, "httpd-copy:latest") manifest, err := ioutil.ReadFile(filepath.Join("fixtures", "oci1-invalid-media-type.json")) require.NoError(t, err) _, err = manifestOCI1FromManifest(originalSrc, manifest) require.Error(t, err) } image-4.0.1/image/sourced.go000066400000000000000000000103441354546467100156740ustar00rootroot00000000000000// Package image consolidates knowledge about various container image formats // (as opposed to image storage mechanisms, which are handled by types.ImageSource) // and exposes all of them using an unified interface. package image import ( "context" "github.com/containers/image/v4/types" ) // imageCloser implements types.ImageCloser, perhaps allowing simple users // to use a single object without having keep a reference to a types.ImageSource // only to call types.ImageSource.Close(). type imageCloser struct { types.Image src types.ImageSource } // FromSource returns a types.ImageCloser implementation for the default instance of source. // If source is a manifest list, .Manifest() still returns the manifest list, // but other methods transparently return data from an appropriate image instance. // // The caller must call .Close() on the returned ImageCloser. // // FromSource “takes ownership” of the input ImageSource and will call src.Close() // when the image is closed. (This does not prevent callers from using both the // Image and ImageSource objects simultaneously, but it means that they only need to // the Image.) // // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. 
func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil)) if err != nil { return nil, err } return &imageCloser{ Image: img, src: src, }, nil } func (ic *imageCloser) Close() error { return ic.src.Close() } // sourcedImage is a general set of utilities for working with container images, // whatever is their underlying location (i.e. dockerImageSource-independent). // Note the existence of skopeo/docker.Image: some instances of a `types.Image` // may not be a `sourcedImage` directly. However, most users of `types.Image` // do not care, and those who care about `skopeo/docker.Image` know they do. type sourcedImage struct { *UnparsedImage manifestBlob []byte manifestMIMEType string // genericManifest contains data corresponding to manifestBlob. // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest // if you want to preserve the original manifest; use manifestBlob directly. genericManifest } // FromUnparsedImage returns a types.Image implementation for unparsed. // If unparsed represents a manifest list, .Manifest() still returns the manifest list, // but other methods transparently return data from an appropriate single image. // // The Image must not be used after the underlying ImageSource is Close()d. func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, // this is the only UnparsedImage implementation around, anyway. // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) if err != nil { return nil, err } parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) if err != nil { return nil, err } return &sourcedImage{ UnparsedImage: unparsed, manifestBlob: manifestBlob, manifestMIMEType: manifestMIMEType, genericManifest: parsedManifest, }, nil } // Size returns the size of the image as stored, if it's known, or -1 if it isn't. func (i *sourcedImage) Size() (int64, error) { return -1, nil } // Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. func (i *sourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { return i.manifestBlob, i.manifestMIMEType, nil } func (i *sourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return i.UnparsedImage.src.LayerInfosForCopy(ctx) } image-4.0.1/image/unparsed.go000066400000000000000000000072101354546467100160470ustar00rootroot00000000000000package image import ( "context" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) // UnparsedImage implements types.UnparsedImage . // An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. type UnparsedImage struct { src types.ImageSource instanceDigest *digest.Digest cachedManifest []byte // A private cache for Manifest(); nil if not yet known. 
// A private cache for Manifest(), may be the empty string if guessing failed. // Valid iff cachedManifest is not nil. cachedManifestMIMEType string cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. } // UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // // The UnparsedImage must not be used after the underlying ImageSource is Close()d. func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { return &UnparsedImage{ src: src, instanceDigest: instanceDigest, } } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (i *UnparsedImage) Reference() types.ImageReference { // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. return i.src.Reference() } // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) { if i.cachedManifest == nil { m, mt, err := i.src.GetManifest(ctx, i.instanceDigest) if err != nil { return nil, "", err } // ImageSource.GetManifest does not do digest verification, but we do; // this also immediately protects any user of types.Image. if digest, haveDigest := i.expectedManifestDigest(); haveDigest { matches, err := manifest.MatchesDigest(m, digest) if err != nil { return nil, "", errors.Wrap(err, "Error computing manifest digest") } if !matches { return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) } } i.cachedManifest = m i.cachedManifestMIMEType = mt } return i.cachedManifest, i.cachedManifestMIMEType, nil } // expectedManifestDigest returns the expected value of the manifest digest, and an indicator whether it is known. // The bool return value seems redundant with digest != ""; it is used explicitly // to refuse (unexpected) situations when the digest exists but is "". func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { if i.instanceDigest != nil { return *i.instanceDigest, true } ref := i.Reference().DockerReference() if ref != nil { if canonical, ok := ref.(reference.Canonical); ok { return canonical.Digest(), true } } return "", false } // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { if i.cachedSignatures == nil { sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) if err != nil { return nil, err } i.cachedSignatures = sigs } return i.cachedSignatures, nil } image-4.0.1/internal/000077500000000000000000000000001354546467100144315ustar00rootroot00000000000000image-4.0.1/internal/pkg/000077500000000000000000000000001354546467100152125ustar00rootroot00000000000000image-4.0.1/internal/pkg/keyctl/000077500000000000000000000000001354546467100165055ustar00rootroot00000000000000image-4.0.1/internal/pkg/keyctl/key.go000066400000000000000000000030001354546467100176170ustar00rootroot00000000000000// Copyright 2015 Jesse Sipprell. All rights reserved.
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux package keyctl import ( "golang.org/x/sys/unix" ) // Key represents a single key linked to one or more kernel keyrings. type Key struct { Name string id, ring keyID size int } // ID returns the 32-bit kernel identifier for a specific key func (k *Key) ID() int32 { return int32(k.id) } // Get the key's value as a byte slice func (k *Key) Get() ([]byte, error) { var ( b []byte err error sizeRead int ) if k.size == 0 { k.size = 512 } size := k.size b = make([]byte, int(size)) sizeRead = size + 1 for sizeRead > size { r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) if err != nil { return nil, err } if sizeRead = int(r1); sizeRead > size { b = make([]byte, sizeRead) size = sizeRead sizeRead = size + 1 } else { k.size = sizeRead } } return b[:k.size], err } // Unlink a key from the keyring it was loaded from (or added to). If the key // is not linked to any other keyrings, it is destroyed. func (k *Key) Unlink() error { _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0) return err } // Describe returns a string describing the attributes of a specified key func (k *Key) Describe() (string, error) { keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id)) if err != nil { return "", err } return keyAttr, nil } image-4.0.1/internal/pkg/keyctl/keyring.go000066400000000000000000000061761354546467100205160ustar00rootroot00000000000000// Copyright 2015 Jesse Sipprell. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux // Package keyctl is a Go interface to linux kernel keyrings (keyctl interface) // // Deprecated: Most callers should use either golang.org/x/sys/unix directly, // or the original (and more extensive) github.com/jsipprell/keyctl . package keyctl import ( "unsafe" "golang.org/x/sys/unix" ) // Keyring is the basic interface to a linux keyctl keyring. type Keyring interface { ID Add(string, []byte) (*Key, error) Search(string) (*Key, error) } type keyring struct { id keyID } // ID is the unique 32-bit serial number identifier that all Keys and Keyrings have. type ID interface { ID() int32 } // Add a new key to a keyring. The key can be searched for later by name. func (kr *keyring) Add(name string, key []byte) (*Key, error) { r, err := unix.AddKey("user", name, key, int(kr.id)) if err == nil { key := &Key{Name: name, id: keyID(r), ring: kr.id} return key, nil } return nil, err } // Search for a key by name; this also searches child keyrings linked to this // one. The key, if found, is linked to the top keyring that Search() was called // from. func (kr *keyring) Search(name string) (*Key, error) { id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0) if err == nil { return &Key{Name: name, id: keyID(id), ring: kr.id}, nil } return nil, err } // ID returns the 32-bit kernel identifier of a keyring func (kr *keyring) ID() int32 { return int32(kr.id) } // SessionKeyring returns the current login session keyring func SessionKeyring() (Keyring, error) { return newKeyring(unix.KEY_SPEC_SESSION_KEYRING) } // UserKeyring returns the keyring specific to the current user.
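//
// For example (an illustrative sketch; the key name and payload are made up,
// and error handling is elided):
//
//	kr, _ := UserKeyring()
//	key, _ := kr.Add("example-name", []byte("example-payload"))
//	found, _ := kr.Search("example-name")
//	data, _ := found.Get()
//	_ = data
//	_ = key.Unlink()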
func UserKeyring() (Keyring, error) { return newKeyring(unix.KEY_SPEC_USER_KEYRING) } // Unlink an object from a keyring func Unlink(parent Keyring, child ID) error { _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) return err } // Link a key into a keyring func Link(parent Keyring, child ID) error { _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) return err } // ReadUserKeyring reads the user keyring and returns a slice of keys, each carrying an id (key_serial_t), representing the IDs of all the keys linked to it func ReadUserKeyring() ([]*Key, error) { var ( b []byte err error sizeRead int ) krSize := 4 size := krSize b = make([]byte, size) sizeRead = size + 1 for sizeRead > size { r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) if err != nil { return nil, err } if sizeRead = int(r1); sizeRead > size { b = make([]byte, sizeRead) size = sizeRead sizeRead = size + 1 } else { krSize = sizeRead } } keyIDs := getKeyIDsFromByte(b[:krSize]) return keyIDs, err } func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { idSize := 4 var keys []*Key for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) keys = append(keys, &Key{id: keyID(tempID)}) } return keys } image-4.0.1/internal/pkg/keyctl/keyring_test.go000066400000000000000000000066461354546467100215550ustar00rootroot00000000000000// +build linux package keyctl import ( "crypto/rand" "strings" "testing" ) func TestSessionKeyring(t *testing.T) { token := make([]byte, 20) rand.Read(token) testname := "testname" keyring, err := SessionKeyring() if err != nil { t.Fatal(err) } _, err = keyring.Add(testname, token) if err != nil { t.Fatal(err) } key, err := keyring.Search(testname) if err != nil { t.Fatal(err) } data, err := key.Get() if err != nil { t.Fatal(err) } if string(data) != string(token) { t.Errorf("Expected data %v, but got %v", token, data) } err = key.Unlink() if err != nil { t.Fatal(err) } } func TestUserKeyring(t *testing.T) { token := make([]byte, 20) rand.Read(token) testname := "testuser" userKeyring, err := UserKeyring() if err != nil { t.Fatal(err) } userKey, err := userKeyring.Add(testname, token) if err != nil { t.Fatal(err, userKey) } searchRet, err := userKeyring.Search(testname) if err != nil { t.Fatal(err) } if searchRet.Name != testname { t.Errorf("Expected data %v, but got %v", testname, searchRet.Name) } err = userKey.Unlink() if err != nil { t.Fatal(err) } } func TestLink(t *testing.T) { token := make([]byte, 20) rand.Read(token) testname := "testlink" userKeyring, err := UserKeyring() if err != nil { t.Fatal(err) } sessionKeyring, err := SessionKeyring() if err != nil { t.Fatal(err) } key, err := sessionKeyring.Add(testname, token) if err != nil { t.Fatal(err) } _, err = userKeyring.Search(testname) if err == nil { t.Fatalf("Expected error, but got key %v", testname) } ExpectedError := "required key not available" if err.Error() != ExpectedError { t.Fatal(err) } err = Link(userKeyring, key) if err != nil { t.Fatal(err) } _, err = userKeyring.Search(testname) if err != nil { t.Fatal(err) } err = key.Unlink() if err != nil { t.Fatal(err) } err = Unlink(userKeyring, key) if err != nil { t.Fatal(err) } } func TestUnlink(t *testing.T) { token := make([]byte, 20) rand.Read(token) testname := "testunlink" keyring, err := SessionKeyring() if err != nil { t.Fatal(err) } key, err := keyring.Add(testname, token) if err != nil { t.Fatal(err) } err = Unlink(keyring, key) if err
!= nil { t.Fatal(err) } _, err = keyring.Search(testname) if err == nil { t.Fatalf("Expected error, but got key %v", testname) } ExpectedError := "required key not available" if err.Error() != ExpectedError { t.Fatal(err) } } func TestReadKeyring(t *testing.T) { token := make([]byte, 20) rand.Read(token) testname := "testuser" userKeyring, err := UserKeyring() if err != nil { t.Fatal(err) } userKey, err := userKeyring.Add(testname, token) if err != nil { t.Fatal(err, userKey) } keys, err := ReadUserKeyring() if err != nil { t.Fatal(err) } expectedKeyLen := 1 if len(keys) != expectedKeyLen { t.Errorf("expected to read %d key(s) from the user keyring, but got %d", expectedKeyLen, len(keys)) } err = Unlink(userKeyring, userKey) if err != nil { t.Fatal(err) } } func TestDescribe(t *testing.T) { token := make([]byte, 20) rand.Read(token) testname := "testuser" userKeyring, err := UserKeyring() if err != nil { t.Fatal(err) } userKey, err := userKeyring.Add(testname, token) if err != nil { t.Fatal(err, userKey) } keyAttr, err := userKey.Describe() if err != nil { t.Fatal(err) } if !strings.Contains(keyAttr, testname) { t.Errorf("expected description to contain %s, but got %s", testname, keyAttr) } err = Unlink(userKeyring, userKey) if err != nil { t.Fatal(err) } } image-4.0.1/internal/pkg/keyctl/perm.go000066400000000000000000000016201354546467100177760ustar00rootroot00000000000000// Copyright 2015 Jesse Sipprell. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux package keyctl import ( "golang.org/x/sys/unix" ) // KeyPerm represents in-kernel access control permission to keys and keyrings // as a 32-bit integer broken up into four permission sets, one per byte. // In MSB order, the perms are: Process, User, Group, Other. type KeyPerm uint32 const ( // PermOtherAll sets all permission for Other PermOtherAll KeyPerm = 0x3f << (8 * iota) // PermGroupAll sets all permission for Group PermGroupAll // PermUserAll sets all permission for User PermUserAll // PermProcessAll sets all permission for Process PermProcessAll ) // SetPerm sets the permissions on a key or keyring. func SetPerm(k ID, p KeyPerm) error { err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) return err } image-4.0.1/internal/pkg/keyctl/sys_linux.go000066400000000000000000000007061354546467100210740ustar00rootroot00000000000000// Copyright 2015 Jesse Sipprell. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build linux package keyctl import ( "golang.org/x/sys/unix" ) type keyID int32 func newKeyring(id keyID) (*keyring, error) { r1, err := unix.KeyctlGetKeyringID(int(id), true) if err != nil { return nil, err } if id < 0 { r1 = int(id) } return &keyring{id: keyID(r1)}, nil } image-4.0.1/internal/testing/000077500000000000000000000000001354546467100161065ustar00rootroot00000000000000image-4.0.1/internal/testing/explicitfilepath-tmpdir/000077500000000000000000000000001354546467100227415ustar00rootroot00000000000000image-4.0.1/internal/testing/explicitfilepath-tmpdir/tmpdir.go000066400000000000000000000017321354546467100245720ustar00rootroot00000000000000// Package tmpdir is a TESTING-ONLY utility. // // Some tests directly or indirectly exercising the directory/explicitfilepath // subpackage expect the path returned by ioutil.TempDir to be canonical in the // directory/explicitfilepath sense (absolute, no symlinks, cleaned up).
// // ioutil.TempDir uses $TMPDIR by default, and on macOS, $TMPDIR is by // default set to /var/folders/…, with /var a symlink to /private/var, // which does not match our expectations. So, tests that want to use // ioutil.TempDir that way can // import _ "github.com/containers/image/internal/testing/explicitfilepath-tmpdir" // to ensure that $TMPDIR is canonical and usable as a base for testing // path canonicalization in its subdirectories. // // NEVER use this in non-testing subpackages! package tmpdir import ( "os" "path/filepath" ) func init() { tmpDir := os.TempDir() explicitTmpDir, err := filepath.EvalSymlinks(tmpDir) if err == nil { os.Setenv("TMPDIR", explicitTmpDir) } } image-4.0.1/internal/testing/mocks/000077500000000000000000000000001354546467100172225ustar00rootroot00000000000000image-4.0.1/internal/testing/mocks/imagetransport.go000066400000000000000000000021611354546467100226100ustar00rootroot00000000000000package mocks import "github.com/containers/image/v4/types" // NameImageTransport is a mock of types.ImageTransport which returns itself in Name. type NameImageTransport string // Name returns the name of the transport, which must be unique among other transports. func (name NameImageTransport) Name() string { return string(name) } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (name NameImageTransport) ParseReference(reference string) (types.ImageReference, error) { panic("unexpected call to a mock function") } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (name NameImageTransport) ValidatePolicyConfigurationScope(scope string) error { panic("unexpected call to a mock function") } image-4.0.1/internal/tmpdir/000077500000000000000000000000001354546467100157305ustar00rootroot00000000000000image-4.0.1/internal/tmpdir/tmpdir.go000066400000000000000000000022461354546467100175620ustar00rootroot00000000000000package tmpdir import ( "os" "runtime" ) // unixTempDirForBigFiles is the directory path to store big files on non-Windows systems. // You can override this at build time with // -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path' var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles // builtinUnixTempDirForBigFiles is the directory path to store big files. // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. // DO NOT change this, instead see unixTempDirForBigFiles above. const builtinUnixTempDirForBigFiles = "/var/tmp" // TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. // On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp // which on systemd-based systems could be the unsuitable tmpfs filesystem.
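// A usage sketch (illustrative only; the "blob" prefix and the error handling
// below are assumptions, not part of this package):
//
//	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "blob")
//	if err != nil {
//		return err
//	}
//	defer os.RemoveAll(dir)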
func TemporaryDirectoryForBigFiles() string { var temporaryDirectoryForBigFiles string if runtime.GOOS == "windows" { temporaryDirectoryForBigFiles = os.TempDir() } else { temporaryDirectoryForBigFiles = unixTempDirForBigFiles } return temporaryDirectoryForBigFiles } image-4.0.1/manifest/000077500000000000000000000000001354546467100144235ustar00rootroot00000000000000image-4.0.1/manifest/docker_schema1.go000066400000000000000000000304701354546467100176260ustar00rootroot00000000000000package manifest import ( "encoding/json" "regexp" "strings" "time" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/types" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. type Schema1FSLayers struct { BlobSum digest.Digest `json:"blobSum"` } // Schema1History is an entry of the "history" array in docker/distribution schema 1. type Schema1History struct { V1Compatibility string `json:"v1Compatibility"` } // Schema1 is a manifest in docker/distribution schema 1. type Schema1 struct { Name string `json:"name"` Tag string `json:"tag"` Architecture string `json:"architecture"` FSLayers []Schema1FSLayers `json:"fsLayers"` History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) SchemaVersion int `json:"schemaVersion"` } type schema1V1CompatibilityContainerConfig struct { Cmd []string } // Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. type Schema1V1Compatibility struct { ID string `json:"id"` Parent string `json:"parent,omitempty"` Comment string `json:"comment,omitempty"` Created time.Time `json:"created"` ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"` Author string `json:"author,omitempty"` ThrowAway bool `json:"throwaway,omitempty"` } // Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. // (NOTE: The instance is not necessarily a literal representation of the original blob, // layers with duplicate IDs are eliminated.) func Schema1FromManifest(manifest []byte) (*Schema1, error) { s1 := Schema1{} if err := json.Unmarshal(manifest, &s1); err != nil { return nil, err } if s1.SchemaVersion != 1 { return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) } if err := s1.initialize(); err != nil { return nil, err } if err := s1.fixManifestLayers(); err != nil { return nil, err } return &s1, nil } // Schema1FromComponents creates a Schema1 manifest instance from the supplied data. func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) { var name, tag string if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. name = reference.Path(ref) if tagged, ok := ref.(reference.NamedTagged); ok { tag = tagged.Tag() } } s1 := Schema1{ Name: name, Tag: tag, Architecture: architecture, FSLayers: fsLayers, History: history, SchemaVersion: 1, } if err := s1.initialize(); err != nil { return nil, err } return &s1, nil } // Schema1Clone creates a copy of the supplied Schema1 manifest.
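// Note that the clone below is shallow: slice fields such as FSLayers, History and
// ExtractedV1Compatibility still share backing arrays with src, so callers should
// reallocate those slices before mutating their elements. A hedged usage sketch:
//
//	clone := Schema1Clone(src)
//	clone.Tag = "other" // scalar fields are safe to change independently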
func Schema1Clone(src *Schema1) *Schema1 { copy := *src return &copy } // initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest. func (m *Schema1) initialize() error { if len(m.FSLayers) != len(m.History) { return errors.New("length of history not equal to number of layers") } if len(m.FSLayers) == 0 { return errors.New("no FSLayers in manifest") } m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History)) for i, h := range m.History { if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil { return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i) } } return nil } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. func (m *Schema1) ConfigInfo() types.BlobInfo { return types.BlobInfo{} } // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema1) LayerInfos() []LayerInfo { layers := make([]LayerInfo, len(m.FSLayers)) for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) layers[(len(m.FSLayers)-1)-i] = LayerInfo{ BlobInfo: types.BlobInfo{Digest: layer.BlobSum, Size: -1}, EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway, } } return layers } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well. if len(m.FSLayers) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) } m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) for i, info := range layerInfos { // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. // So, we don't bother recomputing the IDs in m.History.V1Compatibility. m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest } return nil } // Serialize returns the manifest in a blob format. // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! func (m *Schema1) Serialize() ([]byte, error) { // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. unsigned, err := json.Marshal(*m) if err != nil { return nil, err } return AddDummyV2S1Signature(unsigned) } // fixManifestLayers, after validating the supplied manifest // (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), // modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, // both from m.History and m.FSLayers).
// Note that even after this succeeds, m.FSLayers may contain duplicate entries // (for Dockerfile operations which change the configuration but not the filesystem). func (m *Schema1) fixManifestLayers() error { // m.initialize() has verified that len(m.FSLayers) == len(m.History) for _, compat := range m.ExtractedV1Compatibility { if err := validateV1ID(compat.ID); err != nil { return err } } if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { return errors.New("Invalid parent ID in the base layer of the image") } // check general duplicates to error instead of a deadlock idmap := make(map[string]struct{}) var lastID string for _, img := range m.ExtractedV1Compatibility { // skip IDs that appear after each other, we handle those later if _, exists := idmap[img.ID]; img.ID != lastID && exists { return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) } lastID = img.ID idmap[lastID] = struct{}{} } // backwards loop so that we keep the remaining indexes after removing items for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) m.History = append(m.History[:i], m.History[i+1:]...) m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...) } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) } } return nil } var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) func validateV1ID(id string) error { if ok := validHex.MatchString(id); !ok { return errors.Errorf("image ID %q is invalid", id) } return nil } // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { s1 := &Schema2V1Image{} if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { return nil, err } i := &types.ImageInspectInfo{ Tag: m.Tag, Created: &s1.Created, DockerVersion: s1.DockerVersion, Architecture: s1.Architecture, Os: s1.OS, Layers: layerInfosToStrings(m.LayerInfos()), } if s1.Config != nil { i.Labels = s1.Config.Labels i.Env = s1.Config.Env } return i, nil } // ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields // that aren't directly comparable using info from the manifest. if len(m.History) == 0 { return nil, errors.New("image has no layers") } s1 := Schema2V1Image{} config := []byte(m.History[0].V1Compatibility) err := json.Unmarshal(config, &s1) if err != nil { return nil, errors.Wrapf(err, "error decoding configuration") } // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, // adding some fields that aren't "omitempty". if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { config, err = json.Marshal(&s1) if err != nil { return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1) } } // Build the history. 
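// (Schema1 history is stored most-recent-first, while a schema2 config expects
// oldest-first, so each converted entry is prepended below to reverse the order.)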
convertedHistory := []Schema2History{} for _, compat := range m.ExtractedV1Compatibility { hitem := Schema2History{ Created: compat.Created, CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), Author: compat.Author, Comment: compat.Comment, EmptyLayer: compat.ThrowAway, } convertedHistory = append([]Schema2History{hitem}, convertedHistory...) } // Build the rootfs information. We need the decompressed sums that we've been // calculating to fill in the DiffIDs. It's expected (but not enforced by us) // that the number of diffIDs corresponds to the number of non-EmptyLayer // entries in the history. rootFS := &Schema2RootFS{ Type: "layers", DiffIDs: diffIDs, } // And now for some raw manipulation. raw := make(map[string]*json.RawMessage) err = json.Unmarshal(config, &raw) if err != nil { return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1) } // Drop some fields. delete(raw, "id") delete(raw, "parent") delete(raw, "parent_id") delete(raw, "layer_id") delete(raw, "throwaway") delete(raw, "Size") // Add the history and rootfs information. rootfs, err := json.Marshal(rootFS) if err != nil { return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) } rawRootfs := json.RawMessage(rootfs) raw["rootfs"] = &rawRootfs history, err := json.Marshal(convertedHistory) if err != nil { return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) } rawHistory := json.RawMessage(history) raw["history"] = &rawHistory // Encode the result. config, err = json.Marshal(raw) if err != nil { return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err) } return config, nil } // ImageID computes an ID which can uniquely identify this image by its contents. func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { image, err := m.ToSchema2Config(diffIDs) if err != nil { return "", err } return digest.FromBytes(image).Hex(), nil } image-4.0.1/manifest/docker_schema1_test.go000066400000000000000000000311511354546467100206620ustar00rootroot00000000000000package manifest import ( "io/ioutil" "path/filepath" "testing" "time" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func manifestSchema1FromFixture(t *testing.T, fixture string) *Schema1 { manifest, err := ioutil.ReadFile(filepath.Join("fixtures", fixture)) require.NoError(t, err) m, err := Schema1FromManifest(manifest) require.NoError(t, err) return m } func TestSchema1Initialize(t *testing.T) { // Test this indirectly via Schema1FromComponents; otherwise we would have to break the API and create an instance manually. // FIXME: this should eventually share a fixture with the other parsing tests. 
fsLayers := []Schema1FSLayers{ {BlobSum: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"}, {BlobSum: "sha256:62e48e39dc5b30b75a97f05bccc66efbae6058b860ee20a5c9a184b9d5e25788"}, {BlobSum: "sha256:9e92df2aea7dc0baf5f1f8d509678d6a6306de27ad06513f8e218371938c07a6"}, {BlobSum: "sha256:f576d102e09b9eef0e305aaef705d2d43a11bebc3fd5810a761624bd5e11997e"}, {BlobSum: "sha256:4aa565ad8b7a87248163ce7dba1dd3894821aac97e846b932ff6b8ef9a8a508a"}, {BlobSum: "sha256:9cadd93b16ff2a0c51ac967ea2abfadfac50cfa3af8b5bf983d89b8f8647f3e4"}, } history := []Schema1History{ {V1Compatibility: "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"kolla_start\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"3bf9afe371220b1eb1c57bec39b5a99ba976c36c92d964a1c014584f95f51e33\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"container_config\":{\"Hostname\":\"9428cdea83ba\",\"Domainname\":\"\",\"User\":\"nova\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"container=oci\",\"KOLLA_BASE_DISTRO=rhel\",\"KOLLA_INSTALL_TYPE=binary\",\"KOLLA_INSTALL_METATYPE=rhos\",\"PS1=$(tput bold)($(printenv KOLLA_SERVICE_NAME))$(tput sgr0)[$(id -un)@$(hostname -s) $(pwd)]$ \"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) \",\"USER 
[nova]\"],\"Healthcheck\":{\"Test\":[\"CMD-SHELL\",\"/openstack/healthcheck\"]},\"ArgsEscaped\":true,\"Image\":\"sha256:274ce4dcbeb09fa173a5d50203ae5cec28f456d1b8b59477b47a42bd74d068bf\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":[],\"Labels\":{\"Kolla-SHA\":\"5.0.0-39-g6f1b947b\",\"architecture\":\"x86_64\",\"authoritative-source-url\":\"registry.access.redhat.com\",\"build-date\":\"2018-01-25T00:32:27.807261\",\"com.redhat.build-host\":\"ip-10-29-120-186.ec2.internal\",\"com.redhat.component\":\"openstack-nova-api-docker\",\"description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"distribution-scope\":\"public\",\"io.k8s.description\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.k8s.display-name\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"io.openshift.tags\":\"rhosp osp openstack osp-12.0\",\"kolla_version\":\"stable/pike\",\"name\":\"rhosp12/openstack-nova-api\",\"release\":\"20180124.1\",\"summary\":\"Red Hat OpenStack Platform 12.0 nova-api\",\"tripleo-common_version\":\"7.6.3-23-g4891cfe\",\"url\":\"https://access.redhat.com/containers/#/registry.access.redhat.com/rhosp12/openstack-nova-api/images/12.0-20180124.1\",\"vcs-ref\":\"9b31243b7b448eb2fc3b6e2c96935b948f806e98\",\"vcs-type\":\"git\",\"vendor\":\"Red Hat, Inc.\",\"version\":\"12.0\",\"version-release\":\"12.0-20180124.1\"}},\"created\":\"2018-01-25T00:37:48.268558Z\",\"docker_version\":\"1.12.6\",\"id\":\"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf\",\"os\":\"linux\",\"parent\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\"}"}, {V1Compatibility: "{\"id\":\"20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20\",\"parent\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"created\":\"2018-01-24T23:08:25.300741Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"}, {V1Compatibility: "{\"id\":\"47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa\",\"parent\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"created\":\"2018-01-24T22:00:57.807862Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"}, {V1Compatibility: "{\"id\":\"cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824\",\"parent\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"created\":\"2018-01-24T21:40:32.494686Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'\"]}}"}, {V1Compatibility: "{\"id\":\"0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2\",\"parent\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"created\":\"2017-11-21T16:49:37.292899Z\",\"container_config\":{\"Cmd\":[\"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'\"]},\"author\":\"Red Hat, Inc.\"}"}, {V1Compatibility: "{\"id\":\"3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345\",\"comment\":\"Imported from -\",\"created\":\"2017-11-21T16:47:27.755341705Z\",\"container_config\":{\"Cmd\":[\"\"]}}"}, } // Valid input m, err := Schema1FromComponents(nil, fsLayers, history, "amd64") assert.NoError(t, err) assert.Equal(t, []Schema1V1Compatibility{ { ID: 
"486cbbaf6c6f7d890f9368c86eda3f4ebe3ae982b75098037eb3c3cc6f0e0cdf", Parent: "20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20", Created: time.Date(2018, 1, 25, 0, 37, 48, 268558000, time.UTC), ContainerConfig: schema1V1CompatibilityContainerConfig{ Cmd: []string{"/bin/sh", "-c", "#(nop) ", "USER [nova]"}, }, ThrowAway: false, }, { ID: "20d0c9c79f9fee83c4094993335b9b321112f13eef60ed9ec1599c7593dccf20", Parent: "47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa", Created: time.Date(2018, 1, 24, 23, 8, 25, 300741000, time.UTC), ContainerConfig: schema1V1CompatibilityContainerConfig{ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"}, }, ThrowAway: false, }, { ID: "47a1014db2116c312736e11adcc236fb77d0ad32457f959cbaec0c3fc9ab1caa", Parent: "cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824", Created: time.Date(2018, 1, 24, 22, 0, 57, 807862000, time.UTC), ContainerConfig: schema1V1CompatibilityContainerConfig{ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"}, }, ThrowAway: false, }, { ID: "cec66cab6c92a5f7b50ef407b80b83840a0d089b9896257609fd01de3a595824", Parent: "0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2", Created: time.Date(2018, 1, 24, 21, 40, 32, 494686000, time.UTC), ContainerConfig: schema1V1CompatibilityContainerConfig{ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/rhel-7.4.repo' '/etc/yum.repos.d/rhos-optools-12.0.repo' '/etc/yum.repos.d/rhos-12.0-container-yum-need_images.repo'"}, }, ThrowAway: false, }, { ID: "0e7730eccb3d014b33147b745d771bc0e38a967fd932133a6f5325a3c84282e2", Parent: "3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345", Created: time.Date(2017, 11, 21, 16, 49, 37, 292899000, time.UTC), ContainerConfig: schema1V1CompatibilityContainerConfig{ Cmd: []string{"/bin/sh -c rm -f '/etc/yum.repos.d/compose-rpms-1.repo'"}, }, Author: "Red Hat, Inc.", ThrowAway: false, }, { ID: "3e49094c0233214ab73f8e5c204af8a14cfc6f0403384553c17fbac2e9d38345", Comment: "Imported from -", Created: time.Date(2017, 11, 21, 16, 47, 27, 755341705, time.UTC), ContainerConfig: schema1V1CompatibilityContainerConfig{ Cmd: []string{""}, }, ThrowAway: false, }, }, m.ExtractedV1Compatibility) // Layer and history length mismatch _, err = Schema1FromComponents(nil, fsLayers, history[1:], "amd64") assert.Error(t, err) // No layers/history _, err = Schema1FromComponents(nil, []Schema1FSLayers{}, []Schema1History{}, "amd64") assert.Error(t, err) // Invalid history JSON _, err = Schema1FromComponents(nil, []Schema1FSLayers{{BlobSum: "sha256:e623934bca8d1a74f51014256445937714481e49343a31bda2bc5f534748184d"}}, []Schema1History{{V1Compatibility: "-"}}, "amd64") assert.Error(t, err) } func TestSchema1LayerInfos(t *testing.T) { // We use this instead of original schema1 manifests, because those, surprisingly, // seem not to set the "throwaway" flag. 
m := manifestSchema1FromFixture(t, "schema2-to-schema1-by-docker.json") // FIXME: Test also Schema1FromComponents assert.Equal(t, []LayerInfo{ {BlobInfo: types.BlobInfo{Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:8f5dc8a4b12c307ac84de90cdd9a7f3915d1be04c9388868ca118831099c67a9", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:bbd6b22eb11afce63cc76f6bc41042d99f10d6024c96b655dafba930b8d25909", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:960e52ecf8200cbd84e70eb2ad8678f4367e50d14357021872c10fa3fc5935fa", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, }, m.LayerInfos()) } image-4.0.1/manifest/docker_schema2.go000066400000000000000000000355721354546467100176370ustar00rootroot00000000000000package manifest import ( "encoding/json" "fmt" "time" "github.com/containers/image/v4/pkg/compression" "github.com/containers/image/v4/pkg/strslice" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // Schema2Descriptor is a “descriptor” in docker/distribution schema 2. type Schema2Descriptor struct { MediaType string `json:"mediaType"` Size int64 `json:"size"` Digest digest.Digest `json:"digest"` URLs []string `json:"urls,omitempty"` } // BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { return types.BlobInfo{ Digest: desc.Digest, Size: desc.Size, URLs: desc.URLs, MediaType: desc.MediaType, } } // Schema2 is a manifest in docker/distribution schema 2. 
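//
// For orientation, a manifest of this shape looks roughly like the following
// (the digest and size values are illustrative assumptions, not real content):
//
//	{
//	  "schemaVersion": 2,
//	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
//	  "config": {"mediaType": "application/vnd.docker.container.image.v1+json", "size": 7023, "digest": "sha256:..."},
//	  "layers": [{"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 32654, "digest": "sha256:..."}]
//	}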
type Schema2 struct { SchemaVersion int `json:"schemaVersion"` MediaType string `json:"mediaType"` ConfigDescriptor Schema2Descriptor `json:"config"` LayersDescriptors []Schema2Descriptor `json:"layers"` } // Schema2Port is a Port, a string containing port number and protocol in the // format "80/tcp", from docker/go-connections/nat. type Schema2Port string // Schema2PortSet is a PortSet, a collection of structs indexed by Port, from // docker/go-connections/nat. type Schema2PortSet map[Schema2Port]struct{} // Schema2HealthConfig is a HealthConfig, which holds configuration settings // for the HEALTHCHECK feature, from docker/docker/api/types/container. type Schema2HealthConfig struct { // Test is the test to perform to check that the container is healthy. // An empty slice means to inherit the default. // The options are: // {} : inherit healthcheck // {"NONE"} : disable healthcheck // {"CMD", args...} : exec arguments directly // {"CMD-SHELL", command} : run command with system's default shell Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. Retries int `json:",omitempty"` } // Schema2Config is a Config in docker/docker/api/types/container. type Schema2Config struct { Hostname string // Hostname Domainname string // Domainname User string // User that will run the command(s) inside the container, also supports user:group AttachStdin bool // Attach the standard input, makes possible user interaction AttachStdout bool // Attach the standard output AttachStderr bool // Attach the standard error ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports Tty bool // Attach standard streams to a tty, including stdin if it is not closed. OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the first attached client disconnects. Env []string // List of environment variables to set in the container Cmd strslice.StrSlice // Command to run when starting the container Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in which the command will be launched Entrypoint strslice.StrSlice // Entrypoint to run when starting the container NetworkDisabled bool `json:",omitempty"` // Is network disabled MacAddress string `json:",omitempty"` // MAC address of the container OnBuild []string // ONBUILD metadata that was defined on the image Dockerfile Labels map[string]string // List of labels set on this container StopSignal string `json:",omitempty"` // Signal to stop a container StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT } // Schema2V1Image is a V1Image in docker/docker/image.
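// (This is the legacy container/image configuration that schema1 embeds in its
// v1Compatibility strings; Schema2Image below extends it with rootfs and history.)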
type Schema2V1Image struct { // ID is a unique 64 character identifier of the image ID string `json:"id,omitempty"` // Parent is the ID of the parent image Parent string `json:"parent,omitempty"` // Comment is the commit message that was set when committing the image Comment string `json:"comment,omitempty"` // Created is the timestamp at which the image was created Created time.Time `json:"created"` // Container is the id of the container used to commit Container string `json:"container,omitempty"` // ContainerConfig is the configuration of the container that is committed into the image ContainerConfig Schema2Config `json:"container_config,omitempty"` // DockerVersion specifies the version of Docker that was used to build the image DockerVersion string `json:"docker_version,omitempty"` // Author is the name of the author that was specified when committing the image Author string `json:"author,omitempty"` // Config is the configuration of the container received from the client Config *Schema2Config `json:"config,omitempty"` // Architecture is the hardware that the image is built and runs on Architecture string `json:"architecture,omitempty"` // OS is the operating system used to build and run the image OS string `json:"os,omitempty"` // Size is the total size of the image including all layers it is composed of Size int64 `json:",omitempty"` } // Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. type Schema2RootFS struct { Type string `json:"type"` DiffIDs []digest.Digest `json:"diff_ids,omitempty"` } // Schema2History stores build commands that were used to create an image, from docker/docker/image. type Schema2History struct { // Created is the timestamp at which the image was created Created time.Time `json:"created"` // Author is the name of the author that was specified when committing the image Author string `json:"author,omitempty"` // CreatedBy keeps the Dockerfile command used while building the image CreatedBy string `json:"created_by,omitempty"` // Comment is the commit message that was set when committing the image Comment string `json:"comment,omitempty"` // EmptyLayer is set to true if this history item did not generate a // layer. Otherwise, the history item is associated with the next // layer in the RootFS section. EmptyLayer bool `json:"empty_layer,omitempty"` } // Schema2Image is an Image in docker/docker/image. type Schema2Image struct { Schema2V1Image Parent digest.Digest `json:"parent,omitempty"` RootFS *Schema2RootFS `json:"rootfs,omitempty"` History []Schema2History `json:"history,omitempty"` OSVersion string `json:"os.version,omitempty"` OSFeatures []string `json:"os.features,omitempty"` } // Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. func Schema2FromManifest(manifest []byte) (*Schema2, error) { s2 := Schema2{} if err := json.Unmarshal(manifest, &s2); err != nil { return nil, err } // Check manifest's and layers' media types. if err := SupportedSchema2MediaType(s2.MediaType); err != nil { return nil, err } for _, layer := range s2.LayersDescriptors { if err := SupportedSchema2MediaType(layer.MediaType); err != nil { return nil, err } } return &s2, nil } // Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
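//
// A minimal construction sketch (the descriptor values are assumptions for illustration):
//
//	m := Schema2FromComponents(configDescriptor, layerDescriptors)
//	blob, err := m.Serialize()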
func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { return &Schema2{ SchemaVersion: 2, MediaType: DockerV2Schema2MediaType, ConfigDescriptor: config, LayersDescriptors: layers, } } // Schema2Clone creates a copy of the supplied Schema2 manifest. func Schema2Clone(src *Schema2) *Schema2 { copy := *src return &copy } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. func (m *Schema2) ConfigInfo() types.BlobInfo { return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor) } // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema2) LayerInfos() []LayerInfo { blobs := []LayerInfo{} for _, layer := range m.LayersDescriptors { blobs = append(blobs, LayerInfo{ BlobInfo: BlobInfoFromSchema2Descriptor(layer), EmptyLayer: false, }) } return blobs } // isSchema2ForeignLayer is a convenience wrapper to check if a given mime type // is a compressed or decompressed schema 2 foreign layer. func isSchema2ForeignLayer(mimeType string) bool { switch mimeType { case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip: return true default: return false } } // isSchema2Layer is a convenience wrapper to check if a given mime type is a // compressed or decompressed schema 2 layer. func isSchema2Layer(mimeType string) bool { switch mimeType { case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType: return true default: return false } } // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { if len(m.LayersDescriptors) != len(layerInfos) { return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) } original := m.LayersDescriptors m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) for i, info := range layerInfos { // First make sure we support the media type of the original layer. if err := SupportedSchema2MediaType(original[i].MediaType); err != nil { return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType) } // Set the correct media types based on the specified compression // operation, the desired compression algorithm AND the original media // type. // // Note that manifests in containers-storage might be reporting the // wrong media type since the original manifests are stored while layers // are decompressed in storage. Hence, we need to consider the case // that an already {de}compressed layer should be {de}compressed, which // is being addressed in `isSchema2{Foreign}Layer`. switch info.CompressionOperation { case types.PreserveOriginal: // Keep the original media type. m.LayersDescriptors[i].MediaType = original[i].MediaType case types.Decompress: // Decompress the original media type and check whether it was // a non-distributable one or not.
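// (Concretely: DockerV2Schema2ForeignLayerMediaTypeGzip maps back to
// DockerV2Schema2ForeignLayerMediaType, and DockerV2Schema2LayerMediaType to
// DockerV2SchemaLayerMediaTypeUncompressed, per the switch below.)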
mimeType := original[i].MediaType switch { case isSchema2ForeignLayer(mimeType): m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType case isSchema2Layer(mimeType): m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed default: return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType) } case types.Compress: if info.CompressionAlgorithm == nil { logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest) m.LayersDescriptors[i].MediaType = original[i].MediaType break } // Compress the original media type and set the new one based on // that type (distributable or not) and the specified compression // algorithm. Throw an error if the algorithm is not supported. switch info.CompressionAlgorithm.Name() { case compression.Gzip.Name(): mimeType := original[i].MediaType switch { case isSchema2ForeignLayer(mimeType): m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip case isSchema2Layer(mimeType): m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType default: return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType) } case compression.Zstd.Name(): return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images") default: return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest) } default: return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest) } m.LayersDescriptors[i].Digest = info.Digest m.LayersDescriptors[i].Size = info.Size m.LayersDescriptors[i].URLs = info.URLs } return nil } // Serialize returns the manifest in a blob format. // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! func (m *Schema2) Serialize() ([]byte, error) { return json.Marshal(*m) } // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { config, err := configGetter(m.ConfigInfo()) if err != nil { return nil, err } s2 := &Schema2Image{} if err := json.Unmarshal(config, s2); err != nil { return nil, err } i := &types.ImageInspectInfo{ Tag: "", Created: &s2.Created, DockerVersion: s2.DockerVersion, Architecture: s2.Architecture, Os: s2.OS, Layers: layerInfosToStrings(m.LayerInfos()), } if s2.Config != nil { i.Labels = s2.Config.Labels i.Env = s2.Config.Env } return i, nil } // ImageID computes an ID which can uniquely identify this image by its contents. 
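// For schema2 this is simply the hex of the config descriptor's digest; the diffID
// argument is ignored. A hedged sketch of the caller side:
//
//	id, err := m.ImageID(nil) // diffIDs are not used by the schema2 implementation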
func (m *Schema2) ImageID([]digest.Digest) (string, error) { if err := m.ConfigDescriptor.Digest.Validate(); err != nil { return "", err } return m.ConfigDescriptor.Digest.Hex(), nil } image-4.0.1/manifest/docker_schema2_test.go000066400000000000000000000143141354546467100206650ustar00rootroot00000000000000package manifest import ( "io/ioutil" "testing" "github.com/containers/image/v4/pkg/compression" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" ) func TestSupportedSchema2MediaType(t *testing.T) { type testData struct { m string mustFail bool } data := []testData{ { DockerV2Schema2MediaType, false, }, { DockerV2Schema2ConfigMediaType, false, }, { DockerV2Schema2LayerMediaType, false, }, { DockerV2SchemaLayerMediaTypeUncompressed, false, }, { DockerV2ListMediaType, false, }, { DockerV2Schema2ForeignLayerMediaType, false, }, { DockerV2Schema2ForeignLayerMediaTypeGzip, false, }, { "application/vnd.docker.image.rootfs.foreign.diff.unknown", true, }, } for _, d := range data { err := SupportedSchema2MediaType(d.m) if d.mustFail { assert.NotNil(t, err) } else { assert.Nil(t, err) } } } func TestUpdateLayerInfosV2S2GzipToZstd(t *testing.T) { bytes, err := ioutil.ReadFile("fixtures/v2s2.manifest.json") assert.Nil(t, err) origManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) err = origManifest.UpdateLayerInfos([]types.BlobInfo{ { Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", Size: 32654, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Zstd, }, { Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", Size: 16724, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Zstd, }, { Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", Size: 73109, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Zstd, }, }) assert.NotNil(t, err) // zstd is not supported for docker images } func TestUpdateLayerInfosV2S2InvalidCompressionOperation(t *testing.T) { bytes, err := ioutil.ReadFile("fixtures/v2s2.manifest.json") assert.Nil(t, err) origManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) err = origManifest.UpdateLayerInfos([]types.BlobInfo{ { Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", Size: 32654, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Decompress, }, { Digest: "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", Size: 16724, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Decompress, }, { Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", Size: 73109, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: 42, // MUST fail here }, }) assert.NotNil(t, err) } func TestUpdateLayerInfosV2S2InvalidCompressionAlgorithm(t *testing.T) { bytes, err := ioutil.ReadFile("fixtures/v2s2.manifest.json") assert.Nil(t, err) origManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) err = origManifest.UpdateLayerInfos([]types.BlobInfo{ { Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", Size: 32654, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip, }, { Digest: 
"sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b", Size: 16724, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip, }, { Digest: "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736", Size: 73109, MediaType: DockerV2Schema2LayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Zstd, // MUST fail here }, }) assert.NotNil(t, err) } func TestUpdateLayerInfosV2S2NondistributableToGzip(t *testing.T) { bytes, err := ioutil.ReadFile("fixtures/v2s2.nondistributable.manifest.json") assert.Nil(t, err) origManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) err = origManifest.UpdateLayerInfos([]types.BlobInfo{ { Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", Size: 32654, MediaType: DockerV2Schema2ForeignLayerMediaType, CompressionOperation: types.Compress, CompressionAlgorithm: &compression.Gzip, }, }) assert.Nil(t, err) updatedManifestBytes, err := origManifest.Serialize() assert.Nil(t, err) bytes, err = ioutil.ReadFile("fixtures/v2s2.nondistributable.gzip.manifest.json") assert.Nil(t, err) expectedManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) expectedManifestBytes, err := expectedManifest.Serialize() assert.Nil(t, err) assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) } func TestUpdateLayerInfosV2S2NondistributableGzipToUncompressed(t *testing.T) { bytes, err := ioutil.ReadFile("fixtures/v2s2.nondistributable.gzip.manifest.json") assert.Nil(t, err) origManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) err = origManifest.UpdateLayerInfos([]types.BlobInfo{ { Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", Size: 32654, MediaType: DockerV2Schema2ForeignLayerMediaType, CompressionOperation: types.Decompress, }, }) assert.Nil(t, err) updatedManifestBytes, err := origManifest.Serialize() assert.Nil(t, err) bytes, err = ioutil.ReadFile("fixtures/v2s2.nondistributable.manifest.json") assert.Nil(t, err) expectedManifest, err := Schema2FromManifest(bytes) assert.Nil(t, err) expectedManifestBytes, err := expectedManifest.Serialize() assert.Nil(t, err) assert.Equal(t, string(expectedManifestBytes), string(updatedManifestBytes)) } image-4.0.1/manifest/fixtures/000077500000000000000000000000001354546467100162745ustar00rootroot00000000000000image-4.0.1/manifest/fixtures/non-json.manifest.json000066400000000000000000000006331354546467100225370ustar00rootroot00000000000000xD$1Z)($391GɪZ)3%5$NON--JMK-JKNUR*I-.KMLju2sStS2AJ3Lͬ Ҍ SSLL͓- ,,SRS--M͓R ͌,- R S R LR LSMMATܑXWZPXRZ R_PPrQ*PqBА\s ML-- kk;00r02…SZQW~:k?g^WeĈ`v喙X,U,23|`*;Ϯ 2㢌{ߗ=s&%Rwշ6i̯kG1?cSjl[]ű= 2) assert.Equal(t, tmpDir, ns[0]) assert.Equal(t, filepath.Dir(tmpDir), ns[1]) // Test with a known path which should exist. Test just one non-canonical // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. // // It would be nice to test a deeper hierarchy, but it is not obvious what // deeper path is always available in the various distros, AND is not likely // to contains a symbolic link. for _, path := range []string{"/usr/share", "/usr/share/./."} { _, err := os.Lstat(path) require.NoError(t, err) ref, err := NewReference(path, "someimage") require.NoError(t, err) ns := ref.PolicyConfigurationNamespaces() require.NotNil(t, ns) assert.Equal(t, []string{"/usr/share", "/usr"}, ns) } // "/" as a corner case. 
ref, err := NewReference("/", "image3") require.NoError(t, err) assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces()) } func TestReferenceNewImage(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) _, err := ref.NewImage(context.Background(), nil) assert.Error(t, err) } func TestReferenceNewImageSource(t *testing.T) { ref, tmpTarFile := refToTempOCIArchive(t) defer os.RemoveAll(tmpTarFile) _, err := ref.NewImageSource(context.Background(), nil) assert.NoError(t, err) } func TestReferenceNewImageDestination(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) assert.NoError(t, err) defer dest.Close() } func TestReferenceDeleteImage(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) err := ref.DeleteImage(context.Background(), nil) assert.Error(t, err) } image-4.0.1/oci/internal/000077500000000000000000000000001354546467100152035ustar00rootroot00000000000000image-4.0.1/oci/internal/oci_util.go000066400000000000000000000065531354546467100173520ustar00rootroot00000000000000package internal import ( "github.com/pkg/errors" "path/filepath" "regexp" "runtime" "strings" ) // annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys const ( separator = `(?:[-._:@+]|--)` alphanum = `(?:[A-Za-z0-9]+)` component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` ) var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`) // ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs. // In any other case an error is returned. func ValidateImageName(image string) error { if len(image) == 0 { return nil } var err error if !refRegexp.MatchString(image) { err = errors.Errorf("Invalid image %s", image) } return err } // SplitPathAndImage tries to split the provided OCI reference into the OCI path and image. // Neither path nor image parts are validated at this stage. func SplitPathAndImage(reference string) (string, string) { if runtime.GOOS == "windows" { return splitPathAndImageWindows(reference) } return splitPathAndImageNonWindows(reference) } func splitPathAndImageWindows(reference string) (string, string) { groups := windowsRefRegexp.FindStringSubmatch(reference) // nil group means no match if groups == nil { return reference, "" } // we expect three elements. First one full match, second the capture group for the path and // the third the capture group for the image if len(groups) != 3 { return reference, "" } return groups[1], groups[2] } func splitPathAndImageNonWindows(reference string) (string, string) { sep := strings.SplitN(reference, ":", 2) path := sep[0] var image string if len(sep) == 2 { image = sep[1] } return path, image } // ValidateOCIPath takes the OCI path and validates it. func ValidateOCIPath(path string) error { if runtime.GOOS == "windows" { // On Windows we must allow for a ':' as part of the path if strings.Count(path, ":") > 1 { return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) } } else { if strings.Contains(path, ":") { return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) } } return nil } // ValidateScope validates a policy configuration scope for an OCI transport. 
func ValidateScope(scope string) error { var err error if runtime.GOOS == "windows" { err = validateScopeWindows(scope) } else { err = validateScopeNonWindows(scope) } if err != nil { return err } cleaned := filepath.Clean(scope) if cleaned != scope { return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) } return nil } func validateScopeWindows(scope string) error { matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) if !matched { return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) } return nil } func validateScopeNonWindows(scope string) error { if !strings.HasPrefix(scope, "/") { return errors.Errorf("Invalid scope %s: must be an absolute path", scope) } // Refuse also "/", otherwise "/" and "" would have the same semantics, // and "" could be unexpectedly shadowed by the "/" entry. if scope == "/" { return errors.New(`Invalid scope "/": Use the generic default scope ""`) } return nil } image-4.0.1/oci/internal/oci_util_test.go000066400000000000000000000032311354546467100203770ustar00rootroot00000000000000package internal import ( "fmt" "github.com/stretchr/testify/assert" "testing" ) type testDataSplitReference struct { ref string dir string image string } type testDataScopeValidation struct { scope string errMessage string } func TestSplitReferenceIntoDirAndImageWindows(t *testing.T) { tests := []testDataSplitReference{ {`C:\foo\bar:busybox:latest`, `C:\foo\bar`, "busybox:latest"}, {`C:\foo\bar:busybox`, `C:\foo\bar`, "busybox"}, {`C:\foo\bar`, `C:\foo\bar`, ""}, } for _, test := range tests { dir, image := splitPathAndImageWindows(test.ref) assert.Equal(t, test.dir, dir, "Unexpected OCI directory") assert.Equal(t, test.image, image, "Unexpected image") } } func TestSplitReferenceIntoDirAndImageNonWindows(t *testing.T) { tests := []testDataSplitReference{ {"/foo/bar:busybox:latest", "/foo/bar", "busybox:latest"}, {"/foo/bar:busybox", "/foo/bar", "busybox"}, {"/foo/bar", "/foo/bar", ""}, } for _, test := range tests { dir, image := splitPathAndImageNonWindows(test.ref) assert.Equal(t, test.dir, dir, "Unexpected OCI directory") assert.Equal(t, test.image, image, "Unexpected image") } } func TestValidateScopeWindows(t *testing.T) { tests := []testDataScopeValidation{ {`C:\foo`, ""}, {`D:\`, ""}, {"C:", "Invalid scope 'C:'. Must be an absolute path"}, {"E", "Invalid scope 'E'. Must be an absolute path"}, {"", "Invalid scope ''. 
Must be an absolute path"}, } for _, test := range tests { err := validateScopeWindows(test.scope) if test.errMessage == "" { assert.NoError(t, err) } else { assert.EqualError(t, err, test.errMessage, fmt.Sprintf("No error for scope '%s'", test.scope)) } } } image-4.0.1/oci/layout/000077500000000000000000000000001354546467100147045ustar00rootroot00000000000000image-4.0.1/oci/layout/fixtures/000077500000000000000000000000001354546467100165555ustar00rootroot00000000000000image-4.0.1/oci/layout/fixtures/accepted_certs/000077500000000000000000000000001354546467100215255ustar00rootroot00000000000000image-4.0.1/oci/layout/fixtures/accepted_certs/cacert.crt000066400000000000000000000014061354546467100235010ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICDTCCAW+gAwIBAgITAIFeQjAc7foKoRkj4CwVfETqcTAKBggqhkjOPQQDBDAS MRAwDgYDVQQKDAdBY21lIENvMB4XDTE5MDkyNjE0NTY0OVoXDTIwMDkyNTE0NTY0 OVowEjEQMA4GA1UECgwHQWNtZSBDbzCBmzAQBgcqhkjOPQIBBgUrgQQAIwOBhgAE AI3pxckijV44L3ffAlLOqB4oA/HpP7S5gTpWrIUU+2SxFJU/bcTKDLPk1cEC87vW +UCYIXAyYGlyMAGSm0GxAFHnAIIrQzx9m3yiHbUyIPvRMW4BoDKsLaf5+GIZMm9n Oq2qnjvHr9ag2J3IzxEqQ8KZ95ivmHYrh3VsnfisI7c3opiro2EwXzAOBgNVHQ8B Af8EBAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB /wQFMAMBAf8wHQYDVR0RBBYwFIIJbG9jYWxob3N0gQdhQGEuY29tMAoGCCqGSM49 BAMEA4GLADCBhwJBBLQFykSLj4iCTmrPIVqhjjDJMULeUmpc+iLF2VuPrr6BnYwS MIzKfV9/ml32DaaSh+pzf3MfaXnx1Pv1fKGoJCgCQgHJfuCpoT1pV0UkoHDGzmIA h1MG7MEghRZpkdqsJa2g6C68MThAJNR61iEU2D8QTYYH5yPgWTZK/gQ+WP45pZ3p GQ== -----END CERTIFICATE----- image-4.0.1/oci/layout/fixtures/accepted_certs/cert.cert000066400000000000000000000014061354546467100233420ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICDTCCAW+gAwIBAgITAIFeQjAc7foKoRkj4CwVfETqcTAKBggqhkjOPQQDBDAS MRAwDgYDVQQKDAdBY21lIENvMB4XDTE5MDkyNjE0NTY0OVoXDTIwMDkyNTE0NTY0 OVowEjEQMA4GA1UECgwHQWNtZSBDbzCBmzAQBgcqhkjOPQIBBgUrgQQAIwOBhgAE AI3pxckijV44L3ffAlLOqB4oA/HpP7S5gTpWrIUU+2SxFJU/bcTKDLPk1cEC87vW +UCYIXAyYGlyMAGSm0GxAFHnAIIrQzx9m3yiHbUyIPvRMW4BoDKsLaf5+GIZMm9n Oq2qnjvHr9ag2J3IzxEqQ8KZ95ivmHYrh3VsnfisI7c3opiro2EwXzAOBgNVHQ8B Af8EBAMCAqQwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB /wQFMAMBAf8wHQYDVR0RBBYwFIIJbG9jYWxob3N0gQdhQGEuY29tMAoGCCqGSM49 BAMEA4GLADCBhwJBBLQFykSLj4iCTmrPIVqhjjDJMULeUmpc+iLF2VuPrr6BnYwS MIzKfV9/ml32DaaSh+pzf3MfaXnx1Pv1fKGoJCgCQgHJfuCpoT1pV0UkoHDGzmIA h1MG7MEghRZpkdqsJa2g6C68MThAJNR61iEU2D8QTYYH5yPgWTZK/gQ+WP45pZ3p GQ== -----END CERTIFICATE----- image-4.0.1/oci/layout/fixtures/accepted_certs/cert.key000066400000000000000000000005551354546467100232010ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- MIHcAgEBBEIAMDtdVU5PeUWCo1Ndvr+1X+Hry4I7+NdTqxLlU0ZBudm2ov0iJdZj O2PdSW6pRHJl9gYL+D/QjcEIwQBK4vsHS3SgBwYFK4EEACOhgYkDgYYABACN6cXJ Io1eOC933wJSzqgeKAPx6T+0uYE6VqyFFPtksRSVP23Eygyz5NXBAvO71vlAmCFw MmBpcjABkptBsQBR5wCCK0M8fZt8oh21MiD70TFuAaAyrC2n+fhiGTJvZzqtqp47 x6/WoNidyM8RKkPCmfeYr5h2K4d1bJ34rCO3N6KYqw== -----END EC PRIVATE KEY----- image-4.0.1/oci/layout/fixtures/accepted_certs/gencert.sh000077500000000000000000000012321354546467100235110ustar00rootroot00000000000000#!/bin/bash -e config=$(mktemp -t) if test -z "$config" ; then echo error creating temporary file for configuration exit 1 fi trap 'rm -f "$config"' EXIT cat > "$config" << EOF [req] prompt=no distinguished_name=dn x509_extensions=extensions [extensions] keyUsage=critical,digitalSignature,keyEncipherment,keyCertSign extendedKeyUsage=serverAuth,clientAuth basicConstraints=critical,CA:TRUE subjectAltName=DNS:localhost,email:a@a.com [dn] O=Acme Co EOF serial=$(dd if=/dev/random bs=1 count=16 
status=none | hexdump -e '"%x1"') openssl req -new -set_serial 0x"$serial" -x509 -sha512 -days 365 -key cert.key -config "$config" -out cert.cert cp cert.cert cacert.crt image-4.0.1/oci/layout/fixtures/manifest/000077500000000000000000000000001354546467100203635ustar00rootroot00000000000000image-4.0.1/oci/layout/fixtures/manifest/index.json000066400000000000000000000004501354546467100223640ustar00rootroot00000000000000{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.oci.image.manifest.v1+json","digest":"sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0","size":496,"annotations":{"org.opencontainers.image.ref.name":"v0.1.1"},"platform":{"architecture":"amd64","os":"linux"}}]}image-4.0.1/oci/layout/fixtures/rejected_certs/000077500000000000000000000000001354546467100215425ustar00rootroot00000000000000image-4.0.1/oci/layout/fixtures/rejected_certs/cert.cert000066400000000000000000000014061354546467100233570ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIICDTCCAW6gAwIBAgISALRsBzQesV5B7NE+qxFDKZThMAoGCCqGSM49BAMEMBIx EDAOBgNVBAoMB0FjbWUgQ28wHhcNMTkwOTI2MTQ1ODEyWhcNMjAwOTI1MTQ1ODEy WjASMRAwDgYDVQQKDAdBY21lIENvMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQB R4+JF/uRFgu6KK2o4H1iRw7PU8dh9lY788y67iZ5s3Ovk1TCg16GqsQcr8VCeh1x FVbYWII3pqBhd9slm81fg+cBk5Yd+yCljqR1Shi5+r0O5BWhVE5euZdyBs6Rnfgg 5sa09hmFXS45q0SDTW8uSQu35bAqr+hT5Ww/6gPUvGt1Oa+jYTBfMA4GA1UdDwEB /wQEAwICpDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/ BAUwAwEB/zAdBgNVHREEFjAUgglsb2NhbGhvc3SBB2JAYi5jb20wCgYIKoZIzj0E AwQDgYwAMIGIAkIBUUd2kTZFsFzWR7OwfB39yHlvjQ8r1zLkgG7RDROvz9XMe6sA 7jZiYhJV7kHHk51Bs1ik8c8D0fSyVTjQXFdN2kgCQgG2iH93yNiKMjgnkvuK2PA3 ezvP8LSQLkGsb2KkEPDdrTSXAMf6qWi4ZnPEnvLN525MkxYp6yDLFAfoUgBx0ciZ bg== -----END CERTIFICATE----- image-4.0.1/oci/layout/fixtures/rejected_certs/cert.key000066400000000000000000000005551354546467100232160ustar00rootroot00000000000000-----BEGIN EC PRIVATE KEY----- MIHcAgEBBEIB3BPUEOohwxGCV8V2fwIBdZ3S7yWADrbz5w17YITBt0p6j1C0NKRx xL9V7Cq+P2OkfQa6rxiD7cM8DjP/6y1//XKgBwYFK4EEACOhgYkDgYYABAFHj4kX +5EWC7oorajgfWJHDs9Tx2H2VjvzzLruJnmzc6+TVMKDXoaqxByvxUJ6HXEVVthY gjemoGF32yWbzV+D5wGTlh37IKWOpHVKGLn6vQ7kFaFUTl65l3IGzpGd+CDmxrT2 GYVdLjmrRINNby5JC7flsCqv6FPlbD/qA9S8a3U5rw== -----END EC PRIVATE KEY----- image-4.0.1/oci/layout/fixtures/rejected_certs/gencert.sh000077500000000000000000000012021354546467100235230ustar00rootroot00000000000000#!/bin/bash -e config=$(mktemp -t) if test -z "$config" ; then echo error creating temporary file for configuration exit 1 fi trap 'rm -f "$config"' EXIT cat > "$config" << EOF [req] prompt=no distinguished_name=dn x509_extensions=extensions [extensions] keyUsage=critical,digitalSignature,keyEncipherment,keyCertSign extendedKeyUsage=serverAuth,clientAuth basicConstraints=critical,CA:TRUE subjectAltName=DNS:localhost,email:b@b.com [dn] O=Acme Co EOF serial=$(dd if=/dev/random bs=1 count=16 status=none | hexdump -e '"%x1"') openssl req -new -set_serial 0x"$serial" -x509 -sha512 -days 365 -key cert.key -config "$config" -out cert.cert image-4.0.1/oci/layout/fixtures/two_images_manifest/000077500000000000000000000000001354546467100226015ustar00rootroot00000000000000image-4.0.1/oci/layout/fixtures/two_images_manifest/index.json000066400000000000000000000013241354546467100246030ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json", "manifests": [ { "mediaType": "application/vnd.docker.image.manifest.v2+json", "size": 7143, "digest": 
"sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", "platform": { "architecture": "ppc64le", "os": "linux" } }, { "mediaType": "application/vnd.docker.image.manifest.v2+json", "size": 7682, "digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270", "platform": { "architecture": "amd64", "os": "linux", "features": [ "sse4" ] } } ] } image-4.0.1/oci/layout/oci_dest.go000066400000000000000000000257671354546467100170450ustar00rootroot00000000000000package layout import ( "context" "encoding/json" "io" "io/ioutil" "os" "path/filepath" "runtime" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) type ociImageDestination struct { ref ociReference index imgspecv1.Index sharedBlobDir string acceptUncompressedLayers bool } // newImageDestination returns an ImageDestination for writing to an existing directory. func newImageDestination(sys *types.SystemContext, ref ociReference) (types.ImageDestination, error) { var index *imgspecv1.Index if indexExists(ref) { var err error index, err = ref.getIndex() if err != nil { return nil, err } } else { index = &imgspecv1.Index{ Versioned: imgspec.Versioned{ SchemaVersion: 2, }, } } d := &ociImageDestination{ref: ref, index: *index} if sys != nil { d.sharedBlobDir = sys.OCISharedBlobDirPath d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers } if err := ensureDirectoryExists(d.ref.dir); err != nil { return nil, err } // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, // but it MAY be empty (e.g. if we never end up calling PutBlob) // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { return nil, err } return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *ociImageDestination) Reference() types.ImageReference { return d.ref } // Close removes resources associated with an initialized ImageDestination, if any. func (d *ociImageDestination) Close() error { return nil } func (d *ociImageDestination) SupportedManifestMIMETypes() []string { return []string{ imgspecv1.MediaTypeImageManifest, } } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error { return errors.Errorf("Pushing signatures for OCI images is not supported") } func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression { if d.acceptUncompressedLayers { return types.PreserveOriginal } return types.Compress } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { return true } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
func (d *ociImageDestination) MustMatchRuntimeOS() bool { return false } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *ociImageDestination) HasThreadSafePutBlob() bool { return false } // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } succeeded := false explicitClosed := false defer func() { if !explicitClosed { blobFile.Close() } if !succeeded { os.Remove(blobFile.Name()) } }() digester := digest.Canonical.Digester() tee := io.TeeReader(stream, digester.Hash()) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, tee) if err != nil { return types.BlobInfo{}, err } computedDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. // On Windows, the “permissions of newly created files” argument to syscall.Open is // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, // always fails on Windows. if runtime.GOOS != "windows" { if err := blobFile.Chmod(0644); err != nil { return types.BlobInfo{}, err } } blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) if err != nil { return types.BlobInfo{}, err } if err := ensureParentDirectoryExists(blobPath); err != nil { return types.BlobInfo{}, err } // we need to explicitly close the file, since a rename won't otherwise work on Windows blobFile.Close() explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { return types.BlobInfo{}, err } succeeded = true return types.BlobInfo{Digest: computedDigest, Size: size}, nil } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if info.Digest == "" { return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`) } blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { return false, types.BlobInfo{}, err } finfo, err := os.Stat(blobPath) if err != nil && os.IsNotExist(err) { return false, types.BlobInfo{}, nil } if err != nil { return false, types.BlobInfo{}, err } return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil } // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte) error { digest, err := manifest.Digest(m) if err != nil { return err } desc := imgspecv1.Descriptor{} desc.Digest = digest // TODO(runcom): be aware of, and add support for, OCI manifest lists desc.MediaType = imgspecv1.MediaTypeImageManifest desc.Size = int64(len(m)) blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) if err != nil { return err } if err := ensureParentDirectoryExists(blobPath); err != nil { return err } if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { return err } if d.ref.image != "" { annotations := make(map[string]string) annotations["org.opencontainers.image.ref.name"] = d.ref.image desc.Annotations = annotations } desc.Platform = &imgspecv1.Platform{ Architecture: runtime.GOARCH, OS: runtime.GOOS, } d.addManifest(&desc) return nil } func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { for i, manifest := range d.index.Manifests { if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { // TODO Should there first be a cleanup based on the descriptor we are going to replace? d.index.Manifests[i] = *desc return } } d.index.Manifests = append(d.index.Manifests, *desc) } func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { if len(signatures) != 0 { return errors.Errorf("Pushing signatures for OCI images is not supported") } return nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) func (d *ociImageDestination) Commit(ctx context.Context) error { if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { return err } indexJSON, err := json.Marshal(d.index) if err != nil { return err } return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) } func ensureDirectoryExists(path string) error { if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(path, 0755); err != nil { return err } } return nil } // ensureParentDirectoryExists ensures the parent of the supplied path exists. func ensureParentDirectoryExists(path string) error { return ensureDirectoryExists(filepath.Dir(path)) } // indexExists checks whether the index location specified in the OCI reference exists. // The implementation is opinionated, since in case of unexpected errors false is returned func indexExists(ref ociReference) bool { _, err := os.Stat(ref.indexPath()) if err == nil { return true } if os.IsNotExist(err) { return false } return true } image-4.0.1/oci/layout/oci_dest_test.go000066400000000000000000000071211354546467100200640ustar00rootroot00000000000000package layout import ( "context" "os" "path/filepath" "testing" "github.com/containers/image/v4/pkg/blobinfocache/memory" "github.com/containers/image/v4/types" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // readerFromFunc allows implementing Reader by any function, e.g. a closure. type readerFromFunc func([]byte) (int, error) func (fn readerFromFunc) Read(p []byte) (int, error) { return fn(p) } // TestPutBlobDigestFailure simulates behavior on digest verification failure. func TestPutBlobDigestFailure(t *testing.T) { const digestErrorString = "Simulated digest error" const blobDigest = "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) dirRef, ok := ref.(ociReference) require.True(t, ok) blobPath, err := dirRef.blobPath(blobDigest, "") assert.NoError(t, err) cache := memory.New() firstRead := true reader := readerFromFunc(func(p []byte) (int, error) { _, err := os.Lstat(blobPath) require.Error(t, err) require.True(t, os.IsNotExist(err)) if firstRead { if len(p) > 0 { firstRead = false } for i := 0; i < len(p); i++ { p[i] = 0xAA } return len(p), nil } return 0, errors.Errorf(digestErrorString) }) dest, err := ref.NewImageDestination(context.Background(), nil) require.NoError(t, err) defer dest.Close() _, err = dest.PutBlob(context.Background(), reader, types.BlobInfo{Digest: blobDigest, Size: -1}, cache, false) assert.Error(t, err) assert.Contains(t, digestErrorString, err.Error()) err = dest.Commit(context.Background()) assert.NoError(t, err) _, err = os.Lstat(blobPath) require.Error(t, err) require.True(t, os.IsNotExist(err)) } // TestPutManifestAppendsToExistingManifest tests that new manifests are getting added to existing index. 
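//
// Editorial sketch (not upstream code) of the write/commit flow these tests
// exercise; ociRef, ctx and manifestBytes are placeholders:
//
//	dest, _ := newImageDestination(nil, ociRef)
//	_ = dest.PutManifest(ctx, manifestBytes) // writes the blob, records a descriptor in the in-memory index
//	_ = dest.Commit(ctx)                     // writes <dir>/oci-layout ({"imageLayoutVersion": "1.0.0"})
//	                                         // and re-serializes the merged index to <dir>/index.json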
func TestPutManifestAppendsToExistingManifest(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) // initially we have one manifest index, err := ociRef.getIndex() assert.NoError(t, err) assert.Equal(t, 1, len(index.Manifests), "Unexpected number of manifests") // create a new test reference ociRef2, err := NewReference(tmpDir, "new-image") assert.NoError(t, err) putTestManifest(t, ociRef2.(ociReference), tmpDir) index, err = ociRef.getIndex() assert.NoError(t, err) assert.Equal(t, 2, len(index.Manifests), "Unexpected number of manifests") } // TestPutManifestTwice tests that an existing manifest gets updated, not appended. func TestPutManifestTwice(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) putTestManifest(t, ociRef, tmpDir) putTestManifest(t, ociRef, tmpDir) index, err := ociRef.getIndex() assert.NoError(t, err) assert.Equal(t, 1, len(index.Manifests), "Unexpected number of manifests") } func putTestManifest(t *testing.T, ociRef ociReference, tmpDir string) { imageDest, err := newImageDestination(nil, ociRef) assert.NoError(t, err) data := []byte("abc") err = imageDest.PutManifest(context.Background(), data) assert.NoError(t, err) err = imageDest.Commit(context.Background()) assert.NoError(t, err) paths := []string{} filepath.Walk(tmpDir, func(path string, info os.FileInfo, err error) error { paths = append(paths, path) return nil }) digest := "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad" assert.Contains(t, paths, filepath.Join(tmpDir, "blobs", "sha256", digest), "The OCI directory does not contain the new manifest data") } image-4.0.1/oci/layout/oci_src.go000066400000000000000000000130101354546467100166500ustar00rootroot00000000000000package layout import ( "context" "io" "io/ioutil" "net/http" "os" "strconv" "github.com/containers/image/v4/pkg/tlsclientconfig" "github.com/containers/image/v4/types" "github.com/docker/go-connections/tlsconfig" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) type ociImageSource struct { ref ociReference descriptor imgspecv1.Descriptor client *http.Client sharedBlobDir string } // newImageSource returns an ImageSource for reading from an existing directory. func newImageSource(sys *types.SystemContext, ref ociReference) (types.ImageSource, error) { tr := tlsclientconfig.NewTransport() tr.TLSClientConfig = tlsconfig.ServerDefault() if sys != nil && sys.OCICertPath != "" { if err := tlsclientconfig.SetupCertificates(sys.OCICertPath, tr.TLSClientConfig); err != nil { return nil, err } tr.TLSClientConfig.InsecureSkipVerify = sys.OCIInsecureSkipTLSVerify } client := &http.Client{} client.Transport = tr descriptor, err := ref.getManifestDescriptor() if err != nil { return nil, err } d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} if sys != nil { // TODO(jonboulle): check dir existence? d.sharedBlobDir = sys.OCISharedBlobDirPath } return d, nil } // Reference returns the reference used to set up this source. func (s *ociImageSource) Reference() types.ImageReference { return s.ref } // Close removes resources associated with an initialized ImageSource, if any. func (s *ociImageSource) Close() error { return nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { var dig digest.Digest var mimeType string if instanceDigest == nil { dig = digest.Digest(s.descriptor.Digest) mimeType = s.descriptor.MediaType } else { dig = *instanceDigest // XXX: instanceDigest means that we don't immediately have the context of what // mediaType the manifest has. In OCI this means that we don't know // what reference it came from, so we just *assume* that its // MediaTypeImageManifest. // FIXME: We should actually be able to look up the manifest in the index, // and see the MIME type there. mimeType = imgspecv1.MediaTypeImageManifest } manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) if err != nil { return nil, "", err } m, err := ioutil.ReadFile(manifestPath) if err != nil { return nil, "", err } return m, mimeType, nil } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *ociImageSource) HasThreadSafeGetBlob() bool { return false } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if len(info.URLs) != 0 { return s.getExternalBlob(ctx, info.URLs) } path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) if err != nil { return nil, 0, err } r, err := os.Open(path) if err != nil { return nil, 0, err } fi, err := r.Stat() if err != nil { return nil, 0, err } return r, fi.Size(), nil } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { return [][]byte{}, nil } func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { errWrap := errors.New("failed fetching external blob from all urls") for _, url := range urls { req, err := http.NewRequest("GET", url, nil) if err != nil { errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) continue } resp, err := s.client.Do(req.WithContext(ctx)) if err != nil { errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) continue } if resp.StatusCode != http.StatusOK { resp.Body.Close() errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url) continue } return resp.Body, getBlobSize(resp), nil } return nil, 0, errWrap } // LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
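//
// Editorial sketch (not upstream code): fetching a single blob through the
// source above. The digest is the one used by this package's fixtures; ctx,
// ref and sys are assumed to exist.
//
//	src, err := ref.NewImageSource(ctx, sys)
//	if err == nil {
//		defer src.Close()
//		rc, size, err := src.GetBlob(ctx,
//			types.BlobInfo{Digest: "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f"},
//			memory.New()) // a BlobInfoCache from pkg/blobinfocache/memory
//		if err == nil {
//			defer rc.Close()
//			_ = size // for local blobs this is the os.Stat() size of the file
//		}
//	}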
func (s *ociImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { size = -1 } return size } image-4.0.1/oci/layout/oci_src_test.go000066400000000000000000000070251354546467100177170ustar00rootroot00000000000000package layout import ( "context" "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" "github.com/containers/image/v4/pkg/blobinfocache/memory" "github.com/containers/image/v4/types" digest "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const RemoteLayerContent = "This is the remote layer content" var httpServerAddr string func TestMain(m *testing.M) { httpServer, err := startRemoteLayerServer() if err != nil { println("Error starting test TLS server", err.Error()) os.Exit(1) } httpServerAddr = strings.Replace(httpServer.URL, "127.0.0.1", "localhost", 1) code := m.Run() httpServer.Close() os.Exit(code) } func TestGetBlobForRemoteLayers(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "Hello world") })) defer ts.Close() cache := memory.New() imageSource := createImageSource(t, &types.SystemContext{}) layerInfo := types.BlobInfo{ Digest: digest.FromBytes([]byte("Hello world")), Size: -1, URLs: []string{ "brokenurl", ts.URL, }, } reader, _, err := imageSource.GetBlob(context.Background(), layerInfo, cache) require.NoError(t, err) defer reader.Close() data, err := ioutil.ReadAll(reader) require.NoError(t, err) assert.Contains(t, string(data), "Hello world") } func TestGetBlobForRemoteLayersWithTLS(t *testing.T) { imageSource := createImageSource(t, &types.SystemContext{ OCICertPath: "fixtures/accepted_certs", }) cache := memory.New() layer, size, err := imageSource.GetBlob(context.Background(), types.BlobInfo{ URLs: []string{httpServerAddr}, }, cache) require.NoError(t, err) layerContent, _ := ioutil.ReadAll(layer) assert.Equal(t, RemoteLayerContent, string(layerContent)) assert.Equal(t, int64(len(RemoteLayerContent)), size) } func TestGetBlobForRemoteLayersOnTLSFailure(t *testing.T) { imageSource := createImageSource(t, &types.SystemContext{ OCICertPath: "fixtures/rejected_certs", }) cache := memory.New() layer, size, err := imageSource.GetBlob(context.Background(), types.BlobInfo{ URLs: []string{httpServerAddr}, }, cache) require.Error(t, err) assert.Nil(t, layer) assert.Equal(t, int64(0), size) } func remoteLayerContent(w http.ResponseWriter, req *http.Request) { fmt.Fprintf(w, RemoteLayerContent) } func startRemoteLayerServer() (*httptest.Server, error) { certBytes, err := ioutil.ReadFile("fixtures/accepted_certs/cert.cert") if err != nil { return nil, err } clientCertPool := x509.NewCertPool() if !clientCertPool.AppendCertsFromPEM(certBytes) { return nil, fmt.Errorf("Could not append certificate") } cert, err := tls.LoadX509KeyPair("fixtures/accepted_certs/cert.cert", "fixtures/accepted_certs/cert.key") if err != nil { return nil, err } tlsConfig := &tls.Config{ // Reject any TLS certificate that cannot be validated ClientAuth: tls.RequireAndVerifyClientCert, // Ensure that we only use our "CA" to validate certificates ClientCAs: clientCertPool, Certificates: []tls.Certificate{cert}, } httpServer := httptest.NewUnstartedServer(http.HandlerFunc(remoteLayerContent)) httpServer.TLS = tlsConfig httpServer.StartTLS() return 
httpServer, nil } func createImageSource(t *testing.T, sys *types.SystemContext) types.ImageSource { imageRef, err := NewReference("fixtures/manifest", "") require.NoError(t, err) imageSource, err := imageRef.NewImageSource(context.Background(), sys) require.NoError(t, err) return imageSource } image-4.0.1/oci/layout/oci_transport.go000066400000000000000000000254331354546467100201300ustar00rootroot00000000000000package layout import ( "context" "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/containers/image/v4/directory/explicitfilepath" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/oci/internal" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) func init() { transports.Register(Transport) } var ( // Transport is an ImageTransport for OCI directories. Transport = ociTransport{} // ErrMoreThanOneImage is an error returned when the manifest includes // more than one image and the user should choose which one to use. ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image") ) type ociTransport struct{} func (t ociTransport) Name() string { return "oci" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) { return ParseReference(reference) } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { return internal.ValidateScope(scope) } // ociReference is an ImageReference for OCI directory paths. type ociReference struct { // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! // Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on. // Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid // being exposed to symlinks and renames in the parent directories to the working directory). // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) dir string // As specified by the user. May be relative, contain symlinks, etc. resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. // If image=="", it means the "only image" in the index.json is used in the case it is a source // for destinations, the image name annotation "image.ref.name" is not added to the index.json image string } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. 
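//
// Editorial illustration (not upstream code): inputs this transport accepts,
// per SplitPathAndImage and the validators in oci/internal. The paths are
// hypothetical and must exist, because NewReference resolves them.
//
//	ParseReference("/var/lib/oci:busybox:latest") // dir "/var/lib/oci", image "busybox:latest"
//	ParseReference("/var/lib/oci:busybox")        // dir "/var/lib/oci", image "busybox"
//	ParseReference("/var/lib/oci")                // dir "/var/lib/oci", image "" (the "only image" case)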
func ParseReference(reference string) (types.ImageReference, error) { dir, image := internal.SplitPathAndImage(reference) return NewReference(dir, image) } // NewReference returns an OCI reference for a directory and an image. // // We do not expose an API supplying the resolvedDir; we could, but recomputing it // is generally cheap enough that we prefer being confident about the properties of resolvedDir. func NewReference(dir, image string) (types.ImageReference, error) { resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir) if err != nil { return nil, err } if err := internal.ValidateOCIPath(dir); err != nil { return nil, err } if err = internal.ValidateImageName(image); err != nil { return nil, err } return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil } func (ref ociReference) Transport() types.ImageTransport { return Transport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref ociReference) StringWithinTransport() string { return fmt.Sprintf("%s:%s", ref.dir, ref.image) } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref ociReference) DockerReference() reference.Named { return nil } // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. various references with exactly the same semantics should return the same configuration identity) // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. func (ref ociReference) PolicyConfigurationIdentity() string { // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the // same image and the two can’t be statically disambiguated. Using at least the repository directory is // less granular but hopefully still useful. return ref.resolvedDir } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it.
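//
// Editorial illustration (not upstream code): for a hypothetical resolvedDir
// of "/var/lib/oci/repo", the loop below produces
//
//	[]string{"/var/lib/oci/repo", "/var/lib/oci", "/var/lib", "/var"}
//
// with the implicit "" default still checked last by the policy machinery.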
func (ref ociReference) PolicyConfigurationNamespaces() []string { res := []string{} path := ref.resolvedDir for { lastSlash := strings.LastIndex(path, "/") // Note that we do not include "/"; it is redundant with the "" global default, // and rejected by ociTransport.ValidatePolicyConfigurationScope above. if lastSlash == -1 || path == "/" { break } res = append(res, path) path = path[:lastSlash] } return res } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(sys, ref) if err != nil { return nil, err } return image.FromSource(ctx, sys, src) } // getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index, nil is returned together // with the error. func (ref ociReference) getIndex() (*imgspecv1.Index, error) { indexJSON, err := os.Open(ref.indexPath()) if err != nil { return nil, err } defer indexJSON.Close() index := &imgspecv1.Index{} if err := json.NewDecoder(indexJSON).Decode(index); err != nil { return nil, err } return index, nil } func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { index, err := ref.getIndex() if err != nil { return imgspecv1.Descriptor{}, err } var d *imgspecv1.Descriptor if ref.image == "" { // return the manifest if only one image is in the oci directory if len(index.Manifests) == 1 { d = &index.Manifests[0] } else { // ask the user to choose an image when more than one image is in the oci directory return imgspecv1.Descriptor{}, ErrMoreThanOneImage } } else { // if an image is specified, look through all manifests for a match for _, md := range index.Manifests { if md.MediaType != imgspecv1.MediaTypeImageManifest { continue } refName, ok := md.Annotations["org.opencontainers.image.ref.name"] if !ok { continue } if refName == ref.image { // taking the address of the loop variable is safe here because we break out immediately d = &md break } } } if d == nil { return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image) } return *d, nil } // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name // when pulling an image func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { ociRef, ok := imgRef.(ociReference) if !ok { return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef") } return ociRef.getManifestDescriptor() } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref ociReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { return newImageSource(sys, ref) } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(sys, ref) } // DeleteImage deletes the named image from the registry, if supported.
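//
// Editorial illustration (not upstream code): getManifestDescriptor above keys
// off the "org.opencontainers.image.ref.name" annotation. Given this package's
// fixture index entry
//
//	{"mediaType": "application/vnd.oci.image.manifest.v1+json",
//	 "digest": "sha256:84afb6189c4d69f2d040c5f1dc4e0a16fed9b539ce9cfb4ac2526ae4e0576cc0",
//	 "size": 496,
//	 "annotations": {"org.opencontainers.image.ref.name": "v0.1.1"}}
//
// NewReference(dir, "v0.1.1") selects it; image == "" picks the sole entry, and
// several entries with no image name given yield ErrMoreThanOneImage.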
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { return errors.Errorf("Deleting images not implemented for oci: images") } // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions. func (ref ociReference) ociLayoutPath() string { return filepath.Join(ref.dir, "oci-layout") } // indexPath returns a path for the index.json within a directory using OCI conventions. func (ref ociReference) indexPath() string { return filepath.Join(ref.dir, "index.json") } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { if err := digest.Validate(); err != nil { return "", errors.Wrapf(err, "unexpected digest reference %s", digest) } blobDir := filepath.Join(ref.dir, "blobs") if sharedBlobDir != "" { blobDir = sharedBlobDir } return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } image-4.0.1/oci/layout/oci_transport_test.go000066400000000000000000000223171354546467100211650ustar00rootroot00000000000000package layout import ( "context" "io/ioutil" "os" "path/filepath" "testing" _ "github.com/containers/image/v4/internal/testing/explicitfilepath-tmpdir" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestGetManifestDescriptor is testing a regression issue where a nil error was being wrapped, // this causes the returned error to be nil as well and the user wasn't getting a proper error output. // // More info: https://github.com/containers/skopeo/issues/496 func TestGetManifestDescriptor(t *testing.T) { imageRef, err := NewReference("fixtures/two_images_manifest", "") require.NoError(t, err) _, err = imageRef.(ociReference).getManifestDescriptor() assert.EqualError(t, err, ErrMoreThanOneImage.Error()) } func TestTransportName(t *testing.T) { assert.Equal(t, "oci", Transport.Name()) } func TestTransportParseReference(t *testing.T) { testParseReference(t, Transport.ParseReference) } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { for _, scope := range []string{ "/etc", "/this/does/not/exist", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } for _, scope := range []string{ "relative/path", "/", "/double//slashes", "/has/./dot", "/has/dot/../dot", "/trailing/slash/", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.Error(t, err, scope) } } func TestParseReference(t *testing.T) { testParseReference(t, ParseReference) } // testParseReference is a test shared for Transport.ParseReference and ParseReference. 
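//
// Editorial note (not upstream code): the blobPath helper defined above maps a
// digest to the layout's blob store, as TestReferenceBlobPath below verifies:
//
//	blobPath("sha256:0123…cdef", "")              -> <dir>/blobs/sha256/0123…cdef
//	blobPath("sha256:0123…cdef", "/shared/blobs") -> /shared/blobs/sha256/0123…cdef
//
// A value that fails digest.Validate() (e.g. bare hex without an algorithm
// prefix) is rejected with "unexpected digest reference".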
func testParseReference(t *testing.T, fn func(string) (types.ImageReference, error)) { tmpDir, err := ioutil.TempDir("", "oci-transport-test") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, path := range []string{ "/", "/etc", tmpDir, "relativepath", tmpDir + "/thisdoesnotexist", } { for _, image := range []struct{ suffix, image string }{ {":notlatest:image", "notlatest:image"}, {":latestimage", "latestimage"}, {":", ""}, {"", ""}, } { input := path + image.suffix ref, err := fn(input) require.NoError(t, err, input) ociRef, ok := ref.(ociReference) require.True(t, ok) assert.Equal(t, path, ociRef.dir, input) assert.Equal(t, image.image, ociRef.image, input) } } _, err = fn(tmpDir + ":invalid'image!value@") assert.Error(t, err) } func TestNewReference(t *testing.T) { const ( imageValue = "imageValue" noImageValue = "" ) tmpDir, err := ioutil.TempDir("", "oci-transport-test") require.NoError(t, err) defer os.RemoveAll(tmpDir) ref, err := NewReference(tmpDir, imageValue) require.NoError(t, err) ociRef, ok := ref.(ociReference) require.True(t, ok) assert.Equal(t, tmpDir, ociRef.dir) assert.Equal(t, imageValue, ociRef.image) ref, err = NewReference(tmpDir, noImageValue) require.NoError(t, err) ociRef, ok = ref.(ociReference) require.True(t, ok) assert.Equal(t, tmpDir, ociRef.dir) assert.Equal(t, noImageValue, ociRef.image) _, err = NewReference(tmpDir+"/thisparentdoesnotexist/something", imageValue) assert.Error(t, err) _, err = NewReference(tmpDir, "invalid'image!value@") assert.Error(t, err) _, err = NewReference(tmpDir+"/has:colon", imageValue) assert.Error(t, err) } // refToTempOCI creates a temporary directory and returns an reference to it. // The caller should // defer os.RemoveAll(tmpDir) func refToTempOCI(t *testing.T) (ref types.ImageReference, tmpDir string) { tmpDir, err := ioutil.TempDir("", "oci-transport-test") require.NoError(t, err) m := `{ "schemaVersion": 2, "manifests": [ { "mediaType": "application/vnd.oci.image.manifest.v1+json", "size": 7143, "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f", "platform": { "architecture": "ppc64le", "os": "linux" }, "annotations": { "org.opencontainers.image.ref.name": "imageValue" } } ] } ` ioutil.WriteFile(filepath.Join(tmpDir, "index.json"), []byte(m), 0644) ref, err = NewReference(tmpDir, "imageValue") require.NoError(t, err) return ref, tmpDir } func TestReferenceTransport(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { tmpDir, err := ioutil.TempDir("", "oci-transport-test") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, c := range []struct{ input, result string }{ {"/dir1:notlatest:notlatest", "/dir1:notlatest:notlatest"}, // Explicit image {"/dir3:", "/dir3:"}, // No image } { ref, err := ParseReference(tmpDir + c.input) require.NoError(t, err, c.input) stringRef := ref.StringWithinTransport() assert.Equal(t, tmpDir+c.result, stringRef, c.input) // Do one more round to verify that the output can be parsed, to an equal value. 
ref2, err := Transport.ParseReference(stringRef) require.NoError(t, err, c.input) stringRef2 := ref2.StringWithinTransport() assert.Equal(t, stringRef, stringRef2, c.input) } } func TestReferenceDockerReference(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) assert.Nil(t, ref.DockerReference()) } func TestReferencePolicyConfigurationIdentity(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity()) // A non-canonical path. Test just one, the various other cases are // tested in explicitfilepath.ResolvePathToFullyExplicit. ref, err := NewReference(tmpDir+"/.", "image2") require.NoError(t, err) assert.Equal(t, tmpDir, ref.PolicyConfigurationIdentity()) // "/" as a corner case. ref, err = NewReference("/", "image3") require.NoError(t, err) assert.Equal(t, "/", ref.PolicyConfigurationIdentity()) } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) // We don't really know enough to make a full equality test here. ns := ref.PolicyConfigurationNamespaces() require.NotNil(t, ns) assert.True(t, len(ns) >= 2) assert.Equal(t, tmpDir, ns[0]) assert.Equal(t, filepath.Dir(tmpDir), ns[1]) // Test with a known path which should exist. Test just one non-canonical // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. // // It would be nice to test a deeper hierarchy, but it is not obvious what // deeper path is always available in the various distros, AND is not likely // to contains a symbolic link. for _, path := range []string{"/usr/share", "/usr/share/./."} { _, err := os.Lstat(path) require.NoError(t, err) ref, err := NewReference(path, "someimage") require.NoError(t, err) ns := ref.PolicyConfigurationNamespaces() require.NotNil(t, ns) assert.Equal(t, []string{"/usr/share", "/usr"}, ns) } // "/" as a corner case. 
ref, err := NewReference("/", "image3") require.NoError(t, err) assert.Equal(t, []string{}, ref.PolicyConfigurationNamespaces()) } func TestReferenceNewImage(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) _, err := ref.NewImage(context.Background(), nil) assert.Error(t, err) } func TestReferenceNewImageSource(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) _, err := ref.NewImageSource(context.Background(), nil) assert.NoError(t, err) } func TestReferenceNewImageDestination(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) dest, err := ref.NewImageDestination(context.Background(), nil) assert.NoError(t, err) defer dest.Close() } func TestReferenceDeleteImage(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) err := ref.DeleteImage(context.Background(), nil) assert.Error(t, err) } func TestReferenceOCILayoutPath(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) assert.Equal(t, tmpDir+"/oci-layout", ociRef.ociLayoutPath()) } func TestReferenceIndexPath(t *testing.T) { ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) assert.Equal(t, tmpDir+"/index.json", ociRef.indexPath()) } func TestReferenceBlobPath(t *testing.T) { const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) bp, err := ociRef.blobPath("sha256:"+hex, "") assert.NoError(t, err) assert.Equal(t, tmpDir+"/blobs/sha256/"+hex, bp) } func TestReferenceSharedBlobPathShared(t *testing.T) { const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) bp, err := ociRef.blobPath("sha256:"+hex, "/external/path") assert.NoError(t, err) assert.Equal(t, "/external/path/sha256/"+hex, bp) } func TestReferenceBlobPathInvalid(t *testing.T) { const hex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" ref, tmpDir := refToTempOCI(t) defer os.RemoveAll(tmpDir) ociRef, ok := ref.(ociReference) require.True(t, ok) _, err := ociRef.blobPath(hex, "") assert.Error(t, err) assert.Contains(t, err.Error(), "unexpected digest reference "+hex) } image-4.0.1/oci/oci.go000066400000000000000000000000141354546467100144630ustar00rootroot00000000000000package oci image-4.0.1/openshift/000077500000000000000000000000001354546467100146145ustar00rootroot00000000000000image-4.0.1/openshift/openshift-copies.go000066400000000000000000001324171354546467100204320ustar00rootroot00000000000000package openshift import ( "crypto/tls" "crypto/x509" "encoding/json" "fmt" "io/ioutil" "net" "net/http" "net/url" "os" "path" "path/filepath" "reflect" "strings" "time" "github.com/ghodss/yaml" "github.com/imdario/mergo" "github.com/pkg/errors" "golang.org/x/net/http2" "k8s.io/client-go/util/homedir" ) // restTLSClientConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.TLSClientConfig. 
// restTLSClientConfig contains settings to enable transport layer security type restTLSClientConfig struct { // Server requires TLS client certificate authentication CertFile string // Server requires TLS client certificate authentication KeyFile string // Trusted root certificates for server CAFile string // CertData holds PEM-encoded bytes (typically read from a client certificate file). // CertData takes precedence over CertFile CertData []byte // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). // KeyData takes precedence over KeyFile KeyData []byte // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). // CAData takes precedence over CAFile CAData []byte } // restConfig is a modified copy of k8s.io/kubernets/pkg/client/restclient.Config. // Config holds the common attributes that can be passed to a Kubernetes client on // initialization. type restConfig struct { // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. // If a URL is given then the (optional) Path of that URL represents a prefix that must // be appended to all request URIs used to access the apiserver. This allows a frontend // proxy to easily relocate all of the apiserver endpoints. Host string // Server requires Basic authentication Username string Password string // Server requires Bearer authentication. This client will not attempt to use // refresh tokens for an OAuth2 flow. // TODO: demonstrate an OAuth2 compatible client. BearerToken string // TLSClientConfig contains settings to enable transport layer security restTLSClientConfig // Server should be accessed without verifying the TLS // certificate. For testing only. Insecure bool } // ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig. // ClientConfig is used to make it easy to get an api server client type clientConfig interface { // ClientConfig returns a complete client config ClientConfig() (*restConfig, error) } // defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig. func defaultClientConfig() clientConfig { loadingRules := newOpenShiftClientConfigLoadingRules() // REMOVED: Allowing command-line overriding of loadingRules // REMOVED: clientcmd.ConfigOverrides clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) return clientConfig } var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") // newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. // NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. // 1. --config value // 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { chain := []string{} envVarFile := os.Getenv("KUBECONFIG") if len(envVarFile) != 0 { chain = append(chain, filepath.SplitList(envVarFile)...) } else { chain = append(chain, recommendedHomeFile) } return &clientConfigLoadingRules{ Precedence: chain, // REMOVED: Migration support; run (oc login) to trigger migration } } // deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. 
// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules // It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that // the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before // the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid // passing extraneous information down a call stack type deferredLoadingClientConfig struct { loadingRules *clientConfigLoadingRules clientConfig clientConfig } // NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. // NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig { return &deferredLoadingClientConfig{loadingRules: loadingRules} } func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) { if config.clientConfig == nil { // REMOVED: Support for concurrent use in multiple threads. mergedConfig, err := config.loadingRules.Load() if err != nil { return nil, err } var mergedClientConfig clientConfig // REMOVED: Interactive fallback support. mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig) config.clientConfig = mergedClientConfig } return config.clientConfig, nil } // ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig. // ClientConfig implements ClientConfig func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) { mergedClientConfig, err := config.createClientConfig() if err != nil { return nil, err } mergedConfig, err := mergedClientConfig.ClientConfig() if err != nil { return nil, err } // REMOVED: In-cluster service account configuration use. return mergedConfig, nil } var ( // DefaultCluster is the cluster config used when no other config is specified // TODO: eventually apiserver should start on 443 and be secure by default defaultCluster = clientcmdCluster{Server: "http://localhost:8080"} // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")} ) // directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig. // DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information type directClientConfig struct { config clientcmdConfig } // newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig. // NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig { return &directClientConfig{config} } // ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig. 
// ClientConfig implements ClientConfig func (config *directClientConfig) ClientConfig() (*restConfig, error) { if err := config.ConfirmUsable(); err != nil { return nil, err } configAuthInfo := config.getAuthInfo() configClusterInfo := config.getCluster() clientConfig := &restConfig{} clientConfig.Host = configClusterInfo.Server if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { u.RawQuery = "" u.Fragment = "" clientConfig.Host = u.String() } // only try to read the auth information if we are secure if isConfigTransportTLS(*clientConfig) { var err error // REMOVED: Support for interactive fallback. userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo) if err != nil { return nil, err } mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig) serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) if err != nil { return nil, err } mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig) } return clientConfig, nil } // getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig. // A clientauth.Info object contains both user identification and server identification. We want different precedence orders for // both, so we have to split the objects and merge them separately // we want this order of precedence for the server identification // 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. load the ~/.kubernetes_auth file as a default func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) { mergedConfig := &restConfig{} // configClusterInfo holds the information identifying the server provided by .kubeconfig configClientConfig := &restConfig{} configClientConfig.CAFile = configClusterInfo.CertificateAuthority configClientConfig.CAData = configClusterInfo.CertificateAuthorityData configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify mergo.MergeWithOverwrite(mergedConfig, configClientConfig) return mergedConfig, nil } // getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig. // A clientauth.Info object contains both user identification and server identification. We want different precedence orders for // both, so we have to split the objects and merge them separately // we want this order of precedence for user identification // 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) // 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file // 4. if there is not enough information to identify the user, prompt if possible func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) { mergedConfig := &restConfig{} // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { mergedConfig.CertFile = configAuthInfo.ClientCertificate mergedConfig.CertData = configAuthInfo.ClientCertificateData mergedConfig.KeyFile = configAuthInfo.ClientKey mergedConfig.KeyData = configAuthInfo.ClientKeyData } if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { mergedConfig.Username = configAuthInfo.Username mergedConfig.Password = configAuthInfo.Password } // REMOVED: prompting for missing information. return mergedConfig, nil } // canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser func canIdentifyUser(config restConfig) bool { return len(config.Username) > 0 || (len(config.CertFile) > 0 || len(config.CertData) > 0) || len(config.BearerToken) > 0 } // ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable. // ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config, // but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. func (config *directClientConfig) ConfirmUsable() error { var validationErrors []error validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...) validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...) // when direct client config is specified, and our only error is that no server is defined, we should // return a standard "no config" error if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster { return newErrConfigurationInvalid([]error{errEmptyConfig}) } return newErrConfigurationInvalid(validationErrors) } // getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName. func (config *directClientConfig) getContextName() string { // REMOVED: overrides support return config.config.CurrentContext } // getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName. func (config *directClientConfig) getAuthInfoName() string { // REMOVED: overrides support return config.getContext().AuthInfo } // getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName. func (config *directClientConfig) getClusterName() string { // REMOVED: overrides support return config.getContext().Cluster } // getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext.
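// With overrides removed, the mergo.MergeWithOverwrite call below effectively copies the configured context into the zero-valued mergedContext.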
func (config *directClientConfig) getContext() clientcmdContext { contexts := config.config.Contexts contextName := config.getContextName() var mergedContext clientcmdContext if configContext, exists := contexts[contextName]; exists { mergo.MergeWithOverwrite(&mergedContext, configContext) } // REMOVED: overrides support return mergedContext } var ( errEmptyConfig = errors.New("no configuration has been provided") // message is for consistency with old behavior errEmptyCluster = errors.New("cluster has no server defined") ) // validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. // validateClusterInfo looks for conflicts and errors in the cluster info func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { var validationErrors []error if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { return []error{errEmptyCluster} } if len(clusterInfo.Server) == 0 { if len(clusterName) == 0 { validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) } else { validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) } } // Make sure CA data and CA file aren't both specified if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) } if len(clusterInfo.CertificateAuthority) != 0 { clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) if err != nil { validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) } else { clientCertCA.Close() } } return validationErrors } // validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. // validateAuthInfo looks for conflicts and errors in the auth info func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { var validationErrors []error usingAuthPath := false methods := make([]string, 0, 3) if len(authInfo.Token) != 0 { methods = append(methods, "token") } if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { methods = append(methods, "basicAuth") } if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { // Make sure cert data and file aren't both specified if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) } // Make sure key data and file aren't both specified if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) } // Make sure a key is specified if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) } if len(authInfo.ClientCertificate) != 0 { clientCertFile, err := os.Open(authInfo.ClientCertificate) if err != nil { validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) } else { clientCertFile.Close() } } if len(authInfo.ClientKey) != 0 { clientKeyFile, err := os.Open(authInfo.ClientKey) if err != nil { validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) } else { clientKeyFile.Close() } } } // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case if (len(methods) > 1) && (!usingAuthPath) { validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) } return validationErrors } // getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { authInfos := config.config.AuthInfos authInfoName := config.getAuthInfoName() var mergedAuthInfo clientcmdAuthInfo if configAuthInfo, exists := authInfos[authInfoName]; exists { mergo.MergeWithOverwrite(&mergedAuthInfo, configAuthInfo) } // REMOVED: overrides support return mergedAuthInfo } // getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster. func (config *directClientConfig) getCluster() clientcmdCluster { clusterInfos := config.config.Clusters clusterInfoName := config.getClusterName() var mergedClusterInfo clientcmdCluster mergo.MergeWithOverwrite(&mergedClusterInfo, defaultCluster) mergo.MergeWithOverwrite(&mergedClusterInfo, envVarCluster) if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { mergo.MergeWithOverwrite(&mergedClusterInfo, configClusterInfo) } // REMOVED: overrides support return mergedClusterInfo } // aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate. // This helper implements the error and Errors interfaces. Keeping it private // prevents people from making an aggregate of 0 errors, which is not // an error, but does satisfy the error interface. type aggregateErr []error // newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate. // NewAggregate converts a slice of errors into an Aggregate interface, which // is itself an implementation of the error interface. If the slice is empty, // this returns nil. // It checks whether any element of the input error list is nil, to avoid // a nil pointer panic when calling Error().
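// For illustration: newAggregate([]error{nil, errA}) yields aggregateErr{errA}, while newAggregate(nil) and newAggregate([]error{nil}) both return nil.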
func newAggregate(errlist []error) error { if len(errlist) == 0 { return nil } // In case the input error list contains nil entries var errs []error for _, e := range errlist { if e != nil { errs = append(errs, e) } } if len(errs) == 0 { return nil } return aggregateErr(errs) } // Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error. // Error is part of the error interface. func (agg aggregateErr) Error() string { if len(agg) == 0 { // This should never happen, really. return "" } if len(agg) == 1 { return agg[0].Error() } result := fmt.Sprintf("[%s", agg[0].Error()) for i := 1; i < len(agg); i++ { result += fmt.Sprintf(", %s", agg[i].Error()) } result += "]" return result } // REMOVED: aggregateErr.Errors // errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid. // errConfigurationInvalid is a set of errors indicating the configuration is invalid. type errConfigurationInvalid []error var _ error = errConfigurationInvalid{} // REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid. // newErrConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid. func newErrConfigurationInvalid(errs []error) error { switch len(errs) { case 0: return nil default: return errConfigurationInvalid(errs) } } // Error implements the error interface func (e errConfigurationInvalid) Error() string { return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error()) } // clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config // Callers can put the chain together however they want, but we'd recommend: // EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath // ExplicitPath is special, because if a user specifically requests a certain file to be used, an error is reported if this file is not present type clientConfigLoadingRules struct { Precedence []string } // Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load // Load starts by running the MigrationRules and then // takes the loading rules and returns a Config object based on the following rules. // if ExplicitPath is set, return the unmerged explicit file // Otherwise, return a merged config based on the Precedence slice // A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored. // Read errors or files with non-deserializable content produce errors. // The first file to set a particular map key wins and map key's value is never changed. // BUT, if you set a struct value that is NOT contained inside of map, the value WILL be changed. // This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two. // It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even // non-conflicting entries from the second file's "red-user" are discarded. // Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder // and only absolute file paths are returned.
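// Illustrative example (hypothetical paths): with KUBECONFIG=/tmp/a:/tmp/b, a "red-user" entry present in both files is taken entirely from /tmp/a, and struct values such as CurrentContext are taken from the earliest file that sets them.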
func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { errlist := []error{} kubeConfigFiles := []string{} // REMOVED: explicit path support kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...) kubeconfigs := []*clientcmdConfig{} // read and cache the config files so that we only look at them once for _, filename := range kubeConfigFiles { if len(filename) == 0 { // no work to do continue } config, err := loadFromFile(filename) if os.IsNotExist(err) { // skip missing files continue } if err != nil { errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename)) continue } kubeconfigs = append(kubeconfigs, config) } // first merge all of our maps mapConfig := clientcmdNewConfig() for _, kubeconfig := range kubeconfigs { mergo.MergeWithOverwrite(mapConfig, kubeconfig) } // merge all of the struct values in the reverse order so that priority is given correctly // errors are not added to the list the second time nonMapConfig := clientcmdNewConfig() for i := len(kubeconfigs) - 1; i >= 0; i-- { kubeconfig := kubeconfigs[i] mergo.MergeWithOverwrite(nonMapConfig, kubeconfig) } // since values are overwritten, but map values are not, we can merge the non-map config on top of the map config and // get the values we expect. config := clientcmdNewConfig() mergo.MergeWithOverwrite(config, mapConfig) mergo.MergeWithOverwrite(config, nonMapConfig) // REMOVED: Possibility to skip this. if err := resolveLocalPaths(config); err != nil { errlist = append(errlist, err) } return config, newAggregate(errlist) } // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile // LoadFromFile takes a filename and deserializes the contents into a Config object func loadFromFile(filename string) (*clientcmdConfig, error) { kubeconfigBytes, err := ioutil.ReadFile(filename) if err != nil { return nil, err } config, err := load(kubeconfigBytes) if err != nil { return nil, err } // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { obj.LocationOfOrigin = filename config.AuthInfos[key] = obj } for key, obj := range config.Clusters { obj.LocationOfOrigin = filename config.Clusters[key] = obj } for key, obj := range config.Contexts { obj.LocationOfOrigin = filename config.Contexts[key] = obj } if config.AuthInfos == nil { config.AuthInfos = map[string]*clientcmdAuthInfo{} } if config.Clusters == nil { config.Clusters = map[string]*clientcmdCluster{} } if config.Contexts == nil { config.Contexts = map[string]*clientcmdContext{} } return config, nil } // load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load // Load takes a byte slice and deserializes the contents into a Config object. // Encapsulates deserialization without assuming the source is a file. func load(data []byte) (*clientcmdConfig, error) { config := clientcmdNewConfig() // if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input) if len(data) == 0 { return config, nil } // Note: This does absolutely no kind/version checking or conversions. data, err := yaml.YAMLToJSON(data) if err != nil { return nil, err } if err := json.Unmarshal(data, config); err != nil { return nil, err } return config, nil } // resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths.
// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin // this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without // modification of its contents. func resolveLocalPaths(config *clientcmdConfig) error { for _, cluster := range config.Clusters { if len(cluster.LocationOfOrigin) == 0 { continue } base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) if err != nil { return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin) } if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil { return err } } for _, authInfo := range config.AuthInfos { if len(authInfo.LocationOfOrigin) == 0 { continue } base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) if err != nil { return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin) } if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil { return err } } return nil } // getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences. func getClusterFileReferences(cluster *clientcmdCluster) []*string { return []*string{&cluster.CertificateAuthority} } // getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences. func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string { return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} } // resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths. // ResolvePaths updates the given refs to be absolute paths, relative to the given base directory func resolvePaths(refs []*string, base string) error { for _, ref := range refs { // Don't resolve empty paths if len(*ref) > 0 { // Don't resolve absolute paths if !filepath.IsAbs(*ref) { *ref = filepath.Join(base, *ref) } } } return nil } // restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor. // RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config // object. Note that a RESTClient may require fields that are optional when initializing a Client. // A RESTClient created by this method is generic - it expects to operate on an API that follows // the Kubernetes conventions, but may not be the Kubernetes API. func restClientFor(config *restConfig) (*url.URL, *http.Client, error) { // REMOVED: Configurable GroupVersion, Codec // REMOVED: Configurable versionedAPIPath baseURL, err := defaultServerURLFor(config) if err != nil { return nil, nil, err } transport, err := transportFor(config) if err != nil { return nil, nil, err } var httpClient *http.Client if transport != http.DefaultTransport { httpClient = &http.Client{Transport: transport} } // REMOVED: Configurable QPS, Burst, ContentConfig // REMOVED: Actually returning a RESTClient object. return baseURL, httpClient, nil } // defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL. // DefaultServerURL converts a host, host:port, or URL string to the default base server API path // to use with a Client at a given API version following the standard conventions for a // Kubernetes API.
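// Illustrative behavior: a bare host such as "example.com" with defaultTLS=true becomes "https://example.com", while a value that already carries a scheme is returned unchanged.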
func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) { if host == "" { return nil, errors.Errorf("host must be a URL or a host:port pair") } base := host hostURL, err := url.Parse(base) if err != nil { return nil, err } if hostURL.Scheme == "" { scheme := "http://" if defaultTLS { scheme = "https://" } hostURL, err = url.Parse(scheme + base) if err != nil { return nil, err } if hostURL.Path != "" && hostURL.Path != "/" { return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base) } } // REMOVED: versionedAPIPath computation. return hostURL, nil } // defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor. // defaultServerURLFor is shared between IsConfigTransportTLS and RESTClientFor. It // requires Host and Version to be set prior to being called. func defaultServerURLFor(config *restConfig) (*url.URL, error) { // TODO: move the default to secure when the apiserver supports TLS by default // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 defaultTLS := hasCA || hasCert || config.Insecure host := config.Host if host == "" { host = "localhost" } // REMOVED: Configurable APIPath, GroupVersion return defaultServerURL(host, defaultTLS) } // transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor. // TransportFor returns an http.RoundTripper that will provide the authentication // or transport level security defined by the provided Config. Will return the // default http.DefaultTransport if no special case behavior is needed. func transportFor(config *restConfig) (http.RoundTripper, error) { // REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support return transportNew(config) } // isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS. // IsConfigTransportTLS returns true if and only if the provided // config will result in a protected connection to the server when it // is passed to restclient.RESTClientFor(). Use to determine when to // send credentials over the wire. // // Note: the Insecure flag is ignored when testing for this value, so MITM attacks are // still possible. func isConfigTransportTLS(config restConfig) bool { baseURL, err := defaultServerURLFor(&config) if err != nil { return false } return baseURL.Scheme == "https" } // transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New. // New returns an http.RoundTripper that will provide the authentication // or transport level security defined by the provided Config. func transportNew(config *restConfig) (http.RoundTripper, error) { // REMOVED: custom config.Transport support. // Set transport level security var ( rt http.RoundTripper err error ) rt, err = tlsCacheGet(config) if err != nil { return nil, err } // REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains. if len(config.Username) != 0 && len(config.BearerToken) != 0 { return nil, errors.Errorf("username/password or bearer token may be set, but not both") } return rt, nil } // newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR.
// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if // no matching CIDRs are found func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it noProxyEnv := os.Getenv("NO_PROXY") noProxyRules := strings.Split(noProxyEnv, ",") cidrs := []*net.IPNet{} for _, noProxyRule := range noProxyRules { _, cidr, _ := net.ParseCIDR(noProxyRule) if cidr != nil { cidrs = append(cidrs, cidr) } } if len(cidrs) == 0 { return delegate } return func(req *http.Request) (*url.URL, error) { host := req.URL.Host // for some urls, the Host is already the host, not the host:port if net.ParseIP(host) == nil { var err error host, _, err = net.SplitHostPort(req.URL.Host) if err != nil { return delegate(req) } } ip := net.ParseIP(host) if ip == nil { return delegate(req) } for _, cidr := range cidrs { if cidr.Contains(ip) { return nil, nil } } return delegate(req) } } // tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { // REMOVED: any actual caching // Get the TLS options for this client config tlsConfig, err := tlsConfigFor(config) if err != nil { return nil, err } // The options didn't require a custom TLS config if tlsConfig == nil { return http.DefaultTransport, nil } // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. t := &http.Transport{ // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: tlsConfig, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, } // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { _ = http2.ConfigureTransport(t) } return t, nil } // tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. // TLSConfigFor returns a tls.Config that will provide the transport level security defined // by the provided Config. Will return nil if no transport level security is requested. func tlsConfigFor(c *restConfig) (*tls.Config, error) { if !(c.HasCA() || c.HasCertAuth() || c.Insecure) { return nil, nil } if c.HasCA() && c.Insecure { return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed") } if err := loadTLSFiles(c); err != nil { return nil, err } tlsConfig := &tls.Config{ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability) MinVersion: tls.VersionTLS10, InsecureSkipVerify: c.Insecure, } if c.HasCA() { tlsConfig.RootCAs = rootCertPool(c.CAData) } if c.HasCertAuth() { cert, err := tls.X509KeyPair(c.CertData, c.KeyData) if err != nil { return nil, err } tlsConfig.Certificates = []tls.Certificate{cert} } return tlsConfig, nil } // loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles. // loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, // KeyData, and CAData fields, or returns an error.
If no error is returned, all three fields are // either populated or were empty to start. func loadTLSFiles(c *restConfig) error { var err error c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) if err != nil { return err } c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) if err != nil { return err } c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) if err != nil { return err } return nil } // dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile. // dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, // or an error if an error occurred reading the file func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { if len(data) > 0 { return data, nil } if len(file) > 0 { fileData, err := ioutil.ReadFile(file) if err != nil { return []byte{}, err } return fileData, nil } return nil, nil } // rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool. // rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". // When caData is not empty, it will be the ONLY information used in the CertPool. func rootCertPool(caData []byte) *x509.CertPool { // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values // It doesn't allow trusting either/or, but hopefully that won't be an issue if len(caData) == 0 { return nil } // if we have caData, use it certPool := x509.NewCertPool() certPool.AppendCertsFromPEM(caData) return certPool } // HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA. // HasCA returns whether the configuration has a certificate authority or not. func (c *restConfig) HasCA() bool { return len(c.CAData) > 0 || len(c.CAFile) > 0 } // HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth. // HasCertAuth returns whether the configuration has certificate authentication or not. func (c *restConfig) HasCertAuth() bool { return len(c.CertData) != 0 || len(c.CertFile) != 0 } // clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config. 
// Config holds the information needed to connect to remote kubernetes clusters as a given user // IMPORTANT if you add fields to this struct, please update IsConfigEmpty() type clientcmdConfig struct { // Clusters is a map of referenceable names to cluster configs Clusters clustersMap `json:"clusters"` // AuthInfos is a map of referenceable names to user configs AuthInfos authInfosMap `json:"users"` // Contexts is a map of referenceable names to context configs Contexts contextsMap `json:"contexts"` // CurrentContext is the name of the context that you would like to use by default CurrentContext string `json:"current-context"` } type clustersMap map[string]*clientcmdCluster func (m *clustersMap) UnmarshalJSON(data []byte) error { var a []v1NamedCluster if err := json.Unmarshal(data, &a); err != nil { return err } for _, e := range a { cluster := e.Cluster // Allocates a new instance in each iteration (*m)[e.Name] = &cluster } return nil } type authInfosMap map[string]*clientcmdAuthInfo func (m *authInfosMap) UnmarshalJSON(data []byte) error { var a []v1NamedAuthInfo if err := json.Unmarshal(data, &a); err != nil { return err } for _, e := range a { authInfo := e.AuthInfo // Allocates a new instance in each iteration (*m)[e.Name] = &authInfo } return nil } type contextsMap map[string]*clientcmdContext func (m *contextsMap) UnmarshalJSON(data []byte) error { var a []v1NamedContext if err := json.Unmarshal(data, &a); err != nil { return err } for _, e := range a { context := e.Context // Allocates a new instance in each iteration (*m)[e.Name] = &context } return nil } // clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig. // NewConfig is a convenience function that returns a new Config object with non-nil maps func clientcmdNewConfig() *clientcmdConfig { return &clientcmdConfig{ Clusters: make(map[string]*clientcmdCluster), AuthInfos: make(map[string]*clientcmdAuthInfo), Contexts: make(map[string]*clientcmdContext), } } // clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster. // Cluster contains information about how to communicate with a kubernetes cluster type clientcmdCluster struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. LocationOfOrigin string // Server is the address of the kubernetes cluster (https://hostname:port). Server string `json:"server"` // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` // CertificateAuthority is the path to a cert file for the certificate authority. CertificateAuthority string `json:"certificate-authority,omitempty"` // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` } // clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo. // AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are. type clientcmdAuthInfo struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. LocationOfOrigin string // ClientCertificate is the path to a client cert file for TLS.
ClientCertificate string `json:"client-certificate,omitempty"` // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate ClientCertificateData []byte `json:"client-certificate-data,omitempty"` // ClientKey is the path to a client key file for TLS. ClientKey string `json:"client-key,omitempty"` // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey ClientKeyData []byte `json:"client-key-data,omitempty"` // Token is the bearer token for authentication to the kubernetes cluster. Token string `json:"token,omitempty"` // Username is the username for basic authentication to the kubernetes cluster. Username string `json:"username,omitempty"` // Password is the password for basic authentication to the kubernetes cluster. Password string `json:"password,omitempty"` } // clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context. // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) type clientcmdContext struct { // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. LocationOfOrigin string // Cluster is the name of the cluster for this context Cluster string `json:"cluster"` // AuthInfo is the name of the authInfo for this context AuthInfo string `json:"user"` // Namespace is the default namespace to use on unspecified requests Namespace string `json:"namespace,omitempty"` } // v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster. // NamedCluster relates nicknames to cluster information type v1NamedCluster struct { // Name is the nickname for this Cluster Name string `json:"name"` // Cluster holds the cluster information Cluster clientcmdCluster `json:"cluster"` } // v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext. // NamedContext relates nicknames to context information type v1NamedContext struct { // Name is the nickname for this Context Name string `json:"name"` // Context holds the context information Context clientcmdContext `json:"context"` } // v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo. // NamedAuthInfo relates nicknames to auth information type v1NamedAuthInfo struct { // Name is the nickname for this AuthInfo Name string `json:"name"` // AuthInfo holds the auth information AuthInfo clientcmdAuthInfo `json:"user"` } image-4.0.1/openshift/openshift-copies_test.go000066400000000000000000000067371354546467100214760ustar00rootroot00000000000000package openshift import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const fixtureKubeConfigPath = "testdata/admin.kubeconfig" // These are only smoke tests based on the skopeo integration test cluster. Error handling, non-trivial configuration merging, // and any other situations are not currently covered. // Set up KUBECONFIG to point at the fixture, and return a handler to clean it up. // Callers MUST NOT call testing.T.Parallel(). func setupKubeConfigForSerialTest() func() { // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not // run in parallel unless they opt in by calling t.Parallel(). So don’t do that. 
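// Save any existing KUBECONFIG value first so the cleanup closure can restore the exact prior state, distinguishing "unset" from "set to empty".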
oldKC, hasKC := os.LookupEnv("KUBECONFIG") cleanup := func() { if hasKC { os.Setenv("KUBECONFIG", oldKC) } else { os.Unsetenv("KUBECONFIG") } } os.Setenv("KUBECONFIG", fixtureKubeConfigPath) return cleanup } func TestClientConfigLoadingRules(t *testing.T) { cleanup := setupKubeConfigForSerialTest() defer cleanup() rules := newOpenShiftClientConfigLoadingRules() res, err := rules.Load() require.NoError(t, err) expected := clientcmdConfig{ Clusters: clustersMap{ "172-17-0-2:8443": &clientcmdCluster{ LocationOfOrigin: fixtureKubeConfigPath, Server: "https://172.17.0.2:8443", CertificateAuthorityData: []byte("Cluster CA"), }, }, AuthInfos: authInfosMap{ "system:admin/172-17-0-2:8443": &clientcmdAuthInfo{ LocationOfOrigin: fixtureKubeConfigPath, ClientCertificateData: []byte("Client cert"), ClientKeyData: []byte("Client key"), }, }, Contexts: contextsMap{ "default/172-17-0-2:8443/system:admin": &clientcmdContext{ LocationOfOrigin: fixtureKubeConfigPath, Cluster: "172-17-0-2:8443", AuthInfo: "system:admin/172-17-0-2:8443", Namespace: "default", }, }, CurrentContext: "default/172-17-0-2:8443/system:admin", } assert.Equal(t, &expected, res) } func TestDirectClientConfig(t *testing.T) { cleanup := setupKubeConfigForSerialTest() defer cleanup() rules := newOpenShiftClientConfigLoadingRules() config, err := rules.Load() require.NoError(t, err) direct := newNonInteractiveClientConfig(*config) res, err := direct.ClientConfig() require.NoError(t, err) assert.Equal(t, &restConfig{ Host: "https://172.17.0.2:8443", restTLSClientConfig: restTLSClientConfig{ CertData: []byte("Client cert"), KeyData: []byte("Client key"), CAData: []byte("Cluster CA"), }, }, res) } func TestDeferredLoadingClientConfig(t *testing.T) { cleanup := setupKubeConfigForSerialTest() defer cleanup() rules := newOpenShiftClientConfigLoadingRules() deferred := newNonInteractiveDeferredLoadingClientConfig(rules) res, err := deferred.ClientConfig() require.NoError(t, err) assert.Equal(t, &restConfig{ Host: "https://172.17.0.2:8443", restTLSClientConfig: restTLSClientConfig{ CertData: []byte("Client cert"), KeyData: []byte("Client key"), CAData: []byte("Cluster CA"), }, }, res) } func TestDefaultClientConfig(t *testing.T) { cleanup := setupKubeConfigForSerialTest() defer cleanup() config := defaultClientConfig() res, err := config.ClientConfig() require.NoError(t, err) assert.Equal(t, &restConfig{ Host: "https://172.17.0.2:8443", restTLSClientConfig: restTLSClientConfig{ CertData: []byte("Client cert"), KeyData: []byte("Client key"), CAData: []byte("Cluster CA"), }, }, res) } image-4.0.1/openshift/openshift.go000066400000000000000000000524541354546467100171540ustar00rootroot00000000000000package openshift import ( "bytes" "context" "crypto/rand" "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "strings" "github.com/containers/image/v4/docker" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/containers/image/v4/version" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // openshiftClient is configuration for dealing with a single image stream, for reading or writing. type openshiftClient struct { ref openshiftReference baseURL *url.URL // Values from Kubernetes configuration httpClient *http.Client bearerToken string // "" if not used username string // "" if not used password string // if username != "" } // newOpenshiftClient creates a new openshiftClient for the specified reference. 
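// Illustrative call path (hypothetical image name): ParseReference("registry.example.com/myproject/mystream:latest") yields an openshiftReference, and newImageSource/newImageDestination then construct this client from it.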
func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { // We have already done this parsing in ParseReference, but thrown away // httpClient. So, parse again. // (We could also rework/split restClientFor to "get base URL" to be done // in ParseReference, and "get httpClient" to be done here. But until/unless // we support non-default clusters, this is good enough.) // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. cmdConfig := defaultClientConfig() logrus.Debugf("cmdConfig: %#v", cmdConfig) restConfig, err := cmdConfig.ClientConfig() if err != nil { return nil, err } // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) logrus.Debugf("restConfig: %#v", restConfig) baseURL, httpClient, err := restClientFor(restConfig) if err != nil { return nil, err } logrus.Debugf("URL: %#v", *baseURL) if httpClient == nil { httpClient = http.DefaultClient } return &openshiftClient{ ref: ref, baseURL: baseURL, httpClient: httpClient, bearerToken: restConfig.BearerToken, username: restConfig.Username, password: restConfig.Password, }, nil } // doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { url := *c.baseURL url.Path = path var requestBodyReader io.Reader if requestBody != nil { logrus.Debugf("Will send body: %s", requestBody) requestBodyReader = bytes.NewReader(requestBody) } req, err := http.NewRequest(method, url.String(), requestBodyReader) if err != nil { return nil, err } req = req.WithContext(ctx) if len(c.bearerToken) != 0 { req.Header.Set("Authorization", "Bearer "+c.bearerToken) } else if len(c.username) != 0 { req.SetBasicAuth(c.username, c.password) } req.Header.Set("Accept", "application/json, */*") req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) if requestBody != nil { req.Header.Set("Content-Type", "application/json") } logrus.Debugf("%s %s", method, url.String()) res, err := c.httpClient.Do(req) if err != nil { return nil, err } defer res.Body.Close() body, err := ioutil.ReadAll(res.Body) if err != nil { return nil, err } logrus.Debugf("Got body: %s", body) // FIXME: Just throwing this useful information away only to try to guess later... logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) var status status statusValid := false if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { statusValid = true } switch { case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. if statusValid && status.Status != "Success" { return nil, errors.New(status.Message) } case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: // OK. default: if statusValid { return nil, errors.New(status.Message) } return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body)) } return body, nil } // getImage loads the specified image object. func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { // FIXME: validate components per validation.IsValidPathSegmentName? 
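// The REST path below addresses the image as <stream>@<digest> within the namespace's imagestreamimages resource.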
path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) body, err := c.doRequest(ctx, "GET", path, nil) if err != nil { return nil, err } // Note: This does absolutely no kind/version checking or conversions. var isi imageStreamImage if err := json.Unmarshal(body, &isi); err != nil { return nil, err } return &isi.Image, nil } // convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; // currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { parts := strings.SplitN(ref, "/", 2) if len(parts) != 2 { return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) } return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil } type openshiftImageSource struct { client *openshiftClient // Values specific to this image sys *types.SystemContext // State docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet imageStreamImageName string // Resolved image identifier, or "" if not known yet } // newImageSource creates a new ImageSource for the specified reference. // The caller must call .Close() on the returned ImageSource. func newImageSource(sys *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { client, err := newOpenshiftClient(ref) if err != nil { return nil, err } return &openshiftImageSource{ client: client, sys: sys, }, nil } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (s *openshiftImageSource) Reference() types.ImageReference { return s.client.ref } // Close removes resources associated with an initialized ImageSource, if any. func (s *openshiftImageSource) Close() error { if s.docker != nil { err := s.docker.Close() s.docker = nil return err } return nil } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if err := s.ensureImageIsResolved(ctx); err != nil { return nil, "", err } return s.docker.GetManifest(ctx, instanceDigest) } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *openshiftImageSource) HasThreadSafeGetBlob() bool { return false } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
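// The blob itself is served by the backing Docker registry; once ensureImageIsResolved has set up s.docker, the call is delegated there.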
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { if err := s.ensureImageIsResolved(ctx); err != nil { return nil, 0, err } return s.docker.GetBlob(ctx, info, cache) } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { var imageName string if instanceDigest == nil { if err := s.ensureImageIsResolved(ctx); err != nil { return nil, err } imageName = s.imageStreamImageName } else { imageName = instanceDigest.String() } image, err := s.client.getImage(ctx, imageName) if err != nil { return nil, err } var sigs [][]byte for _, sig := range image.Signatures { if sig.Type == imageSignatureTypeAtomic { sigs = append(sigs, sig.Content) } } return sigs, nil } // LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. func (s *openshiftImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } // ensureImageIsResolved sets up s.docker and s.imageStreamImageName func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { if s.docker != nil { return nil } // FIXME: validate components per validation.IsValidPathSegmentName? path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) body, err := s.client.doRequest(ctx, "GET", path, nil) if err != nil { return err } // Note: This does absolutely no kind/version checking or conversions. var is imageStream if err := json.Unmarshal(body, &is); err != nil { return err } var te *tagEvent for _, tag := range is.Status.Tags { if tag.Tag != s.client.ref.dockerReference.Tag() { continue } if len(tag.Items) > 0 { te = &tag.Items[0] break } } if te == nil { return errors.Errorf("No matching tag found") } logrus.Debugf("tag event %#v", te) dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) if err != nil { return err } logrus.Debugf("Resolved reference %#v", dockerRefString) dockerRef, err := docker.ParseReference("//" + dockerRefString) if err != nil { return err } d, err := dockerRef.NewImageSource(ctx, s.sys) if err != nil { return err } s.docker = d s.imageStreamImageName = te.Image return nil } type openshiftImageDestination struct { client *openshiftClient docker types.ImageDestination // The Docker Registry endpoint // State imageStreamImageName string // "" if not yet known } // newImageDestination creates a new ImageDestination for the specified reference. func newImageDestination(ctx context.Context, sys *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { client, err := newOpenshiftClient(ref) if err != nil { return nil, err } // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know // the manifest digest at this point. 
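// Build a docker reference of the form //<host>/<namespace>/<stream>:<tag> from the components of the OpenShift reference; the upload itself is delegated to the docker transport.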
dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) dockerRef, err := docker.ParseReference(dockerRefString) if err != nil { return nil, err } docker, err := dockerRef.NewImageDestination(ctx, sys) if err != nil { return nil, err } return &openshiftImageDestination{ client: client, docker: docker, }, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *openshiftImageDestination) Reference() types.ImageReference { return d.client.ref } // Close removes resources associated with an initialized ImageDestination, if any. func (d *openshiftImageDestination) Close() error { return d.docker.Close() } func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { return d.docker.SupportedManifestMIMETypes() } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *openshiftImageDestination) SupportsSignatures(ctx context.Context) error { return nil } func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression { return types.Compress } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool { return true } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *openshiftImageDestination) MustMatchRuntimeOS() bool { return false } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool { return d.docker.IgnoresEmbeddedDockerReference() } // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *openshiftImageDestination) HasThreadSafePutBlob() bool { return false } // PutBlob writes contents of stream and returns data representing the result (with all data filled in). // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig) } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. 
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute) } // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. func (d *openshiftImageDestination) PutManifest(ctx context.Context, m []byte) error { manifestDigest, err := manifest.Digest(m) if err != nil { return err } d.imageStreamImageName = manifestDigest.String() return d.docker.PutManifest(ctx, m) } func (d *openshiftImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { if d.imageStreamImageName == "" { return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures") } // Because image signatures are a shared resource in Atomic Registry, the default upload // always adds signatures. Eventually we should also allow removing signatures. if len(signatures) == 0 { return nil // No need to even read the old state. } image, err := d.client.getImage(ctx, d.imageStreamImageName) if err != nil { return err } existingSigNames := map[string]struct{}{} for _, sig := range image.Signatures { existingSigNames[sig.objectMeta.Name] = struct{}{} } sigExists: for _, newSig := range signatures { for _, existingSig := range image.Signatures { if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { continue sigExists } } // The API expects us to invent a new unique name. This is racy, but hopefully good enough. var signatureName string for { randBytes := make([]byte, 16) n, err := rand.Read(randBytes) if err != nil || n != 16 { return errors.Wrapf(err, "Error generating random signature len %d", n) } signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes) if _, ok := existingSigNames[signatureName]; !ok { break } } // Note: This does absolutely no kind/version checking or conversions. sig := imageSignature{ typeMeta: typeMeta{ Kind: "ImageSignature", APIVersion: "v1", }, objectMeta: objectMeta{Name: signatureName}, Type: imageSignatureTypeAtomic, Content: newSig, } body, err := json.Marshal(sig) if err != nil { return err } _, err = d.client.doRequest(ctx, "POST", "/oapi/v1/imagesignatures", body) if err != nil { return err } } return nil } // Commit marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) func (d *openshiftImageDestination) Commit(ctx context.Context) error { return d.docker.Commit(ctx) } // These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies. type imageStream struct { Status imageStreamStatus `json:"status,omitempty"` } type imageStreamStatus struct { DockerImageRepository string `json:"dockerImageRepository"` Tags []namedTagEventList `json:"tags,omitempty"` } type namedTagEventList struct { Tag string `json:"tag"` Items []tagEvent `json:"items"` } type tagEvent struct { DockerImageReference string `json:"dockerImageReference"` Image string `json:"image"` } type imageStreamImage struct { Image image `json:"image"` } type image struct { objectMeta `json:"metadata,omitempty"` DockerImageReference string `json:"dockerImageReference,omitempty"` // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` DockerImageManifest string `json:"dockerImageManifest,omitempty"` // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` Signatures []imageSignature `json:"signatures,omitempty"` } const imageSignatureTypeAtomic string = "atomic" type imageSignature struct { typeMeta `json:",inline"` objectMeta `json:"metadata,omitempty"` Type string `json:"type"` Content []byte `json:"content"` // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` // ImageIdentity string `json:"imageIdentity,omitempty"` // SignedClaims map[string]string `json:"signedClaims,omitempty"` // Created *unversioned.Time `json:"created,omitempty"` // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` } type typeMeta struct { Kind string `json:"kind,omitempty"` APIVersion string `json:"apiVersion,omitempty"` } type objectMeta struct { Name string `json:"name,omitempty"` GenerateName string `json:"generateName,omitempty"` Namespace string `json:"namespace,omitempty"` SelfLink string `json:"selfLink,omitempty"` ResourceVersion string `json:"resourceVersion,omitempty"` Generation int64 `json:"generation,omitempty"` DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` Labels map[string]string `json:"labels,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` } // A subset of k8s.io/kubernetes/pkg/api/unversioned/Status type status struct { Status string `json:"status,omitempty"` Message string `json:"message,omitempty"` // Reason StatusReason `json:"reason,omitempty"` // Details *StatusDetails `json:"details,omitempty"` Code int32 `json:"code,omitempty"` } image-4.0.1/openshift/openshift_transport.go000066400000000000000000000163201354546467100212600ustar00rootroot00000000000000package openshift import ( "context" "fmt" "regexp" "strings" "github.com/containers/image/v4/docker/policyconfiguration" "github.com/containers/image/v4/docker/reference" genericImage "github.com/containers/image/v4/image" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) func init() { transports.Register(Transport) } // Transport is an ImageTransport for OpenShift registry-hosted images. var Transport = openshiftTransport{} type openshiftTransport struct{} func (t openshiftTransport) Name() string { return "atomic" } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. 
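// For illustration (hypothetical values, not part of the original documentation), a caller might write: // ref, err := Transport.ParseReference("registry.example.com:8443/ns/stream:notlatest") // if err != nil { /* handle the error */ } // fmt.Println(ref.StringWithinTransport()) // prints "registry.example.com:8443/ns/stream:notlatest"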
func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { return ParseReference(reference) } // Note that imageNameRegexp matches namespace/stream:tag, while scopeRegexp // matches HOSTNAME/namespace/stream:tag or parent prefixes of it. // Keep this in sync with imageNameRegexp! var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { if scopeRegexp.FindStringIndex(scope) == nil { return errors.Errorf("Invalid scope name %s", scope) } return nil } // openshiftReference is an ImageReference for OpenShift images. type openshiftReference struct { dockerReference reference.NamedTagged namespace string // Computed from dockerReference in advance. stream string // Computed from dockerReference in advance. } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. func ParseReference(ref string) (types.ImageReference, error) { r, err := reference.ParseNormalizedNamed(ref) if err != nil { return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) } tagged, ok := r.(reference.NamedTagged) if !ok { return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) } return NewReference(tagged) } // NewReference returns an OpenShift reference for a reference.NamedTagged. func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { r := strings.SplitN(reference.Path(dockerRef), "/", 3) if len(r) != 2 { return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", reference.FamiliarString(dockerRef)) } return openshiftReference{ namespace: r[0], stream: r[1], dockerReference: dockerRef, }, nil } func (ref openshiftReference) Transport() types.ImageTransport { return Transport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref openshiftReference) StringWithinTransport() string { return reference.FamiliarString(ref.dockerReference) } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref openshiftReference) DockerReference() reference.Named { return ref.dockerReference } // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. // This MUST reflect user intent, not e.g.
after processing of third-party redirects or aliases; // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. various references with exactly the same semantics should return the same configuration identity) // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. func (ref openshiftReference) PolicyConfigurationIdentity() string { res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) } return res } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref openshiftReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. func (ref openshiftReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(sys, ref) if err != nil { return nil, err } return genericImage.FromSource(ctx, sys, src) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref openshiftReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { return newImageSource(sys, ref) } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref openshiftReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(ctx, sys, ref) } // DeleteImage deletes the named image from the registry, if supported. 
func (ref openshiftReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { return errors.Errorf("Deleting images not implemented for atomic: images") } image-4.0.1/openshift/openshift_transport_test.go000066400000000000000000000104211354546467100223130ustar00rootroot00000000000000package openshift import ( "context" "testing" "github.com/containers/image/v4/docker/reference" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" sha256digest = "@sha256:" + sha256digestHex ) func TestTransportName(t *testing.T) { assert.Equal(t, "atomic", Transport.Name()) } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { for _, scope := range []string{ "registry.example.com/ns/stream" + sha256digest, "registry.example.com/ns/stream:notlatest", "registry.example.com/ns/stream", "registry.example.com/ns", "registry.example.com", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } for _, scope := range []string{ "registry.example.com/too/deep/hierarchy", "registry.example.com/ns/stream:tag1:tag2", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.Error(t, err, scope) } } func TestNewReference(t *testing.T) { // too many ns r, err := reference.ParseNormalizedNamed("registry.example.com/ns1/ns2/ns3/stream:tag") require.NoError(t, err) tagged, ok := r.(reference.NamedTagged) require.True(t, ok) _, err = NewReference(tagged) assert.Error(t, err) r, err = reference.ParseNormalizedNamed("registry.example.com/ns/stream:tag") require.NoError(t, err) tagged, ok = r.(reference.NamedTagged) require.True(t, ok) _, err = NewReference(tagged) assert.NoError(t, err) } func TestParseReference(t *testing.T) { // Success ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) osRef, ok := ref.(openshiftReference) require.True(t, ok) assert.Equal(t, "ns", osRef.namespace) assert.Equal(t, "stream", osRef.stream) assert.Equal(t, "notlatest", osRef.dockerReference.Tag()) assert.Equal(t, "registry.example.com:8443", reference.Domain(osRef.dockerReference)) // Components creating an invalid Docker Reference name _, err = ParseReference("registry.example.com/ns/UPPERCASEISINVALID:notlatest") assert.Error(t, err) _, err = ParseReference("registry.example.com/ns/stream:invalid!tag@value=") assert.Error(t, err) } func TestReferenceDockerReference(t *testing.T) { ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) dockerRef := ref.DockerReference() require.NotNil(t, dockerRef) assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", dockerRef.String()) } func TestReferenceTransport(t *testing.T) { ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.StringWithinTransport()) // We should do one more round to verify that the output can be parsed, to an equal value, // but that is untested because it depends on per-user configuration. } func TestReferencePolicyConfigurationIdentity(t *testing.T) { // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. 
ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) assert.Equal(t, "registry.example.com:8443/ns/stream:notlatest", ref.PolicyConfigurationIdentity()) } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { // Just a smoke test, the substance is tested in policyconfiguration.TestDockerReference. ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) assert.Equal(t, []string{ "registry.example.com:8443/ns/stream", "registry.example.com:8443/ns", "registry.example.com:8443", }, ref.PolicyConfigurationNamespaces()) } // openshiftReference.NewImage, openshiftReference.NewImageSource, openshiftReference.NewImageDestination untested because they depend // on per-user configuration when initializing httpClient. func TestReferenceDeleteImage(t *testing.T) { ref, err := ParseReference("registry.example.com:8443/ns/stream:notlatest") require.NoError(t, err) err = ref.DeleteImage(context.Background(), nil) assert.Error(t, err) } image-4.0.1/openshift/testdata/000077500000000000000000000000001354546467100164255ustar00rootroot00000000000000image-4.0.1/openshift/testdata/admin.kubeconfig000066400000000000000000000010111354546467100215500ustar00rootroot00000000000000apiVersion: v1 clusters: - cluster: certificate-authority-data: Q2x1c3RlciBDQQ== server: https://172.17.0.2:8443 name: 172-17-0-2:8443 contexts: - context: cluster: 172-17-0-2:8443 namespace: default user: system:admin/172-17-0-2:8443 name: default/172-17-0-2:8443/system:admin current-context: default/172-17-0-2:8443/system:admin kind: Config preferences: {} users: - name: system:admin/172-17-0-2:8443 user: client-certificate-data: Q2xpZW50IGNlcnQ= client-key-data: Q2xpZW50IGtleQ== image-4.0.1/ostree/000077500000000000000000000000001354546467100141165ustar00rootroot00000000000000image-4.0.1/ostree/ostree_dest.go000066400000000000000000000376311354546467100167750ustar00rootroot00000000000000// +build containers_image_ostree package ostree import ( "bytes" "context" "encoding/base64" "encoding/json" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" "syscall" "time" "unsafe" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/containers/storage/pkg/archive" "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" selinux "github.com/opencontainers/selinux/go-selinux" "github.com/ostreedev/ostree-go/pkg/otbuiltin" "github.com/pkg/errors" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) // #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux // #include <glib.h> // #include <glib-object.h> // #include <gio/gio.h> // #include <stdlib.h> // #include <ostree.h> // #include <gio/ginputstream.h> // #include <selinux/selinux.h> // #include <selinux/label.h> import "C" type blobToImport struct { Size int64 Digest digest.Digest BlobPath string } type descriptor struct { Size int64 `json:"size"` Digest digest.Digest `json:"digest"` } type fsLayersSchema1 struct { BlobSum digest.Digest `json:"blobSum"` } type manifestSchema struct { LayersDescriptors []descriptor `json:"layers"` FSLayers []fsLayersSchema1 `json:"fsLayers"` } type ostreeImageDestination struct { ref ostreeReference manifest string schema manifestSchema tmpDirPath string blobs map[string]*blobToImport digest digest.Digest signaturesLen int repo *C.struct_OstreeRepo } // newImageDestination returns an ImageDestination for writing to an existing ostree.
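// For illustration (a sketch with hypothetical paths; destinations are normally obtained through the reference rather than by calling this directly): // ref, err := Transport.ParseReference("busybox:latest@/ostree/repo") // if err != nil { /* handle the error */ } // dest, err := ref.NewImageDestination(ctx, &types.SystemContext{OSTreeTmpDirPath: "/var/tmp"}) // if err != nil { /* handle the error */ } // defer dest.Close()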
func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) { tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) if err := ensureDirectoryExists(tmpDirPath); err != nil { return nil, err } return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. func (d *ostreeImageDestination) Reference() types.ImageReference { return d.ref } // Close removes resources associated with an initialized ImageDestination, if any. func (d *ostreeImageDestination) Close() error { if d.repo != nil { C.g_object_unref(C.gpointer(d.repo)) } return os.RemoveAll(d.tmpDirPath) } func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string { return []string{ manifest.DockerV2Schema2MediaType, } } // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. func (d *ostreeImageDestination) SupportsSignatures(ctx context.Context) error { return nil } // DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved when written to this destination. func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression { return types.PreserveOriginal } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually // uploaded to the image destination, true otherwise. func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { return false } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { return true } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool { return false // N/A, DockerReference() returns nil. } // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. func (d *ostreeImageDestination) HasThreadSafePutBlob() bool { return false } // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
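// For illustration (a hypothetical caller; blobReader, blobLen and expectedDigest are assumed to be provided elsewhere): // info, err := dest.PutBlob(ctx, blobReader, types.BlobInfo{Digest: expectedDigest, Size: blobLen}, cache, false) // if err != nil { /* handle the error */ } // // info.Digest and info.Size describe what was actually stored.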
func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) { tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") if err != nil { return types.BlobInfo{}, err } blobPath := filepath.Join(tmpDir, "content") blobFile, err := os.Create(blobPath) if err != nil { return types.BlobInfo{}, err } defer blobFile.Close() digester := digest.Canonical.Digester() tee := io.TeeReader(stream, digester.Hash()) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, tee) if err != nil { return types.BlobInfo{}, err } computedDigest := digester.Digest() if inputInfo.Size != -1 && size != inputInfo.Size { return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) } if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } hash := computedDigest.Hex() d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} return types.BlobInfo{Digest: computedDigest, Size: size}, nil } func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { entries, err := ioutil.ReadDir(dir) if err != nil { return err } for _, info := range entries { fullpath := filepath.Join(dir, info.Name()) if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { if err := os.Remove(fullpath); err != nil { return err } continue } if selinuxHnd != nil { relPath, err := filepath.Rel(root, fullpath) if err != nil { return err } // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, // thus we benefit from maintaining the same SELinux label they would have on the host as we could // use hard links instead of copying the files. 
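// For example (illustrative): a file extracted to $ROOT/exports/hostfs/etc/motd is looked up // in the SELinux label database as if it were /etc/motd on the host.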
relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) relPathC := C.CString(relPath) defer C.free(unsafe.Pointer(relPathC)) var context *C.char res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) if int(res) < 0 && err != syscall.ENOENT { return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) } if int(res) == 0 { defer C.freecon(context) fullpathC := C.CString(fullpath) defer C.free(unsafe.Pointer(fullpathC)) res, err = C.lsetfilecon_raw(fullpathC, context) if int(res) < 0 { return errors.Wrapf(err, "cannot setfilecon_raw %s to %s", fullpath, C.GoString(context)) } } } if info.IsDir() { if usermode { if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { return err } } err = fixFiles(selinuxHnd, root, fullpath, usermode) if err != nil { return err } } else if usermode && (info.Mode().IsRegular()) { if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { return err } } } return nil } func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { opts := otbuiltin.NewCommitOptions() opts.AddMetadataString = metadata opts.Timestamp = time.Now() // OCI layers have no parent OSTree commit opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" _, err := repo.Commit(root, branch, opts) return err } func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) { mfz := pgzip.NewWriter(output) defer mfz.Close() metaPacker := storage.NewJSONPacker(mfz) stream, err := os.OpenFile(file, os.O_RDONLY, 0) if err != nil { return "", -1, err } defer stream.Close() gzReader, err := archive.DecompressStream(stream) if err != nil { return "", -1, err } defer gzReader.Close() its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) if err != nil { return "", -1, err } digester := digest.Canonical.Digester() written, err := io.Copy(digester.Hash(), its) if err != nil { return "", -1, err } return digester.Digest(), written, nil } func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. 
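// Each blob gets its own OSTree branch named after its digest hex; e.g. (illustrative) // a layer sha256:0123... is committed to the branch "ociimage/0123...".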
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") if err := ensureDirectoryExists(destinationPath); err != nil { return err } defer func() { os.Remove(blob.BlobPath) os.RemoveAll(destinationPath) }() var tarSplitOutput bytes.Buffer uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath) if err != nil { return err } if os.Getuid() == 0 { if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { return err } if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil { return err } } else { if err := os.MkdirAll(destinationPath, 0755); err != nil { return err } if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil { return err } if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil { return err } } return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize), fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()), fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) } func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) destinationPath := filepath.Dir(blob.BlobPath) return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { if d.repo == nil { repo, err := openRepo(d.ref.repo) if err != nil { return false, types.BlobInfo{}, err } d.repo = repo } branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") if err != nil || !found { return found, types.BlobInfo{}, err } found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size") if err != nil || !found { return found, types.BlobInfo{}, err } found, data, err = readMetadata(d.repo, branch, "docker.size") if err != nil || !found { return found, types.BlobInfo{}, err } size, err := strconv.ParseInt(data, 10, 64) if err != nil { return false, types.BlobInfo{}, err } return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil } // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, but refuses this manifest type (e.g. it does not recognize the schema) // even though it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { d.manifest = string(manifestBlob) if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { return err } manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath()) if err := ensureParentDirectoryExists(manifestPath); err != nil { return err } digest, err := manifest.Digest(manifestBlob) if err != nil { return err } d.digest = digest return ioutil.WriteFile(manifestPath, manifestBlob, 0644) } func (d *ostreeImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) if err := ensureParentDirectoryExists(path); err != nil { return err } for i, sig := range signatures { signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { return err } } d.signaturesLen = len(signatures) return nil } func (d *ostreeImageDestination) Commit(ctx context.Context) error { runtime.LockOSThread() defer runtime.UnlockOSThread() repo, err := otbuiltin.OpenRepo(d.ref.repo) if err != nil { return err } _, err = repo.PrepareTransaction() if err != nil { return err } var selinuxHnd *C.struct_selabel_handle if os.Getuid() == 0 && selinux.GetEnabled() { selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) if selinuxHnd == nil { return errors.Wrapf(err, "cannot open the SELinux DB") } defer C.selabel_close(selinuxHnd) } checkLayer := func(hash string) error { blob := d.blobs[hash] // if the blob is not present in d.blobs then it is already stored in OSTree, // and we don't need to import it.
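// (Blobs that TryReusingBlob already found in the repository are never added to d.blobs, // so they fall into this case as well.)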
if blob == nil { return nil } err := d.importBlob(selinuxHnd, repo, blob) if err != nil { return err } delete(d.blobs, hash) return nil } for _, layer := range d.schema.LayersDescriptors { hash := layer.Digest.Hex() if err = checkLayer(hash); err != nil { return err } } for _, layer := range d.schema.FSLayers { hash := layer.BlobSum.Hex() if err = checkLayer(hash); err != nil { return err } } // Import the other blobs that are not layers for _, blob := range d.blobs { err := d.importConfig(repo, blob) if err != nil { return err } } manifestPath := filepath.Join(d.tmpDirPath, "manifest") metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("signatures=%d", d.signaturesLen), fmt.Sprintf("docker.digest=%s", string(d.digest))} if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil { return err } _, err = repo.CommitTransaction() return err } func ensureDirectoryExists(path string) error { if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { if err := os.MkdirAll(path, 0755); err != nil { return err } } return nil } func ensureParentDirectoryExists(path string) error { return ensureDirectoryExists(filepath.Dir(path)) } image-4.0.1/ostree/ostree_src.go000066400000000000000000000267001354546467100166220ustar00rootroot00000000000000// +build containers_image_ostree package ostree import ( "bytes" "context" "encoding/base64" "fmt" "io" "io/ioutil" "strconv" "strings" "unsafe" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/containers/storage/pkg/ioutils" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" glib "github.com/ostreedev/ostree-go/pkg/glibobject" "github.com/pkg/errors" "github.com/vbatts/tar-split/tar/asm" "github.com/vbatts/tar-split/tar/storage" ) // #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 // #include <glib.h> // #include <glib-object.h> // #include <gio/gio.h> // #include <stdlib.h> // #include <ostree.h> // #include <gio/ginputstream.h> import "C" type ostreeImageSource struct { ref ostreeReference tmpDir string repo *C.struct_OstreeRepo // get the compressed layer by its uncompressed checksum compressed map[digest.Digest]digest.Digest } // newImageSource returns an ImageSource for reading from an existing ostree repository. func newImageSource(tmpDir string, ref ostreeReference) (types.ImageSource, error) { return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil } // Reference returns the reference used to set up this source. func (s *ostreeImageSource) Reference() types.ImageReference { return s.ref } // Close removes resources associated with an initialized ImageSource, if any. func (s *ostreeImageSource) Close() error { if s.repo != nil { C.g_object_unref(C.gpointer(s.repo)) } return nil } func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) { var metadataKey string if isCompressed { metadataKey = "docker.uncompressed_size" } else { metadataKey = "docker.size" } b := fmt.Sprintf("ociimage/%s", blob) found, data, err := readMetadata(s.repo, b, metadataKey) if err != nil || !found { return 0, err } return strconv.ParseInt(data, 10, 64) } func (s *ostreeImageSource) getLenSignatures() (int64, error) { b := fmt.Sprintf("ociimage/%s", s.ref.branchName) found, data, err := readMetadata(s.repo, b, "signatures") if err != nil { return -1, err } if !found { // if 'signatures' is not present, just return 0 signatures.
return 0, nil } return strconv.ParseInt(data, 10, 64) } func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { b := fmt.Sprintf("ociimage/%s", blob) found, out, err := readMetadata(s.repo, b, "tarsplit.output") if err != nil || !found { return nil, err } return base64.StdEncoding.DecodeString(out) } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if instanceDigest != nil { return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) } if s.repo == nil { repo, err := openRepo(s.ref.repo) if err != nil { return nil, "", err } s.repo = repo } b := fmt.Sprintf("ociimage/%s", s.ref.branchName) found, out, err := readMetadata(s.repo, b, "docker.manifest") if err != nil { return nil, "", err } if !found { return nil, "", errors.New("manifest not found") } m := []byte(out) return m, manifest.GuessMIMEType(m), nil } func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { return nil, "", errors.New("manifest lists are not supported by this transport") } func openRepo(path string) (*C.struct_OstreeRepo, error) { var cerr *C.GError cpath := C.CString(path) defer C.free(unsafe.Pointer(cpath)) pathc := C.g_file_new_for_path(cpath) defer C.g_object_unref(C.gpointer(pathc)) repo := C.ostree_repo_new(pathc) r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) if !r { C.g_object_unref(C.gpointer(repo)) return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) } return repo, nil } type ostreePathFileGetter struct { repo *C.struct_OstreeRepo parentRoot *C.GFile } type ostreeReader struct { stream *C.GFileInputStream } func (o ostreeReader) Close() error { C.g_object_unref(C.gpointer(o.stream)) return nil } func (o ostreeReader) Read(p []byte) (int, error) { var cerr *C.GError instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) if b == nil { return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) } defer C.g_bytes_unref(b) count := int(C.g_bytes_get_size(b)) if count == 0 { return 0, io.EOF } data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] copy(p, data) return count, nil } func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { var cerr *C.GError var ref *C.char defer func() { C.free(unsafe.Pointer(ref)) }() // evaluate ref when the deferred call runs; it is set by ostree_repo_resolve_rev below cCommit := C.CString(commit) defer C.free(unsafe.Pointer(cCommit)) if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) } if ref == nil { return false, "", nil } var variant *C.GVariant if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) } defer C.g_variant_unref(variant) if variant != nil { cKey := C.CString(key) defer C.free(unsafe.Pointer(cKey)) metadata := C.g_variant_get_child_value(variant, 0) defer C.g_variant_unref(metadata) data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) if data != nil { defer
C.g_variant_unref(data) ptr := (*C.char)(C.g_variant_get_string(data, nil)) val := C.GoString(ptr) return true, val, nil } } return false, "", nil } func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { var cerr *C.GError var parentRoot *C.GFile cCommit := C.CString(commit) defer C.free(unsafe.Pointer(cCommit)) if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) } C.g_object_ref(C.gpointer(repo)) return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil } func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { var file *C.GFile if strings.HasPrefix(filename, "./") { filename = filename[2:] } cfilename := C.CString(filename) defer C.free(unsafe.Pointer(cfilename)) file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) var cerr *C.GError stream := C.g_file_read(file, nil, &cerr) if stream == nil { return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) } return &ostreeReader{stream: stream}, nil } func (o ostreePathFileGetter) Close() { C.g_object_unref(C.gpointer(o.repo)) C.g_object_unref(C.gpointer(o.parentRoot)) } func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { getter, err := newOSTreePathFileGetter(s.repo, commit) if err != nil { return nil, err } defer getter.Close() return getter.Get(path) } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *ostreeImageSource) HasThreadSafeGetBlob() bool { return false } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { blob := info.Digest.Hex() // Ensure s.compressed is initialized. It is built by LayerInfosForCopy. if s.compressed == nil { _, err := s.LayerInfosForCopy(ctx) if err != nil { return nil, -1, err } } compressedBlob, isCompressed := s.compressed[info.Digest] if isCompressed { blob = compressedBlob.Hex() } branch := fmt.Sprintf("ociimage/%s", blob) if s.repo == nil { repo, err := openRepo(s.ref.repo) if err != nil { return nil, 0, err } s.repo = repo } layerSize, err := s.getBlobUncompressedSize(blob, isCompressed) if err != nil { return nil, 0, err } tarsplit, err := s.getTarSplitData(blob) if err != nil { return nil, 0, err } // if tarsplit is nil we are looking at the manifest.
Return the file in /content directly. if tarsplit == nil { file, err := s.readSingleFile(branch, "/content") if err != nil { return nil, 0, err } return file, layerSize, nil } mf := bytes.NewReader(tarsplit) mfz, err := pgzip.NewReader(mf) if err != nil { return nil, 0, err } metaUnpacker := storage.NewJSONUnpacker(mfz) getter, err := newOSTreePathFileGetter(s.repo, branch) if err != nil { mfz.Close() return nil, 0, err } ots := asm.NewOutputTarStream(getter, metaUnpacker) rc := ioutils.NewReadCloserWrapper(ots, func() error { getter.Close() mfz.Close() return ots.Close() }) return rc, layerSize, nil } func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if instanceDigest != nil { return nil, errors.New("manifest lists are not supported by this transport") } lenSignatures, err := s.getLenSignatures() if err != nil { return nil, err } branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) if s.repo == nil { repo, err := openRepo(s.ref.repo) if err != nil { return nil, err } s.repo = repo } signatures := [][]byte{} for i := int64(1); i <= lenSignatures; i++ { sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) if err != nil { return nil, err } defer sigReader.Close() sig, err := ioutil.ReadAll(sigReader) if err != nil { return nil, err } signatures = append(signatures, sig) } return signatures, nil } // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of // the image, after they've been decompressed. func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { updatedBlobInfos := []types.BlobInfo{} manifestBlob, manifestType, err := s.GetManifest(ctx, nil) if err != nil { return nil, err } man, err := manifest.FromBlob(manifestBlob, manifestType) if err != nil { return nil, err } s.compressed = make(map[digest.Digest]digest.Digest) layerBlobs := man.LayerInfos() for _, layerBlob := range layerBlobs { branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex()) found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest") if err != nil || !found { return nil, err } found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size") if err != nil || !found { return nil, err } uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64) if err != nil { return nil, err } uncompressedDigest := digest.Digest(uncompressedDigestStr) blobInfo := types.BlobInfo{ Digest: uncompressedDigest, Size: uncompressedSize, MediaType: layerBlob.MediaType, } s.compressed[uncompressedDigest] = layerBlob.Digest updatedBlobInfos = append(updatedBlobInfos, blobInfo) } return updatedBlobInfos, nil } image-4.0.1/ostree/ostree_transport.go000066400000000000000000000211671354546467100200700ustar00rootroot00000000000000// +build containers_image_ostree package ostree import ( "bytes" "context" "fmt" "os" "path/filepath" "regexp" "strings" "github.com/containers/image/v4/directory/explicitfilepath" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) const defaultOSTreeRepo = "/ostree/repo" // Transport is an ImageTransport for ostree paths.
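// A reference has the form image[:tag][@/absolute/path/to/repo]; for illustration (hypothetical values), // "busybox:latest@/ostree/repo" names the "latest" tag of "busybox" stored in the repository at /ostree/repo, // and the repository part defaults to /ostree/repo when omitted.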
var Transport = ostreeTransport{} type ostreeTransport struct{} func (t ostreeTransport) Name() string { return "ostree" } func init() { transports.Register(Transport) } // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes key // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error { sep := strings.Index(scope, ":") if sep < 0 { return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope) } repo := scope[:sep] if !strings.HasPrefix(repo, "/") { return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope) } cleaned := filepath.Clean(repo) if cleaned != repo { return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) } // FIXME? In the namespaces within a repo, // we could be verifying the various character set and length restrictions // from docker/distribution/reference.regexp.go, but other than that there // are few semantically invalid strings. return nil } // ostreeReference is an ImageReference for ostree paths. type ostreeReference struct { image string branchName string repo string } type ostreeImageCloser struct { types.ImageCloser size int64 } func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { var repo = "" var image = "" s := strings.SplitN(ref, "@/", 2) if len(s) == 1 { image, repo = s[0], defaultOSTreeRepo } else { image, repo = s[0], "/"+s[1] } return NewReference(image, repo) } // NewReference returns an OSTree reference for a specified repo and image. func NewReference(image string, repo string) (types.ImageReference, error) { // image is not _really_ in a containers/image/docker/reference format; // as far as the libOSTree ociimage/* namespace is concerned, it is more or // less an arbitrary string with an implied tag. // Parse the image using reference.ParseNormalizedNamed so that we can // check whether the image has a tag specified, and add ":latest" if needed ostreeImage, err := reference.ParseNormalizedNamed(image) if err != nil { return nil, err } if reference.IsNameOnly(ostreeImage) { image = image + ":latest" } resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo) if err != nil { // With os.IsNotExist(err), the parent directory of repo does not exist either; // that should ordinarily not happen, but it would be a bit weird to reject // references which do not specify a repo just because the implicit defaultOSTreeRepo // does not exist. if os.IsNotExist(err) && repo == defaultOSTreeRepo { resolved = repo } else { return nil, err } } // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces // from being ambiguous with values of PolicyConfigurationIdentity.
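// For example (illustrative): if resolved could contain a colon, a scope like "/repo:with:colons" // would be ambiguous between the identity ("/repo", "with:colons") and the namespace ("/repo:with", "colons").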
if strings.Contains(resolved, ":") { return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) } return ostreeReference{ image: image, branchName: encodeOStreeRef(image), repo: resolved, }, nil } func (ref ostreeReference) Transport() types.ImageTransport { return Transport } // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. func (ref ostreeReference) StringWithinTransport() string { return fmt.Sprintf("%s@%s", ref.image, ref.repo) } // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. func (ref ostreeReference) DockerReference() reference.Named { return nil } func (ref ostreeReference) PolicyConfigurationIdentity() string { return fmt.Sprintf("%s:%s", ref.repo, ref.image) } // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it. func (ref ostreeReference) PolicyConfigurationNamespaces() []string { s := strings.SplitN(ref.image, ":", 2) if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) } name := s[0] res := []string{} for { res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) lastSlash := strings.LastIndex(name, "/") if lastSlash == -1 { break } name = name[:lastSlash] } return res } func (s *ostreeImageCloser) Size() (int64, error) { return s.size, nil } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { var tmpDir string if sys == nil || sys.OSTreeTmpDirPath == "" { tmpDir = os.TempDir() } else { tmpDir = sys.OSTreeTmpDirPath } src, err := newImageSource(tmpDir, ref) if err != nil { return nil, err } return image.FromSource(ctx, sys, src) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. 
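// For illustration (hypothetical values): // src, err := ref.NewImageSource(ctx, &types.SystemContext{OSTreeTmpDirPath: "/var/tmp"}) // if err != nil { /* handle the error */ } // defer src.Close()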
func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { var tmpDir string if sys == nil || sys.OSTreeTmpDirPath == "" { tmpDir = os.TempDir() } else { tmpDir = sys.OSTreeTmpDirPath } return newImageSource(tmpDir, ref) } // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { var tmpDir string if sys == nil || sys.OSTreeTmpDirPath == "" { tmpDir = os.TempDir() } else { tmpDir = sys.OSTreeTmpDirPath } return newImageDestination(ref, tmpDir) } // DeleteImage deletes the named image from the registry, if supported. func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { return errors.Errorf("Deleting images not implemented for ostree: images") } var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`) func encodeOStreeRef(in string) string { var buffer bytes.Buffer for i := range in { sub := in[i : i+1] if ostreeRefRegexp.MatchString(sub) { buffer.WriteString(sub) } else { buffer.WriteString(fmt.Sprintf("_%02X", sub[0])) } } return buffer.String() } // manifestPath returns a path for the manifest within an ostree repository, using our conventions. func (ref ostreeReference) manifestPath() string { return filepath.Join("manifest", "manifest.json") } // signaturePath returns a path for a signature within an ostree repository, using our conventions. func (ref ostreeReference) signaturePath(index int) string { return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1)) } image-4.0.1/ostree/ostree_transport_test.go000066400000000000000000000272751354546467100211300ustar00rootroot00000000000000// +build containers_image_ostree package ostree import ( "context" "fmt" "io/ioutil" "os" "path/filepath" "strings" "testing" _ "github.com/containers/image/v4/internal/testing/explicitfilepath-tmpdir" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" sha256digest = "@sha256:" + sha256digestHex ) func TestTransportName(t *testing.T) { assert.Equal(t, "ostree", Transport.Name()) } // A helper to replace $TMP in a repo path with a real temporary directory func withTmpDir(repo string, tmpDir string) string { return strings.Replace(repo, "$TMP", tmpDir, -1) } // A common list of repo suffixes to test for the various ImageReference methods. var repoSuffixes = []struct{ repoSuffix, resolvedRepo string }{ {"", "/ostree/repo"}, {"@/ostree/repo", "/ostree/repo"}, // /ostree/repo is accepted even if neither /ostree/repo nor /ostree exists, as a special case. {"@$TMP/at@sign@repo", "$TMP/at@sign@repo"}, // Rejected as ambiguous: /repo:with:colons could either be an (/repo, with:colons) policy configuration identity, or a (/repo:with, colons) policy configuration namespace.
{"@$TMP/repo:with:colons", ""}, } // A common list of cases for image name parsing and normalization var imageNameTestcases = []struct{ input, normalized, branchName string }{ {"busybox:notlatest", "busybox:notlatest", "busybox_3Anotlatest"}, // Explicit tag {"busybox", "busybox:latest", "busybox_3Alatest"}, // Default tag {"docker.io/library/busybox:latest", "docker.io/library/busybox:latest", "docker.io_2Flibrary_2Fbusybox_3Alatest"}, // A hierarchical name {"127.0.0.1:5000/busybox:latest", "127.0.0.1:5000/busybox:latest", "127.0.0.1_3A5000_2Fbusybox_3Alatest"}, // Port usage {"busybox" + sha256digest, "busybox" + sha256digest, "busybox_40sha256_3A0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}, {"UPPERCASEISINVALID", "", ""}, // Invalid input {"busybox:invalid+tag", "", ""}, // Invalid tag value {"busybox:tag:with:colons", "", ""}, // Multiple colons - treated as a tag which contains a colon, which is invalid {"", "", ""}, // Empty input is rejected (invalid repository.Named) } func TestTransportParseReference(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreeParseReference") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, c := range imageNameTestcases { for _, suffix := range repoSuffixes { fullInput := c.input + withTmpDir(suffix.repoSuffix, tmpDir) ref, err := Transport.ParseReference(fullInput) if c.normalized == "" || suffix.resolvedRepo == "" { assert.Error(t, err, fullInput) } else { require.NoError(t, err, fullInput) ostreeRef, ok := ref.(ostreeReference) require.True(t, ok, fullInput) assert.Equal(t, c.normalized, ostreeRef.image, fullInput) assert.Equal(t, c.branchName, ostreeRef.branchName, fullInput) assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, fullInput) } } } } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { for _, scope := range []string{ "/etc:docker.io/library/busybox:notlatest", // This also demonstrates that two colons are interpreted as repo:name:tag. 
"/etc:docker.io/library/busybox", "/etc:docker.io/library", "/etc:docker.io", "/etc:repo", "/this/does/not/exist:notlatest", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } for _, scope := range []string{ "/colon missing as a path-reference delimiter", "relative/path:busybox", "/double//slashes:busybox", "/has/./dot:busybox", "/has/dot/../dot:busybox", "/trailing/slash/:busybox", } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.Error(t, err, scope) } } func TestNewReference(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreeNewReference") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, c := range imageNameTestcases { for _, suffix := range repoSuffixes { if suffix.repoSuffix == "" { continue } caseName := c.input + suffix.repoSuffix ref, err := NewReference(c.input, withTmpDir(strings.TrimPrefix(suffix.repoSuffix, "@"), tmpDir)) if c.normalized == "" || suffix.resolvedRepo == "" { assert.Error(t, err, caseName) } else { require.NoError(t, err, caseName) ostreeRef, ok := ref.(ostreeReference) require.True(t, ok, caseName) assert.Equal(t, c.normalized, ostreeRef.image, caseName) assert.Equal(t, c.branchName, ostreeRef.branchName, caseName) assert.Equal(t, withTmpDir(suffix.resolvedRepo, tmpDir), ostreeRef.repo, caseName) } } } for _, path := range []string{ "/", "/etc", tmpDir, "relativepath", tmpDir + "/thisdoesnotexist", } { _, err := NewReference("busybox", path) require.NoError(t, err, path) } _, err = NewReference("busybox", tmpDir+"/thisparentdoesnotexist/something") assert.Error(t, err) } // A common list of reference formats to test for the various ImageReference methods. var validReferenceTestCases = []struct{ input, stringWithinTransport, policyConfigurationIdentity string }{ {"busybox", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // Everything implied {"busybox:latest@/ostree/repo", "busybox:latest@/ostree/repo", "/ostree/repo:busybox:latest"}, // All implied values explicitly specified {"example.com/ns/foo:bar@$TMP/non-DEFAULT", "example.com/ns/foo:bar@$TMP/non-DEFAULT", "$TMP/non-DEFAULT:example.com/ns/foo:bar"}, // All values explicitly specified, a hierarchical name // A non-canonical path. Testing just one, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. {"busybox@$TMP/.", "busybox:latest@$TMP", "$TMP:busybox:latest"}, // "/" as a corner case {"busybox@/", "busybox:latest@/", "/:busybox:latest"}, } func TestReferenceTransport(t *testing.T) { ref, err := Transport.ParseReference("busybox") require.NoError(t, err) assert.Equal(t, Transport, ref.Transport()) } func TestReferenceStringWithinTransport(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreeStringWithinTransport") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir)) require.NoError(t, err, c.input) stringRef := ref.StringWithinTransport() assert.Equal(t, withTmpDir(c.stringWithinTransport, tmpDir), stringRef, c.input) // Do one more round to verify that the output can be parsed, to an equal value. 
ref2, err := Transport.ParseReference(stringRef) require.NoError(t, err, c.input) stringRef2 := ref2.StringWithinTransport() assert.Equal(t, stringRef, stringRef2, c.input) } } func TestReferenceDockerReference(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreeDockerReference") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir)) require.NoError(t, err, c.input) dockerRef := ref.DockerReference() assert.Nil(t, dockerRef, c.input) } } func TestReferencePolicyConfigurationIdentity(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationIdentity") require.NoError(t, err) defer os.RemoveAll(tmpDir) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(withTmpDir(c.input, tmpDir)) require.NoError(t, err, c.input) assert.Equal(t, withTmpDir(c.policyConfigurationIdentity, tmpDir), ref.PolicyConfigurationIdentity(), c.input) } } func TestReferencePolicyConfigurationNamespaces(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreePolicyConfigurationNamespaces") require.NoError(t, err) defer os.RemoveAll(tmpDir) // Test both that DockerReferenceIdentity returns the expected value (fullName+suffix), // and that DockerReferenceNamespaces starts with the expected value (fullName), i.e. that the two functions are // consistent. for inputName, expectedNS := range map[string][]string{ "example.com/ns/repo": {"example.com/ns/repo", "example.com/ns", "example.com"}, "example.com/repo": {"example.com/repo", "example.com"}, "localhost/ns/repo": {"localhost/ns/repo", "localhost/ns", "localhost"}, "localhost/repo": {"localhost/repo", "localhost"}, "ns/repo": {"ns/repo", "ns"}, "repo": {"repo"}, } { // Test with a known path which should exist. Test just one non-canonical // path, the various other cases are tested in explicitfilepath.ResolvePathToFullyExplicit. 
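// For example (a sketch using the map above): with repo == tmpDir, the full name
// "example.com/ns/repo:notlatest" yields the identity
//
//	tmpDir + ":example.com/ns/repo:notlatest"
//
// and the namespaces tmpDir+":example.com/ns/repo", tmpDir+":example.com/ns", tmpDir+":example.com",
// each a prefix of the more specific one before it.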
for _, repoInput := range []string{tmpDir, tmpDir + "/./."} { fullName := inputName + ":notlatest" ref, err := NewReference(fullName, repoInput) require.NoError(t, err, fullName) identity := ref.PolicyConfigurationIdentity() assert.Equal(t, tmpDir+":"+expectedNS[0]+":notlatest", identity, fullName) ns := ref.PolicyConfigurationNamespaces() require.NotNil(t, ns, fullName) require.Len(t, ns, len(expectedNS), fullName) moreSpecific := identity for i := range expectedNS { assert.Equal(t, tmpDir+":"+expectedNS[i], ns[i], fmt.Sprintf("%s item %d", fullName, i)) assert.True(t, strings.HasPrefix(moreSpecific, ns[i])) moreSpecific = ns[i] } } } } func TestReferenceNewImage(t *testing.T) { ref, err := Transport.ParseReference("busybox") require.NoError(t, err) _, err = ref.NewImage(context.Background(), nil) assert.Error(t, err) } func TestReferenceNewImageSource(t *testing.T) { ref, err := Transport.ParseReference("busybox") require.NoError(t, err) _, err = ref.NewImageSource(context.Background(), nil) require.NoError(t, err) } func TestReferenceNewImageDestination(t *testing.T) { otherTmpDir, err := ioutil.TempDir("", "ostree-transport-test") require.NoError(t, err) defer os.RemoveAll(otherTmpDir) for _, c := range []struct { sys *types.SystemContext tmpDir string }{ {nil, os.TempDir()}, {&types.SystemContext{}, os.TempDir()}, {&types.SystemContext{OSTreeTmpDirPath: otherTmpDir}, otherTmpDir}, } { ref, err := Transport.ParseReference("busybox") require.NoError(t, err) dest, err := ref.NewImageDestination(context.Background(), c.sys) require.NoError(t, err) ostreeDest, ok := dest.(*ostreeImageDestination) require.True(t, ok) assert.Equal(t, c.tmpDir+"/busybox_3Alatest", ostreeDest.tmpDirPath) defer dest.Close() } } func TestReferenceDeleteImage(t *testing.T) { tmpDir, err := ioutil.TempDir("", "ostreeDeleteImage") require.NoError(t, err) defer os.RemoveAll(tmpDir) ref, err := Transport.ParseReference(withTmpDir("busybox@$TMP/this-repo-does-not-exist", tmpDir)) require.NoError(t, err) err = ref.DeleteImage(context.Background(), nil) assert.Error(t, err) } func TestEncodeOSTreeRef(t *testing.T) { // Just a smoke test assert.Equal(t, "busybox_3Alatest", encodeOStreeRef("busybox:latest")) } func TestReferenceManifestPath(t *testing.T) { ref, err := Transport.ParseReference("busybox") require.NoError(t, err) ostreeRef, ok := ref.(ostreeReference) require.True(t, ok) assert.Equal(t, fmt.Sprintf("manifest%cmanifest.json", filepath.Separator), ostreeRef.manifestPath()) } func TestReferenceSignaturePath(t *testing.T) { ref, err := Transport.ParseReference("busybox") require.NoError(t, err) ostreeRef, ok := ref.(ostreeReference) require.True(t, ok) for _, c := range []struct { input int suffix string }{ {0, "-1"}, {42, "-43"}, } { assert.Equal(t, fmt.Sprintf("manifest%csignature%s", filepath.Separator, c.suffix), ostreeRef.signaturePath(c.input), string(c.input)) } } image-4.0.1/pkg/000077500000000000000000000000001354546467100133765ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/000077500000000000000000000000001354546467100161545ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/boltdb/000077500000000000000000000000001354546467100174225ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/boltdb/boltdb.go000066400000000000000000000302311354546467100212160ustar00rootroot00000000000000// Package boltdb implements a BlobInfoCache backed by BoltDB. 
package boltdb import ( "fmt" "os" "sync" "time" "github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v4/types" bolt "github.com/etcd-io/bbolt" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) var ( // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade // we can simply start over with a different filename; update blobInfoCacheFilename. // FIXME: For CRI-O, does this need to hide information between different users? // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. uncompressedDigestBucket = []byte("uncompressedDigest") // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest // (as a set of key=digest, value="" pairs) digestByUncompressedBucket = []byte("digestByUncompressed") // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value). knownLocationsBucket = []byte("knownLocations") ) // Concurrency: // See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely // difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time. // pathLock contains a lock for a specific BoltDB database path. type pathLock struct { refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below! mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database. } var ( // pathLocks contains a lock for each currently open file. // This must be global so that independently created instances of boltDBCache exclude each other. // The map is protected by pathLocksMutex. // FIXME? Should this be based on device:inode numbers instead of paths? pathLocks = map[string]*pathLock{} pathLocksMutex = sync.Mutex{} ) // lockPath obtains the pathLock for path. // The caller must call unlockPath eventually. func lockPath(path string) { pl := func() *pathLock { // A scope for defer pathLocksMutex.Lock() defer pathLocksMutex.Unlock() pl, ok := pathLocks[path] if ok { pl.refCount++ } else { pl = &pathLock{refCount: 1, mutex: sync.Mutex{}} pathLocks[path] = pl } return pl }() pl.mutex.Lock() } // unlockPath releases the pathLock for path. func unlockPath(path string) { pathLocksMutex.Lock() defer pathLocksMutex.Unlock() pl, ok := pathLocks[path] if !ok { // Should this return an error instead? BlobInfoCache ultimately ignores errors… panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path)) } pl.mutex.Unlock() pl.refCount-- if pl.refCount == 0 { delete(pathLocks, path) } } // cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path. // // Note that we don’t keep the database open across operations, because that would lock the file and block any other // users; instead, we need to open/close it for every single write or lookup. type cache struct { path string } // New returns a BlobInfoCache implementation which uses a BoltDB file at path. // // Most users should call blobinfocache.DefaultCache instead.
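//
// A minimal usage sketch (hypothetical path and digests; the cache deliberately
// swallows I/O errors, so no error handling is shown):
//
//	cache := New(filepath.Join(tmpDir, "blob-info-cache-v1.boltdb"))
//	cache.RecordDigestUncompressedPair(compressedDigest, uncompressedDigest)
//	_ = cache.UncompressedDigest(compressedDigest) // → uncompressedDigest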
func New(path string) types.BlobInfoCache { return &cache{path: path} } // view runs the specified fn within a read-only transaction on the database. func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) { // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding // a read lock, blocking any future writes. // Hence this preliminary check, which is RACY: Another process could remove the file // between the Lstat call and opening the database. if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) { return err } lockPath(bdc.path) defer unlockPath(bdc.path) db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return err } defer func() { if err := db.Close(); retErr == nil && err != nil { retErr = err } }() return db.View(fn) } // update runs the specified fn within a read-write transaction on the database. func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) { lockPath(bdc.path) defer unlockPath(bdc.path) db, err := bolt.Open(bdc.path, 0600, nil) if err != nil { return err } defer func() { if err := db.Close(); retErr == nil && err != nil { retErr = err } }() return db.Update(fn) } // uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction. func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest { if b := tx.Bucket(uncompressedDigestBucket); b != nil { if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil { d, err := digest.Parse(string(uncompressedBytes)) if err == nil { return d } // FIXME? Log err (but throttle the log volume on repeated accesses)? } } // Presence in digestByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest. // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings // when we already record a (compressed, uncompressed) pair. if b := tx.Bucket(digestByUncompressedBucket); b != nil { if b = b.Bucket([]byte(anyDigest.String())); b != nil { c := b.Cursor() if k, _ := c.First(); k != nil { // The bucket is non-empty return anyDigest } } } return "" } // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. // Returns "" if nothing is known about the digest (it may be compressed or uncompressed). func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { var res digest.Digest if err := bdc.view(func(tx *bolt.Tx) error { res = bdc.uncompressedDigest(tx, anyDigest) return nil }); err != nil { // Including os.IsNotExist(err) return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? } return res } // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest. // It’s allowed for anyDigest == uncompressed. // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
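//
// The resulting BoltDB layout is roughly (a sketch, using the bucket names declared above):
//
//	uncompressedDigest:   anyDigest → uncompressed
//	digestByUncompressed: uncompressed → { anyDigest: "" }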
func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { _ = bdc.update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) if err != nil { return err } key := []byte(anyDigest.String()) if previousBytes := b.Get(key); previousBytes != nil { previous, err := digest.Parse(string(previousBytes)) if err != nil { return err } if previous != uncompressed { logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) } } if err := b.Put(key, []byte(uncompressed.String())); err != nil { return err } b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) if err != nil { return err } b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) if err != nil { return err } if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. return err } return nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { _ = bdc.update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) if err != nil { return err } b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) if err != nil { return err } b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) if err != nil { return err } b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) if err != nil { return err } value, err := time.Now().MarshalBinary() if err != nil { return err } if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. return err } return nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates. func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime { b := scopeBucket.Bucket([]byte(digest.String())) if b == nil { return candidates } _ = b.ForEach(func(k, v []byte) error { t := time.Time{} if err := t.UnmarshalBinary(v); err != nil { return err } candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: types.BICReplacementCandidate{ Digest: digest, Location: types.BICLocationReference{Opaque: string(k)}, }, LastSeen: t, }) return nil }) // FIXME? Log error (but throttle the log volume on repeated accesses)? return candidates } // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused // within the specified (transport, scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest.
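//
// For example (hypothetical digests and locations): if location L1 was recorded for
// primaryDigest, and L2 for another digest sharing the same uncompressed form, then
// canSubstitute == false returns only the L1 candidate, while canSubstitute == true
// can also return the L2 candidate, ordered by the rules in the prioritize package.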
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { res := []prioritize.CandidateWithTime{} var uncompressedDigestValue digest.Digest // = "" if err := bdc.view(func(tx *bolt.Tx) error { scopeBucket := tx.Bucket(knownLocationsBucket) if scopeBucket == nil { return nil } scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) if scopeBucket == nil { return nil } scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) if scopeBucket == nil { return nil } res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest) if canSubstitute { if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { b := tx.Bucket(digestByUncompressedBucket) if b != nil { b = b.Bucket([]byte(uncompressedDigestValue.String())) if b != nil { if err := b.ForEach(func(k, _ []byte) error { d, err := digest.Parse(string(k)) if err != nil { return err } if d != primaryDigest && d != uncompressedDigestValue { res = bdc.appendReplacementCandidates(res, scopeBucket, d) } return nil }); err != nil { return err } } } if uncompressedDigestValue != primaryDigest { res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue) } } } return nil }); err != nil { // Including os.IsNotExist(err) return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)? } return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) } image-4.0.1/pkg/blobinfocache/boltdb/boltdb_test.go000066400000000000000000000020311354546467100222520ustar00rootroot00000000000000package boltdb import ( "io/ioutil" "os" "path/filepath" "testing" "github.com/containers/image/v4/pkg/blobinfocache/internal/test" "github.com/containers/image/v4/types" "github.com/stretchr/testify/require" ) func newTestCache(t *testing.T) (types.BlobInfoCache, func(t *testing.T)) { // We need a separate temporary directory here, because bolt.Open(…, &bolt.Options{Readonly:true}) can't deal with // an existing but empty file, and incorrectly fails without releasing the lock - which in turn causes // any future writes to hang. Creating a temporary directory allows us to use a path to a // non-existent file, thus replicating the expected conditions for creating a new DB. dir, err := ioutil.TempDir("", "boltdb") require.NoError(t, err) return New(filepath.Join(dir, "db")), func(t *testing.T) { err = os.RemoveAll(dir) require.NoError(t, err) } } func TestNew(t *testing.T) { test.GenericCache(t, newTestCache) } // FIXME: Tests for the various corner cases / failure cases of boltDBCache should be added here. image-4.0.1/pkg/blobinfocache/default.go000066400000000000000000000052321354546467100201310ustar00rootroot00000000000000package blobinfocache import ( "fmt" "os" "path/filepath" "strconv" "github.com/containers/image/v4/pkg/blobinfocache/boltdb" "github.com/containers/image/v4/pkg/blobinfocache/memory" "github.com/containers/image/v4/types" "github.com/sirupsen/logrus" ) const ( // blobInfoCacheFilename is the file name used for blob info caches. // If the format changes in an incompatible way, increase the version number. blobInfoCacheFilename = "blob-info-cache-v1.boltdb" // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes. 
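// For non-root users, blobInfoCacheDir below instead derives a path under
// $XDG_DATA_HOME (or ~/.local/share), e.g. (sketch):
//
//	$XDG_DATA_HOME/containers/cache/blob-info-cache-v1.boltdb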
systemBlobInfoCacheDir = "/var/lib/containers/cache" ) // blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid. // euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory. func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) { if sys != nil && sys.BlobInfoCacheDir != "" { return sys.BlobInfoCacheDir, nil } // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe. if euid == 0 { if sys != nil && sys.RootForImplicitAbsolutePaths != "" { return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil } return systemBlobInfoCacheDir, nil } // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts. dataDir := os.Getenv("XDG_DATA_HOME") if dataDir == "" { home := os.Getenv("HOME") if home == "" { return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty") } dataDir = filepath.Join(home, ".local", "share") } return filepath.Join(dataDir, "containers", "cache"), nil } func getRootlessUID() int { uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") if uidEnv != "" { u, _ := strconv.Atoi(uidEnv) return u } return os.Geteuid() } // DefaultCache returns the default BlobInfoCache implementation appropriate for sys. func DefaultCache(sys *types.SystemContext) types.BlobInfoCache { dir, err := blobInfoCacheDir(sys, getRootlessUID()) if err != nil { logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename) return memory.New() } path := filepath.Join(dir, blobInfoCacheFilename) if err := os.MkdirAll(dir, 0700); err != nil { logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err) return memory.New() } logrus.Debugf("Using blob info cache at %s", path) return boltdb.New(path) }
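// A sketch of the shared env save/restore helper the FIXME below asks for
// (hypothetical, not part of this package):
//
//	func setEnvForTest(key, value string) (restore func()) {
//		old, had := os.LookupEnv(key)
//		os.Setenv(key, value)
//		return func() {
//			if had {
//				os.Setenv(key, old)
//			} else {
//				os.Unsetenv(key)
//			}
//		}
//	}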
oldXRD, hasXRD := os.LookupEnv("XDG_RUNTIME_DIR") defer func() { if hasXRD { os.Setenv("XDG_RUNTIME_DIR", oldXRD) } else { os.Unsetenv("XDG_RUNTIME_DIR") } }() // FIXME: This should be a shared helper in internal/testing oldHome, hasHome := os.LookupEnv("HOME") defer func() { if hasHome { os.Setenv("HOME", oldHome) } else { os.Unsetenv("HOME") } }() os.Setenv("HOME", homeDir) os.Setenv("XDG_DATA_HOME", xdgDataHome) // The default paths and explicit overrides for _, c := range []struct { sys *types.SystemContext euid int expected string }{ // The common case {nil, 0, systemBlobInfoCacheDir}, {nil, 1, filepath.Join(xdgDataHome, "containers", "cache")}, // There is a context, but it does not override the path. {&types.SystemContext{}, 0, systemBlobInfoCacheDir}, {&types.SystemContext{}, 1, filepath.Join(xdgDataHome, "containers", "cache")}, // Path overridden {&types.SystemContext{BlobInfoCacheDir: nondefaultDir}, 0, nondefaultDir}, {&types.SystemContext{BlobInfoCacheDir: nondefaultDir}, 1, nondefaultDir}, // Root overridden {&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, 0, filepath.Join(rootPrefix, systemBlobInfoCacheDir)}, {&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, 1, filepath.Join(xdgDataHome, "containers", "cache")}, // Root and path overrides present simultaneously, { &types.SystemContext{ RootForImplicitAbsolutePaths: rootPrefix, BlobInfoCacheDir: nondefaultDir, }, 0, nondefaultDir, }, { &types.SystemContext{ RootForImplicitAbsolutePaths: rootPrefix, BlobInfoCacheDir: nondefaultDir, }, 1, nondefaultDir, }, } { path, err := blobInfoCacheDir(c.sys, c.euid) require.NoError(t, err) assert.Equal(t, c.expected, path) } // Paths used by unprivileged users for _, c := range []struct { xdgDH, home, expected string }{ {"", homeDir, filepath.Join(homeDir, ".local", "share", "containers", "cache")}, // HOME only {xdgDataHome, "", filepath.Join(xdgDataHome, "containers", "cache")}, // XDG_DATA_HOME only {xdgDataHome, homeDir, filepath.Join(xdgDataHome, "containers", "cache")}, // both {"", "", ""}, // neither } { if c.xdgDH != "" { os.Setenv("XDG_DATA_HOME", c.xdgDH) } else { os.Unsetenv("XDG_DATA_HOME") } if c.home != "" { os.Setenv("HOME", c.home) } else { os.Unsetenv("HOME") } for _, sys := range []*types.SystemContext{nil, {}} { path, err := blobInfoCacheDir(sys, 1) if c.expected != "" { require.NoError(t, err) assert.Equal(t, c.expected, path) } else { assert.Error(t, err) } } } } func TestDefaultCache(t *testing.T) { tmpDir, err := ioutil.TempDir("", "TestDefaultCache") require.NoError(t, err) //defer os.RemoveAll(tmpDir) // Success normalDir := filepath.Join(tmpDir, "normal") c := DefaultCache(&types.SystemContext{BlobInfoCacheDir: normalDir}) // This is ugly hard-coding internals of boltDBCache: assert.Equal(t, boltdb.New(filepath.Join(normalDir, blobInfoCacheFilename)), c) // Error running blobInfoCacheDir: // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not // run in parallel unless they opt in by calling t.Parallel(). So don’t do that. 
oldXRD, hasXRD := os.LookupEnv("XDG_RUNTIME_DIR") defer func() { if hasXRD { os.Setenv("XDG_RUNTIME_DIR", oldXRD) } else { os.Unsetenv("XDG_RUNTIME_DIR") } }() // FIXME: This should be a shared helper in internal/testing oldHome, hasHome := os.LookupEnv("HOME") defer func() { if hasHome { os.Setenv("HOME", oldHome) } else { os.Unsetenv("HOME") } }() os.Unsetenv("HOME") os.Unsetenv("XDG_DATA_HOME") c = DefaultCache(nil) assert.IsType(t, memory.New(), c) // Error creating the parent directory: unwritableDir := filepath.Join(tmpDir, "unwritable") err = os.Mkdir(unwritableDir, 0700) require.NoError(t, err) defer os.Chmod(unwritableDir, 0700) // To make it possible to remove it again err = os.Chmod(unwritableDir, 0500) require.NoError(t, err) st, _ := os.Stat(unwritableDir) logrus.Errorf("%s: %#v", unwritableDir, st) c = DefaultCache(&types.SystemContext{BlobInfoCacheDir: filepath.Join(unwritableDir, "subdirectory")}) assert.IsType(t, memory.New(), c) } image-4.0.1/pkg/blobinfocache/internal/000077500000000000000000000000001354546467100197705ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/internal/prioritize/000077500000000000000000000000001354546467100221705ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/internal/prioritize/prioritize.go000066400000000000000000000115451354546467100247250ustar00rootroot00000000000000// Package prioritize provides utilities for prioritizing locations in // types.BlobInfoCache.CandidateLocations. package prioritize import ( "sort" "time" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" ) // replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates, // and therefore ultimately by types.BlobInfoCache.CandidateLocations. // This is a heuristic/guess, and could well use a different value. const replacementAttempts = 5 // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. type CandidateWithTime struct { Candidate types.BICReplacementCandidate // The replacement candidate LastSeen time.Time // Time the candidate was last known to exist (either read or written) } // candidateSortState is a local state implementing sort.Interface on candidates to prioritize, // along with the specially-treated digest values for the implementation of sort.Interface.Less type candidateSortState struct { cs []CandidateWithTime // The entries to sort primaryDigest digest.Digest // The digest the user actually asked for uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest } func (css *candidateSortState) Len() int { return len(css.cs) } func (css *candidateSortState) Less(i, j int) bool { xi := css.cs[i] xj := css.cs[j] // primaryDigest entries come first, more recent first. // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) // First, deal with the primaryDigest/uncompressedDigest cases: if xi.Candidate.Digest != xj.Candidate.Digest { // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter if xi.Candidate.Digest == css.primaryDigest { return true } if xj.Candidate.Digest == css.primaryDigest { return false } if css.uncompressedDigest != "" { if xi.Candidate.Digest == css.uncompressedDigest { return false } if xj.Candidate.Digest == css.uncompressedDigest { return true } } } else { // xi.Candidate.Digest == xj.Candidate.Digest // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { return xi.LastSeen.After(xj.LastSeen) } } // Neither of the digests are primaryDigest/uncompressedDigest: if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time return xi.LastSeen.After(xj.LastSeen) } // Fall back to digest, if timestamps end up _exactly_ the same (how?!) return xi.Candidate.Digest < xj.Candidate.Digest } func (css *candidateSortState) Swap(i, j int) { css.cs[i], css.cs[j] = css.cs[j], css.cs[i] } // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the // number of entries to limit, only to make testing simpler. func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate { // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. sort.Sort(&candidateSortState{ cs: cs, primaryDigest: primaryDigest, uncompressedDigest: uncompressedDigest, }) resLength := len(cs) if resLength > maxCandidates { resLength = maxCandidates } res := make([]types.BICReplacementCandidate, resLength) for i := range res { res[i] = cs[i].Candidate } return res } // DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times, // the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), // and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations. // // WARNING: The array of candidates is destructively modified. (The implementation of this function could of course // make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.) 
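//
// For example (hypothetical inputs): given candidates for the primary digest P, an
// unrelated digest X, and the uncompressed digest U, the result starts with all P
// entries (newest first), then X entries ordered by time (ties broken by digest),
// and ends with U entries (newest first), truncated to replacementAttempts items.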
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate { return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts) } image-4.0.1/pkg/blobinfocache/internal/prioritize/prioritize_test.go000066400000000000000000000175301354546467100257640ustar00rootroot00000000000000package prioritize import ( "fmt" "testing" "time" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" ) const ( digestUnknown = digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111") digestUncompressed = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222") digestCompressedA = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333") digestCompressedB = digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444") digestCompressedUnrelated = digest.Digest("sha256:5555555555555555555555555555555555555555555555555555555555555555") digestCompressedPrimary = digest.Digest("sha256:6666666666666666666666666666666666666666666666666666666666666666") ) var ( // cssLiteral contains a non-trivial candidateSortState shared among several tests below. cssLiteral = candidateSortState{ cs: []CandidateWithTime{ {types.BICReplacementCandidate{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}}, time.Unix(1, 0)}, {types.BICReplacementCandidate{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}}, time.Unix(1, 1)}, {types.BICReplacementCandidate{Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}}, time.Unix(1, 1)}, {types.BICReplacementCandidate{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}}, time.Unix(1, 0)}, {types.BICReplacementCandidate{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}}, time.Unix(1, 1)}, {types.BICReplacementCandidate{Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}}, time.Unix(1, 1)}, {types.BICReplacementCandidate{Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}}, time.Unix(2, 0)}, {types.BICReplacementCandidate{Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}}, time.Unix(1, 0)}, }, primaryDigest: digestCompressedPrimary, uncompressedDigest: digestUncompressed, } // cssExpectedReplacementCandidates is the fully-sorted, unlimited, result of prioritizing cssLiteral. 
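// (In the expected order below: P2 precedes P1 because it is newer; B2 is the most recent
// non-primary entry; A2 precedes B1 because their timestamps tie and A's digest sorts lower;
// the uncompressed U entries come last, newest first.)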
cssExpectedReplacementCandidates = []types.BICReplacementCandidate{ {Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P2"}}, {Digest: digestCompressedPrimary, Location: types.BICLocationReference{Opaque: "P1"}}, {Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B2"}}, {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A2"}}, {Digest: digestCompressedB, Location: types.BICLocationReference{Opaque: "B1"}}, {Digest: digestCompressedA, Location: types.BICLocationReference{Opaque: "A1"}}, {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U2"}}, {Digest: digestUncompressed, Location: types.BICLocationReference{Opaque: "U1"}}, } ) func TestCandidateSortStateLen(t *testing.T) { css := cssLiteral assert.Equal(t, 8, css.Len()) css.cs = []CandidateWithTime{} assert.Equal(t, 0, css.Len()) } func TestCandidateSortStateLess(t *testing.T) { type p struct { d digest.Digest t int64 } // Primary criteria: Also ensure that time does not matter for _, c := range []struct { name string res int d0, d1 digest.Digest }{ {"primary < any", -1, digestCompressedPrimary, digestCompressedA}, {"any < uncompressed", -1, digestCompressedA, digestUncompressed}, {"primary < uncompressed", -1, digestCompressedPrimary, digestUncompressed}, } { for _, tms := range [][2]int64{{1, 2}, {2, 1}, {1, 1}} { caseName := fmt.Sprintf("%s %v", c.name, tms) css := candidateSortState{ cs: []CandidateWithTime{ {types.BICReplacementCandidate{Digest: c.d0, Location: types.BICLocationReference{Opaque: "L0"}}, time.Unix(tms[0], 0)}, {types.BICReplacementCandidate{Digest: c.d1, Location: types.BICLocationReference{Opaque: "L1"}}, time.Unix(tms[1], 0)}, }, primaryDigest: digestCompressedPrimary, uncompressedDigest: digestUncompressed, } assert.Equal(t, c.res < 0, css.Less(0, 1), caseName) assert.Equal(t, c.res > 0, css.Less(1, 0), caseName) if c.d0 != digestUncompressed && c.d1 != digestUncompressed { css.uncompressedDigest = "" assert.Equal(t, c.res < 0, css.Less(0, 1), caseName) assert.Equal(t, c.res > 0, css.Less(1, 0), caseName) css.uncompressedDigest = css.primaryDigest assert.Equal(t, c.res < 0, css.Less(0, 1), caseName) assert.Equal(t, c.res > 0, css.Less(1, 0), caseName) } } } // Ordering within the three primary groups for _, c := range []struct { name string res int p0, p1 p }{ {"primary: t=2 < t=1", -1, p{digestCompressedPrimary, 2}, p{digestCompressedPrimary, 1}}, {"primary: t=1 == t=1", 0, p{digestCompressedPrimary, 1}, p{digestCompressedPrimary, 1}}, {"uncompressed: t=2 < t=1", -1, p{digestUncompressed, 2}, p{digestUncompressed, 1}}, {"uncompressed: t=1 == t=1", 0, p{digestUncompressed, 1}, p{digestUncompressed, 1}}, {"any: t=2 < t=1, [d=A vs. d=B lower-priority]", -1, p{digestCompressedA, 2}, p{digestCompressedB, 1}}, {"any: t=2 < t=1, [d=B vs. d=A lower-priority]", -1, p{digestCompressedB, 2}, p{digestCompressedA, 1}}, {"any: t=2 < t=1, [d=A vs. 
d=A lower-priority]", -1, p{digestCompressedA, 2}, p{digestCompressedA, 1}}, {"any: t=1 == t=1, d=A < d=B", -1, p{digestCompressedA, 1}, p{digestCompressedB, 1}}, {"any: t=1 == t=1, d=A == d=A", 0, p{digestCompressedA, 1}, p{digestCompressedA, 1}}, } { css := candidateSortState{ cs: []CandidateWithTime{ {types.BICReplacementCandidate{Digest: c.p0.d, Location: types.BICLocationReference{Opaque: "L0"}}, time.Unix(c.p0.t, 0)}, {types.BICReplacementCandidate{Digest: c.p1.d, Location: types.BICLocationReference{Opaque: "L1"}}, time.Unix(c.p1.t, 0)}, }, primaryDigest: digestCompressedPrimary, uncompressedDigest: digestUncompressed, } assert.Equal(t, c.res < 0, css.Less(0, 1), c.name) assert.Equal(t, c.res > 0, css.Less(1, 0), c.name) if c.p0.d != digestUncompressed && c.p1.d != digestUncompressed { css.uncompressedDigest = "" assert.Equal(t, c.res < 0, css.Less(0, 1), c.name) assert.Equal(t, c.res > 0, css.Less(1, 0), c.name) css.uncompressedDigest = css.primaryDigest assert.Equal(t, c.res < 0, css.Less(0, 1), c.name) assert.Equal(t, c.res > 0, css.Less(1, 0), c.name) } } } func TestCandidateSortStateSwap(t *testing.T) { freshCSS := func() candidateSortState { // Return a deep copy of cssLiteral which is safe to modify. res := cssLiteral res.cs = append([]CandidateWithTime{}, cssLiteral.cs...) return res } css := freshCSS() css.Swap(0, 1) assert.Equal(t, cssLiteral.cs[1], css.cs[0]) assert.Equal(t, cssLiteral.cs[0], css.cs[1]) assert.Equal(t, cssLiteral.cs[2], css.cs[2]) css = freshCSS() css.Swap(1, 1) assert.Equal(t, cssLiteral, css) } func TestDestructivelyPrioritizeReplacementCandidatesWithMax(t *testing.T) { for _, max := range []int{0, 1, replacementAttempts, 100} { // Just a smoke test; we mostly rely on test coverage in TestCandidateSortStateLess res := destructivelyPrioritizeReplacementCandidatesWithMax(append([]CandidateWithTime{}, cssLiteral.cs...), digestCompressedPrimary, digestUncompressed, max) if max > len(cssExpectedReplacementCandidates) { max = len(cssExpectedReplacementCandidates) } assert.Equal(t, cssExpectedReplacementCandidates[:max], res) } } func TestDestructivelyPrioritizeReplacementCandidates(t *testing.T) { // Just a smoke test; we mostly rely on test coverage in TestCandidateSortStateLess res := DestructivelyPrioritizeReplacementCandidates(append([]CandidateWithTime{}, cssLiteral.cs...), digestCompressedPrimary, digestUncompressed) assert.Equal(t, cssExpectedReplacementCandidates[:replacementAttempts], res) } image-4.0.1/pkg/blobinfocache/internal/test/000077500000000000000000000000001354546467100207475ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/internal/test/test.go000066400000000000000000000200371354546467100222570ustar00rootroot00000000000000// Package test provides generic BlobInfoCache test helpers. 
package test import ( "testing" "github.com/containers/image/v4/internal/testing/mocks" "github.com/containers/image/v4/types" digest "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" ) const ( digestUnknown = digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111") digestUncompressed = digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222") digestCompressedA = digest.Digest("sha256:3333333333333333333333333333333333333333333333333333333333333333") digestCompressedB = digest.Digest("sha256:4444444444444444444444444444444444444444444444444444444444444444") digestCompressedUnrelated = digest.Digest("sha256:5555555555555555555555555555555555555555555555555555555555555555") digestCompressedPrimary = digest.Digest("sha256:6666666666666666666666666666666666666666666666666666666666666666") ) // GenericCache runs an implementation-independent set of tests, given a // newTestCache, which can be called repeatedly and always returns a (cache, cleanup callback) pair. func GenericCache(t *testing.T, newTestCache func(t *testing.T) (types.BlobInfoCache, func(t *testing.T))) { for _, s := range []struct { name string fn func(t *testing.T, cache types.BlobInfoCache) }{ {"UncompressedDigest", testGenericUncompressedDigest}, {"RecordDigestUncompressedPair", testGenericRecordDigestUncompressedPair}, {"RecordKnownLocations", testGenericRecordKnownLocations}, {"CandidateLocations", testGenericCandidateLocations}, } { t.Run(s.name, func(t *testing.T) { cache, cleanup := newTestCache(t) defer cleanup(t) s.fn(t, cache) }) } } func testGenericUncompressedDigest(t *testing.T, cache types.BlobInfoCache) { // Nothing is known. assert.Equal(t, digest.Digest(""), cache.UncompressedDigest(digestUnknown)) cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed) cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed) // Known compressed→uncompressed mapping assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedA)) assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedB)) // This implicitly marks digestUncompressed as uncompressed. assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestUncompressed)) // Known uncompressed→self mapping cache.RecordDigestUncompressedPair(digestCompressedUnrelated, digestCompressedUnrelated) assert.Equal(t, digestCompressedUnrelated, cache.UncompressedDigest(digestCompressedUnrelated)) } func testGenericRecordDigestUncompressedPair(t *testing.T, cache types.BlobInfoCache) { for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things. // Known compressed→uncompressed mapping cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed) assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedA)) // Two mappings to the same uncompressed digest cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed) assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestCompressedB)) // Mapping an uncompressed digest to self cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed) assert.Equal(t, digestUncompressed, cache.UncompressedDigest(digestUncompressed)) } } func testGenericRecordKnownLocations(t *testing.T, cache types.BlobInfoCache) { transport := mocks.NameImageTransport("==BlobInfocache transport mock") for i := 0; i < 2; i++ { // Record the same data twice to ensure redundant writes don’t break things.
for _, scopeName := range []string{"A", "B"} { // Run the test in two different scopes to verify they don't affect each other. scope := types.BICTransportScope{Opaque: scopeName} for _, digest := range []digest.Digest{digestCompressedA, digestCompressedB} { // Two different digests should not affect each other either. lr1 := types.BICLocationReference{Opaque: scopeName + "1"} lr2 := types.BICLocationReference{Opaque: scopeName + "2"} cache.RecordKnownLocation(transport, scope, digest, lr2) cache.RecordKnownLocation(transport, scope, digest, lr1) assert.Equal(t, []types.BICReplacementCandidate{ {Digest: digest, Location: lr1}, {Digest: digest, Location: lr2}, }, cache.CandidateLocations(transport, scope, digest, false)) } } } } // candidate is a shorthand for types.BICReplacementCandidate type candidate struct { d digest.Digest lr string } func assertCandidatesMatch(t *testing.T, scopeName string, expected []candidate, actual []types.BICReplacementCandidate) { e := make([]types.BICReplacementCandidate, len(expected)) for i, ev := range expected { e[i] = types.BICReplacementCandidate{Digest: ev.d, Location: types.BICLocationReference{Opaque: scopeName + ev.lr}} } assert.Equal(t, e, actual) } func testGenericCandidateLocations(t *testing.T, cache types.BlobInfoCache) { transport := mocks.NameImageTransport("==BlobInfocache transport mock") cache.RecordDigestUncompressedPair(digestCompressedA, digestUncompressed) cache.RecordDigestUncompressedPair(digestCompressedB, digestUncompressed) cache.RecordDigestUncompressedPair(digestUncompressed, digestUncompressed) digestNameSet := []struct { n string d digest.Digest }{ {"U", digestUncompressed}, {"A", digestCompressedA}, {"B", digestCompressedB}, {"CU", digestCompressedUnrelated}, } for _, scopeName := range []string{"A", "B"} { // Run the test in two different scopes to verify they don't affect each other. scope := types.BICTransportScope{Opaque: scopeName} // Nothing is known. assert.Equal(t, []types.BICReplacementCandidate{}, cache.CandidateLocations(transport, scope, digestUnknown, false)) assert.Equal(t, []types.BICReplacementCandidate{}, cache.CandidateLocations(transport, scope, digestUnknown, true)) // Record "2" entries before "1" entries; then results should sort "1" (more recent) before "2" (older) for _, suffix := range []string{"2", "1"} { for _, e := range digestNameSet { cache.RecordKnownLocation(transport, scope, e.d, types.BICLocationReference{Opaque: scopeName + e.n + suffix}) } } // No substitutions allowed: for _, e := range digestNameSet { assertCandidatesMatch(t, scopeName, []candidate{ {d: e.d, lr: e.n + "1"}, {d: e.d, lr: e.n + "2"}, }, cache.CandidateLocations(transport, scope, e.d, false)) } // With substitutions: The original digest is always preferred, then other compressed, then the uncompressed one.
assertCandidatesMatch(t, scopeName, []candidate{ {d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"}, {d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"}, {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"}, }, cache.CandidateLocations(transport, scope, digestCompressedA, true)) assertCandidatesMatch(t, scopeName, []candidate{ {d: digestCompressedB, lr: "B1"}, {d: digestCompressedB, lr: "B2"}, {d: digestCompressedA, lr: "A1"}, {d: digestCompressedA, lr: "A2"}, {d: digestUncompressed, lr: "U1"}, // Beyond the replacementAttempts limit: {d: digestUncompressed, lr: "U2"}, }, cache.CandidateLocations(transport, scope, digestCompressedB, true)) assertCandidatesMatch(t, scopeName, []candidate{ {d: digestUncompressed, lr: "U1"}, {d: digestUncompressed, lr: "U2"}, // "1" entries were added after "2", and A/Bs are sorted in the reverse of digestNameSet order {d: digestCompressedB, lr: "B1"}, {d: digestCompressedA, lr: "A1"}, {d: digestCompressedB, lr: "B2"}, // Beyond the replacementAttempts limit: {d: digestCompressedA, lr: "A2"}, }, cache.CandidateLocations(transport, scope, digestUncompressed, true)) // Locations are known, but no relationships assertCandidatesMatch(t, scopeName, []candidate{ {d: digestCompressedUnrelated, lr: "CU1"}, {d: digestCompressedUnrelated, lr: "CU2"}, }, cache.CandidateLocations(transport, scope, digestCompressedUnrelated, true)) } } image-4.0.1/pkg/blobinfocache/memory/000077500000000000000000000000001354546467100174645ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/memory/memory.go000066400000000000000000000153671354546467100213350ustar00rootroot00000000000000// Package memory implements an in-memory BlobInfoCache. package memory import ( "sync" "time" "github.com/containers/image/v4/pkg/blobinfocache/internal/prioritize" "github.com/containers/image/v4/types" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) // locationKey only exists to make lookup in knownLocations easier. type locationKey struct { transport string scope types.BICTransportScope blobDigest digest.Digest } // cache implements an in-memory-only BlobInfoCache type cache struct { mutex sync.Mutex // The following fields can only be accessed with mutex held. uncompressedDigests map[digest.Digest]digest.Digest digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference } // New returns a BlobInfoCache implementation which is in-memory only. // // This is primarily intended for tests, but also used as a fallback // if blobinfocache.DefaultCache can’t determine, or set up, the // location for a persistent cache. Most users should use // blobinfocache.DefaultCache instead of calling this directly. // Manual users of types.{ImageSource,ImageDestination} might also use // this instead of a persistent cache. func New() types.BlobInfoCache { return &cache{ uncompressedDigests: map[digest.Digest]digest.Digest{}, digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{}, knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, } } // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
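//
// For example (hypothetical digests c and u):
//
//	cache := New()
//	cache.RecordDigestUncompressedPair(c, u)
//	cache.UncompressedDigest(c) // → u
//	cache.UncompressedDigest(u) // → u (u is now known to be uncompressed)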
func (mem *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { mem.mutex.Lock() defer mem.mutex.Unlock() return mem.uncompressedDigestLocked(anyDigest) } // uncompressedDigestLocked implements types.BlobInfoCache.UncompressedDigest, but must be called only with mem.mutex held. func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Digest { if d, ok := mem.uncompressedDigests[anyDigest]; ok { return d } // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest. // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings // when we already record a (compressed, uncompressed) pair. if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 { return anyDigest } return "" } // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest. // It’s allowed for anyDigest == uncompressed. // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { mem.mutex.Lock() defer mem.mutex.Unlock() if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed { logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) } mem.uncompressedDigests[anyDigest] = uncompressed anyDigestSet, ok := mem.digestsByUncompressed[uncompressed] if !ok { anyDigestSet = map[digest.Digest]struct{}{} mem.digestsByUncompressed[uncompressed] = anyDigestSet } anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again. } // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { mem.mutex.Lock() defer mem.mutex.Unlock() key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest} locationScope, ok := mem.knownLocations[key] if !ok { locationScope = map[types.BICLocationReference]time.Time{} mem.knownLocations[key] = locationScope } locationScope[location] = time.Now() // Possibly overwriting an older entry. } // appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime { locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present for l, t := range locations { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: types.BICReplacementCandidate{ Digest: digest, Location: l, }, LastSeen: t, }) } return candidates } // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused // within the specified (transport, scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map for d := range otherDigests { if d != primaryDigest && d != uncompressedDigest { res = mem.appendReplacementCandidates(res, transport, scope, d) } } if uncompressedDigest != primaryDigest { res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest) } } } return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) } image-4.0.1/pkg/blobinfocache/memory/memory_test.go000066400000000000000000000005111354546467100223570ustar00rootroot00000000000000package memory import ( "testing" "github.com/containers/image/v4/pkg/blobinfocache/internal/test" "github.com/containers/image/v4/types" ) func newTestCache(t *testing.T) (types.BlobInfoCache, func(t *testing.T)) { return New(), func(t *testing.T) {} } func TestNew(t *testing.T) { test.GenericCache(t, newTestCache) } image-4.0.1/pkg/blobinfocache/none/000077500000000000000000000000001354546467100191135ustar00rootroot00000000000000image-4.0.1/pkg/blobinfocache/none/none.go000066400000000000000000000050521354546467100204030ustar00rootroot00000000000000// Package none implements a dummy BlobInfoCache which records no data. package none import ( "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" ) // noCache implements a dummy BlobInfoCache which records no data. type noCache struct { } // NoCache implements BlobInfoCache by not recording any data. // // This exists primarily for implementations of configGetter for // Manifest.Inspect, because configs only have one representation. // Any use of BlobInfoCache with blobs should usually use at least a // short-lived cache, ideally blobinfocache.DefaultCache. var NoCache types.BlobInfoCache = noCache{} // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
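//
// For noCache this is always "" — a sketch of its intended use:
//
//	var cache types.BlobInfoCache = NoCache // records nothing, finds nothing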
func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { return "" } // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest. // It’s allowed for anyDigest == uncompressed. // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { } // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { } // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused // within the specified (transport, scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { return nil } image-4.0.1/pkg/compression/000077500000000000000000000000001354546467100157375ustar00rootroot00000000000000image-4.0.1/pkg/compression/compression.go000066400000000000000000000124221354546467100206300ustar00rootroot00000000000000package compression import ( "bytes" "compress/bzip2" "fmt" "io" "io/ioutil" "github.com/containers/image/v4/pkg/compression/internal" "github.com/containers/image/v4/pkg/compression/types" "github.com/klauspost/pgzip" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" ) // Algorithm is a compression algorithm that can be used for CompressStream. type Algorithm = types.Algorithm var ( // Gzip compression. Gzip = internal.NewAlgorithm("gzip", []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor) // Bzip2 compression. Bzip2 = internal.NewAlgorithm("bzip2", []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor) // Xz compression. Xz = internal.NewAlgorithm("Xz", []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor) // Zstd compression. Zstd = internal.NewAlgorithm("zstd", []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor) compressionAlgorithms = map[string]Algorithm{ Gzip.Name(): Gzip, Bzip2.Name(): Bzip2, Xz.Name(): Xz, Zstd.Name(): Zstd, } ) // AlgorithmByName returns the compression Algorithm registered under the given name. func AlgorithmByName(name string) (Algorithm, error) { algorithm, ok := compressionAlgorithms[name] if ok { return algorithm, nil } return Algorithm{}, fmt.Errorf("cannot find compressor for %q", name) } // DecompressorFunc returns the decompressed stream, given a compressed stream. // The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
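//
// A typical use, together with DetectCompression below (sketch; error handling elided):
//
//	decompressor, stream, _ := DetectCompression(input)
//	if decompressor != nil {
//		rc, _ := decompressor(stream)
//		defer rc.Close()
//		// read uncompressed data from rc
//	}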
type DecompressorFunc = internal.DecompressorFunc // GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. func GzipDecompressor(r io.Reader) (io.ReadCloser, error) { return pgzip.NewReader(r) } // Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) { return ioutil.NopCloser(bzip2.NewReader(r)), nil } // XzDecompressor is a DecompressorFunc for the xz compression algorithm. func XzDecompressor(r io.Reader) (io.ReadCloser, error) { r, err := xz.NewReader(r) if err != nil { return nil, err } return ioutil.NopCloser(r), nil } // gzipCompressor is a CompressorFunc for the gzip compression algorithm. func gzipCompressor(r io.Writer, level *int) (io.WriteCloser, error) { if level != nil { return pgzip.NewWriterLevel(r, *level) } return pgzip.NewWriter(r), nil } // bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm. func bzip2Compressor(r io.Writer, level *int) (io.WriteCloser, error) { return nil, fmt.Errorf("bzip2 compression not supported") } // xzCompressor is a CompressorFunc for the xz compression algorithm. func xzCompressor(r io.Writer, level *int) (io.WriteCloser, error) { return xz.NewWriter(r) } // CompressStream returns a writer that compresses data written to it into dest, // using the specified algorithm and compression level (a nil level means the algorithm's default). func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) { return internal.AlgorithmCompressor(algo)(dest, level) } // DetectCompressionFormat returns an Algorithm and a DecompressorFunc if the input is recognized as a compressed format, an empty Algorithm and nil otherwise. // Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. func DetectCompressionFormat(input io.Reader) (Algorithm, DecompressorFunc, io.Reader, error) { buffer := [8]byte{} n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later. return Algorithm{}, nil, nil, err } var retAlgo Algorithm var decompressor DecompressorFunc for _, algo := range compressionAlgorithms { if bytes.HasPrefix(buffer[:n], internal.AlgorithmPrefix(algo)) { logrus.Debugf("Detected compression format %s", algo.Name()) retAlgo = algo decompressor = internal.AlgorithmDecompressor(algo) break } } if decompressor == nil { logrus.Debugf("No compression detected") } return retAlgo, decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil } // DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. // Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { _, d, r, e := DetectCompressionFormat(input) return d, r, e } // AutoDecompress takes a stream and returns an uncompressed version of the // same stream. // The caller must call Close() on the returned stream (even if the input does not need, // or does not even support, closing!). 
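// A short usage sketch (illustrative only; blobStream is a placeholder
// io.Reader; error handling elided):
//
//	rc, wasCompressed, _ := compression.AutoDecompress(blobStream)
//	defer rc.Close()
//	// rc yields uncompressed bytes either way; wasCompressed reports whether
//	// blobStream was actually in one of the recognized compressed formats.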
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) { decompressor, stream, err := DetectCompression(stream) if err != nil { return nil, false, errors.Wrapf(err, "Error detecting compression") } var res io.ReadCloser if decompressor != nil { res, err = decompressor(stream) if err != nil { return nil, false, errors.Wrapf(err, "Error initializing decompression") } } else { res = ioutil.NopCloser(stream) } return res, decompressor != nil, nil } image-4.0.1/pkg/compression/compression_test.go000066400000000000000000000067551354546467100217030ustar00rootroot00000000000000package compression import ( "bytes" "io" "io/ioutil" "os" "testing" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestDetectCompression(t *testing.T) { cases := []string{ "fixtures/Hello.uncompressed", "fixtures/Hello.gz", "fixtures/Hello.bz2", "fixtures/Hello.xz", "fixtures/Hello.zst", } // The original stream is preserved. for _, c := range cases { originalContents, err := ioutil.ReadFile(c) require.NoError(t, err, c) stream, err := os.Open(c) require.NoError(t, err, c) defer stream.Close() _, updatedStream, err := DetectCompression(stream) require.NoError(t, err, c) updatedContents, err := ioutil.ReadAll(updatedStream) require.NoError(t, err, c) assert.Equal(t, originalContents, updatedContents, c) } // The correct decompressor is chosen, and the result is as expected. for _, c := range cases { stream, err := os.Open(c) require.NoError(t, err, c) defer stream.Close() decompressor, updatedStream, err := DetectCompression(stream) require.NoError(t, err, c) var uncompressedStream io.Reader if decompressor == nil { uncompressedStream = updatedStream } else { s, err := decompressor(updatedStream) require.NoError(t, err) defer s.Close() uncompressedStream = s } uncompressedContents, err := ioutil.ReadAll(uncompressedStream) require.NoError(t, err, c) assert.Equal(t, []byte("Hello"), uncompressedContents, c) } // Empty input is handled reasonably. decompressor, updatedStream, err := DetectCompression(bytes.NewReader([]byte{})) require.NoError(t, err) assert.Nil(t, decompressor) updatedContents, err := ioutil.ReadAll(updatedStream) require.NoError(t, err) assert.Equal(t, []byte{}, updatedContents) // Error reading input reader, writer := io.Pipe() defer reader.Close() writer.CloseWithError(errors.New("Expected error reading input in DetectCompression")) _, _, err = DetectCompression(reader) assert.Error(t, err) } func TestAutoDecompress(t *testing.T) { cases := []struct { filename string isCompressed bool }{ {"fixtures/Hello.uncompressed", false}, {"fixtures/Hello.gz", true}, {"fixtures/Hello.bz2", true}, {"fixtures/Hello.xz", true}, } // The correct decompressor is chosen, and the result is as expected. for _, c := range cases { stream, err := os.Open(c.filename) require.NoError(t, err, c.filename) defer stream.Close() uncompressedStream, isCompressed, err := AutoDecompress(stream) require.NoError(t, err, c.filename) defer uncompressedStream.Close() assert.Equal(t, c.isCompressed, isCompressed) uncompressedContents, err := ioutil.ReadAll(uncompressedStream) require.NoError(t, err, c.filename) assert.Equal(t, []byte("Hello"), uncompressedContents, c.filename) } // Empty input is handled reasonably. 
uncompressedStream, isCompressed, err := AutoDecompress(bytes.NewReader([]byte{})) require.NoError(t, err) assert.False(t, isCompressed) uncompressedContents, err := ioutil.ReadAll(uncompressedStream) require.NoError(t, err) assert.Equal(t, []byte{}, uncompressedContents) // Error initializing a decompressor (for a detected format) uncompressedStream, isCompressed, err = AutoDecompress(bytes.NewReader([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00})) assert.Error(t, err) // Error reading input reader, writer := io.Pipe() defer reader.Close() writer.CloseWithError(errors.New("Expected error reading input in AutoDecompress")) _, _, err = AutoDecompress(reader) assert.Error(t, err) } image-4.0.1/pkg/compression/fixtures/000077500000000000000000000000001354546467100176105ustar00rootroot00000000000000image-4.0.1/pkg/compression/fixtures/Hello.bz2000066400000000000000000000000531354546467100212700ustar00rootroot00000000000000BZh91AY&SYTd@!h3M3"(H *2Iimage-4.0.1/pkg/compression/fixtures/Hello.gz000066400000000000000000000000311354546467100212070ustar00rootroot00000000000000pWHimage-4.0.1/pkg/compression/fixtures/Hello.uncompressed000066400000000000000000000000051354546467100232770ustar00rootroot00000000000000Helloimage-4.0.1/pkg/compression/fixtures/Hello.xz000066400000000000000000000001001354546467100212250ustar00rootroot000000000000007zXZִF!t/HelloȬ{;\Q-}YZimage-4.0.1/pkg/compression/fixtures/Hello.zst000066400000000000000000000000221354546467100214070ustar00rootroot00000000000000(/X)HelloD}uimage-4.0.1/pkg/compression/internal/000077500000000000000000000000001354546467100175535ustar00rootroot00000000000000image-4.0.1/pkg/compression/internal/types.go000066400000000000000000000041641354546467100212530ustar00rootroot00000000000000package internal import "io" // CompressorFunc writes the compressed stream to the given writer using the specified compression level. // The caller must call Close() on the stream (even if the input stream does not need closing!). type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error) // DecompressorFunc returns the decompressed stream, given a compressed stream. // The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). type DecompressorFunc func(io.Reader) (io.ReadCloser, error) // Algorithm is a compression algorithm that can be used for CompressStream. type Algorithm struct { name string prefix []byte decompressor DecompressorFunc compressor CompressorFunc } // NewAlgorithm creates an Algorithm instance. // This function exists so that Algorithm instances can only be created by code that // is allowed to import this internal subpackage. func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { return Algorithm{ name: name, prefix: prefix, decompressor: decompressor, compressor: compressor, } } // Name returns the name for the compression algorithm. func (c Algorithm) Name() string { return c.name } // AlgorithmCompressor returns the compressor field of algo. // This is a function instead of a public method so that it is only callable from by code // that is allowed to import this internal subpackage. func AlgorithmCompressor(algo Algorithm) CompressorFunc { return algo.compressor } // AlgorithmDecompressor returns the decompressor field of algo. // This is a function instead of a public method so that it is only callable from by code // that is allowed to import this internal subpackage. 
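// For orientation: within this module, pkg/compression retrieves the
// decompressor via this accessor, roughly (sketch; compressedStream is a
// placeholder io.Reader):
//
//	dec := internal.AlgorithmDecompressor(algo)
//	rc, err := dec(compressedStream)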
func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { return algo.decompressor } // AlgorithmPrefix returns the prefix field of algo. // This is a function instead of a public method so that it is only callable by code // that is allowed to import this internal subpackage. func AlgorithmPrefix(algo Algorithm) []byte { return algo.prefix } image-4.0.1/pkg/compression/types/000077500000000000000000000000001354546467100171035ustar00rootroot00000000000000image-4.0.1/pkg/compression/types/types.go000066400000000000000000000007651354546467100206060ustar00rootroot00000000000000package types import ( "github.com/containers/image/v4/pkg/compression/internal" ) // DecompressorFunc returns the decompressed stream, given a compressed stream. // The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). type DecompressorFunc = internal.DecompressorFunc // Algorithm is a compression algorithm provided and supported by pkg/compression. // It can’t be supplied from the outside. type Algorithm = internal.Algorithm image-4.0.1/pkg/compression/zstd.go000066400000000000000000000026121354546467100172530ustar00rootroot00000000000000package compression import ( "io" "github.com/klauspost/compress/zstd" ) type wrapperZstdDecoder struct { decoder *zstd.Decoder } func (w *wrapperZstdDecoder) Close() error { w.decoder.Close() return nil } func (w *wrapperZstdDecoder) DecodeAll(input, dst []byte) ([]byte, error) { return w.decoder.DecodeAll(input, dst) } func (w *wrapperZstdDecoder) Read(p []byte) (int, error) { return w.decoder.Read(p) } func (w *wrapperZstdDecoder) Reset(r io.Reader) error { return w.decoder.Reset(r) } func (w *wrapperZstdDecoder) WriteTo(wr io.Writer) (int64, error) { return w.decoder.WriteTo(wr) } func zstdReader(buf io.Reader) (io.ReadCloser, error) { decoder, err := zstd.NewReader(buf) return &wrapperZstdDecoder{decoder: decoder}, err } func zstdWriter(dest io.Writer) (io.WriteCloser, error) { return zstd.NewWriter(dest) } func zstdWriterWithLevel(dest io.Writer, level int) (io.WriteCloser, error) { el := zstd.EncoderLevelFromZstd(level) return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) } // zstdCompressor is a CompressorFunc for the zstd compression algorithm. func zstdCompressor(r io.Writer, level *int) (io.WriteCloser, error) { if level == nil { return zstdWriter(r) } return zstdWriterWithLevel(r, *level) } // ZstdDecompressor is a DecompressorFunc for the zstd compression algorithm. 
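// A round-trip sketch through the public API (illustrative; data is a
// placeholder []byte; error handling elided):
//
//	var buf bytes.Buffer
//	w, _ := compression.CompressStream(&buf, compression.Zstd, nil) // nil level = default
//	w.Write(data)
//	w.Close()
//	rc, _ := compression.ZstdDecompressor(&buf)
//	uncompressed, _ := ioutil.ReadAll(rc)
//	rc.Close()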
func ZstdDecompressor(r io.Reader) (io.ReadCloser, error) { return zstdReader(r) } image-4.0.1/pkg/docker/000077500000000000000000000000001354546467100146455ustar00rootroot00000000000000image-4.0.1/pkg/docker/config/000077500000000000000000000000001354546467100161125ustar00rootroot00000000000000image-4.0.1/pkg/docker/config/config.go000066400000000000000000000276211354546467100177160ustar00rootroot00000000000000package config import ( "encoding/base64" "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "strings" "github.com/containers/image/v4/types" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" "github.com/docker/docker/pkg/homedir" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type dockerAuthConfig struct { Auth string `json:"auth,omitempty"` } type dockerConfigFile struct { AuthConfigs map[string]dockerAuthConfig `json:"auths"` CredHelpers map[string]string `json:"credHelpers,omitempty"` } var ( defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") dockerHomePath = filepath.FromSlash(".docker/config.json") dockerLegacyHomePath = ".dockercfg" enableKeyring = false // ErrNotLoggedIn is returned for users not logged into a registry // that they are trying to logout of ErrNotLoggedIn = errors.New("not logged in") // ErrNotSupported is returned for unsupported methods ErrNotSupported = errors.New("not supported") ) // SetAuthentication stores the username and password in the auth.json file func SetAuthentication(sys *types.SystemContext, registry, username, password string) error { return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { if ch, exists := auths.CredHelpers[registry]; exists { return false, setAuthToCredHelper(ch, registry, username, password) } // Set the credentials to kernel keyring if enableKeyring is true. // The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms. // Hence, we want to fall-back to using the authfile in case the keyring failed. // However, if the enableKeyring is false, we want adhere to the user specification and not use the keyring. if enableKeyring { err := setAuthToKernelKeyring(registry, username, password) if err == nil { logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username) return false, nil } logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles. 
%v", err) } creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) newCreds := dockerAuthConfig{Auth: creds} auths.AuthConfigs[registry] = newCreds return true, nil }) } // GetAuthentication returns the registry credentials stored in // either auth.json file or .docker/config.json // If an entry is not found empty strings are returned for the username and password func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { if sys != nil && sys.DockerAuthConfig != nil { logrus.Debug("Returning credentials from DockerAuthConfig") return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil } if enableKeyring { username, password, err := getAuthFromKernelKeyring(registry) if err == nil { logrus.Debug("returning credentials from kernel keyring") return username, password, nil } } dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath) var paths []string pathToAuth, err := getPathToAuth(sys) if err == nil { paths = append(paths, pathToAuth) } else { // Error means that the path set for XDG_RUNTIME_DIR does not exist // but we don't want to completely fail in the case that the user is pulling a public image // Logging the error as a warning instead and moving on to pulling the image logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) } paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath) for _, path := range paths { legacyFormat := path == dockerLegacyPath username, password, err := findAuthentication(registry, path, legacyFormat) if err != nil { logrus.Debugf("Credentials not found") return "", "", err } if username != "" && password != "" { logrus.Debugf("Returning credentials from %s", path) return username, password, nil } } logrus.Debugf("Credentials not found") return "", "", nil } // RemoveAuthentication deletes the credentials stored in auth.json func RemoveAuthentication(sys *types.SystemContext, registry string) error { return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { // First try cred helpers. 
if ch, exists := auths.CredHelpers[registry]; exists { return false, deleteAuthFromCredHelper(ch, registry) } // Next if keyring is enabled try kernel keyring if enableKeyring { err := deleteAuthFromKernelKeyring(registry) if err == nil { logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry) return false, nil } logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles") } if _, ok := auths.AuthConfigs[registry]; ok { delete(auths.AuthConfigs, registry) } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { delete(auths.AuthConfigs, normalizeRegistry(registry)) } else { return false, ErrNotLoggedIn } return true, nil }) } // RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring func RemoveAllAuthentication(sys *types.SystemContext) error { return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { if enableKeyring { err := removeAllAuthFromKernelKeyring() if err == nil { logrus.Debugf("removing all credentials from kernel keyring") return false, nil } logrus.Debugf("error removing credentials from kernel keyring") } auths.CredHelpers = make(map[string]string) auths.AuthConfigs = make(map[string]dockerAuthConfig) return true, nil }) } // getPathToAuth gets the path of the auth.json file // The path can be overridden by the user if the overwrite-path flag is set // If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers // Otherwise, the auth.json file is stored in /run/containers/UID func getPathToAuth(sys *types.SystemContext) (string, error) { if sys != nil { if sys.AuthFilePath != "" { return sys.AuthFilePath, nil } if sys.RootForImplicitAbsolutePaths != "" { return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil } } runtimeDir := os.Getenv("XDG_RUNTIME_DIR") if runtimeDir != "" { // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. _, err := os.Stat(runtimeDir) if os.IsNotExist(err) { // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory // or made a typo while setting the environment variable, // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. 
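// E.g. with XDG_RUNTIME_DIR=/run/user/1000 this yields
// /run/user/1000/containers/auth.json.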
return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil } return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil } // readJSONFile unmarshals the authentications stored in the auth.json file and returns it // or returns an empty dockerConfigFile data structure if auth.json does not exist // if the file exists and is empty, readJSONFile returns an error func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { var auths dockerConfigFile raw, err := ioutil.ReadFile(path) if err != nil { if os.IsNotExist(err) { auths.AuthConfigs = map[string]dockerAuthConfig{} return auths, nil } return dockerConfigFile{}, err } if legacyFormat { if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) } return auths, nil } if err = json.Unmarshal(raw, &auths); err != nil { return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) } return auths, nil } // modifyJSON writes to auth.json if the dockerConfigFile has been updated func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error { path, err := getPathToAuth(sys) if err != nil { return err } dir := filepath.Dir(path) if _, err := os.Stat(dir); os.IsNotExist(err) { if err = os.MkdirAll(dir, 0700); err != nil { return errors.Wrapf(err, "error creating directory %q", dir) } } auths, err := readJSONFile(path, false) if err != nil { return errors.Wrapf(err, "error reading JSON file %q", path) } updated, err := editor(&auths) if err != nil { return errors.Wrapf(err, "error updating %q", path) } if updated { newData, err := json.MarshalIndent(auths, "", "\t") if err != nil { return errors.Wrapf(err, "error marshaling JSON %q", path) } if err = ioutil.WriteFile(path, newData, 0755); err != nil { return errors.Wrapf(err, "error writing to file %q", path) } } return nil } func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds, err := helperclient.Get(p, registry) if err != nil { return "", "", err } return creds.Username, creds.Secret, nil } func setAuthToCredHelper(credHelper, registry, username, password string) error { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) creds := &credentials.Credentials{ ServerURL: registry, Username: username, Secret: password, } return helperclient.Store(p, creds) } func deleteAuthFromCredHelper(credHelper, registry string) error { helperName := fmt.Sprintf("docker-credential-%s", credHelper) p := helperclient.NewShellProgramFunc(helperName) return helperclient.Erase(p, registry) } // findAuthentication looks for auth of registry in path func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { auths, err := readJSONFile(path, legacyFormat) if err != nil { return "", "", errors.Wrapf(err, "error reading JSON file %q", path) } // First try cred helpers. They should always be normalized. 
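// An auth file that delegates a registry to a helper looks roughly like
// (illustrative content):
//
//	{ "credHelpers": { "registry.example.com": "helper-name" } }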
if ch, exists := auths.CredHelpers[registry]; exists { return getAuthFromCredHelper(ch, registry) } // I'm feeling lucky if val, exists := auths.AuthConfigs[registry]; exists { return decodeDockerAuth(val.Auth) } // bad luck; let's normalize the entries first registry = normalizeRegistry(registry) normalizedAuths := map[string]dockerAuthConfig{} for k, v := range auths.AuthConfigs { normalizedAuths[normalizeRegistry(k)] = v } if val, exists := normalizedAuths[registry]; exists { return decodeDockerAuth(val.Auth) } return "", "", nil } func decodeDockerAuth(s string) (string, string, error) { decoded, err := base64.StdEncoding.DecodeString(s) if err != nil { return "", "", err } parts := strings.SplitN(string(decoded), ":", 2) if len(parts) != 2 { // if it's invalid just skip, as docker does return "", "", nil } user := parts[0] password := strings.Trim(parts[1], "\x00") return user, password, nil } // convertToHostname converts a registry url which has http|https prepended // to just a hostname. // Copied from github.com/docker/docker/registry/auth.go func convertToHostname(url string) string { stripped := url if strings.HasPrefix(url, "http://") { stripped = strings.TrimPrefix(url, "http://") } else if strings.HasPrefix(url, "https://") { stripped = strings.TrimPrefix(url, "https://") } nameParts := strings.SplitN(stripped, "/", 2) return nameParts[0] } func normalizeRegistry(registry string) string { normalized := convertToHostname(registry) switch normalized { case "registry-1.docker.io", "docker.io": return "index.docker.io" } return normalized } image-4.0.1/pkg/docker/config/config_linux.go000066400000000000000000000054001354546467100211240ustar00rootroot00000000000000package config import ( "fmt" "strings" "github.com/containers/image/v4/internal/pkg/keyctl" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const keyDescribePrefix = "container-registry-login:" func getAuthFromKernelKeyring(registry string) (string, string, error) { userkeyring, err := keyctl.UserKeyring() if err != nil { return "", "", err } key, err := userkeyring.Search(genDescription(registry)) if err != nil { return "", "", err } authData, err := key.Get() if err != nil { return "", "", err } parts := strings.SplitN(string(authData), "\x00", 2) if len(parts) != 2 { return "", "", nil } return parts[0], parts[1], nil } func deleteAuthFromKernelKeyring(registry string) error { userkeyring, err := keyctl.UserKeyring() if err != nil { return err } key, err := userkeyring.Search(genDescription(registry)) if err != nil { return err } return key.Unlink() } func removeAllAuthFromKernelKeyring() error { keys, err := keyctl.ReadUserKeyring() if err != nil { return err } userkeyring, err := keyctl.UserKeyring() if err != nil { return err } for _, k := range keys { keyAttr, err := k.Describe() if err != nil { return err } // split string "type;uid;gid;perm;description" keyAttrs := strings.SplitN(keyAttr, ";", 5) if len(keyAttrs) < 5 { return errors.Errorf("Key attributes of %d are not available", k.ID()) } keyDescribe := keyAttrs[4] if strings.HasPrefix(keyDescribe, keyDescribePrefix) { err := keyctl.Unlink(userkeyring, k) if err != nil { return errors.Wrapf(err, "error unlinking key %d", k.ID()) } logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr) } } return nil } func setAuthToKernelKeyring(registry, username, password string) error { keyring, err := keyctl.SessionKeyring() if err != nil { return err } id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password))) if err != 
nil { return err } // sets all permission(view,read,write,search,link,set attribute) for current user // it enables the user to search the key after it linked to user keyring and unlinked from session keyring err = keyctl.SetPerm(id, keyctl.PermUserAll) if err != nil { return err } // link the key to userKeyring userKeyring, err := keyctl.UserKeyring() if err != nil { return errors.Wrapf(err, "error getting user keyring") } err = keyctl.Link(userKeyring, id) if err != nil { return errors.Wrapf(err, "error linking the key to user keyring") } // unlink the key from session keyring err = keyctl.Unlink(keyring, id) if err != nil { return errors.Wrapf(err, "error unlinking the key from session keyring") } return nil } func genDescription(registry string) string { return fmt.Sprintf("%s%s", keyDescribePrefix, registry) } image-4.0.1/pkg/docker/config/config_test.go000066400000000000000000000265321354546467100207550ustar00rootroot00000000000000package config import ( "encoding/json" "fmt" "io/ioutil" "os" "path/filepath" "testing" "github.com/containers/image/v4/types" "github.com/containers/storage/pkg/homedir" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGetPathToAuth(t *testing.T) { uid := fmt.Sprintf("%d", os.Getuid()) tmpDir, err := ioutil.TempDir("", "TestGetPathToAuth") require.NoError(t, err) // Environment is per-process, so this looks very unsafe; actually it seems fine because tests are not // run in parallel unless they opt in by calling t.Parallel(). So don’t do that. oldXRD, hasXRD := os.LookupEnv("XDG_RUNTIME_DIR") defer func() { if hasXRD { os.Setenv("XDG_RUNTIME_DIR", oldXRD) } else { os.Unsetenv("XDG_RUNTIME_DIR") } }() for _, c := range []struct { sys *types.SystemContext xrd string expected string }{ // Default paths {&types.SystemContext{}, "", "/run/containers/" + uid + "/auth.json"}, {nil, "", "/run/containers/" + uid + "/auth.json"}, // SystemContext overrides {&types.SystemContext{AuthFilePath: "/absolute/path"}, "", "/absolute/path"}, {&types.SystemContext{RootForImplicitAbsolutePaths: "/prefix"}, "", "/prefix/run/containers/" + uid + "/auth.json"}, // XDG_RUNTIME_DIR defined {nil, tmpDir, tmpDir + "/containers/auth.json"}, {nil, tmpDir + "/thisdoesnotexist", ""}, } { if c.xrd != "" { os.Setenv("XDG_RUNTIME_DIR", c.xrd) } else { os.Unsetenv("XDG_RUNTIME_DIR") } res, err := getPathToAuth(c.sys) if c.expected == "" { assert.Error(t, err) } else { require.NoError(t, err) assert.Equal(t, c.expected, res) } } } func TestGetAuth(t *testing.T) { origXDG := os.Getenv("XDG_RUNTIME_DIR") tmpDir1, err := ioutil.TempDir("", "test_docker_client_get_auth") if err != nil { t.Fatal(err) } t.Logf("using temporary XDG_RUNTIME_DIR directory: %q", tmpDir1) // override XDG_RUNTIME_DIR os.Setenv("XDG_RUNTIME_DIR", tmpDir1) defer func() { err := os.RemoveAll(tmpDir1) if err != nil { t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir1, err) } os.Setenv("XDG_RUNTIME_DIR", origXDG) }() origHomeDir := homedir.Get() tmpDir2, err := ioutil.TempDir("", "test_docker_client_get_auth") if err != nil { t.Fatal(err) } t.Logf("using temporary home directory: %q", tmpDir2) //override homedir os.Setenv(homedir.Key(), tmpDir2) defer func() { err := os.RemoveAll(tmpDir2) if err != nil { t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir2, err) } os.Setenv(homedir.Key(), origHomeDir) }() configDir1 := filepath.Join(tmpDir1, "containers") if err := os.MkdirAll(configDir1, 0700); err != nil { t.Fatal(err) } configDir2 
:= filepath.Join(tmpDir2, ".docker") if err := os.MkdirAll(configDir2, 0700); err != nil { t.Fatal(err) } configPaths := [2]string{filepath.Join(configDir1, "auth.json"), filepath.Join(configDir2, "config.json")} for _, configPath := range configPaths { for _, tc := range []struct { name string hostname string path string expectedUsername string expectedPassword string expectedError error sys *types.SystemContext }{ { name: "no auth config", hostname: "index.docker.io", }, { name: "empty hostname", path: filepath.Join("testdata", "example.json"), }, { name: "match one", hostname: "example.org", path: filepath.Join("testdata", "example.json"), expectedUsername: "example", expectedPassword: "org", }, { name: "match none", hostname: "registry.example.org", path: filepath.Join("testdata", "example.json"), }, { name: "match docker.io", hostname: "docker.io", path: filepath.Join("testdata", "full.json"), expectedUsername: "docker", expectedPassword: "io", }, { name: "match docker.io normalized", hostname: "docker.io", path: filepath.Join("testdata", "abnormal.json"), expectedUsername: "index", expectedPassword: "docker.io", }, { name: "normalize registry", hostname: "https://example.org/v1", path: filepath.Join("testdata", "full.json"), expectedUsername: "example", expectedPassword: "org", }, { name: "match localhost", hostname: "http://localhost", path: filepath.Join("testdata", "full.json"), expectedUsername: "local", expectedPassword: "host", }, { name: "match ip", hostname: "10.10.30.45:5000", path: filepath.Join("testdata", "full.json"), expectedUsername: "10.10", expectedPassword: "30.45-5000", }, { name: "match port", hostname: "https://localhost:5000", path: filepath.Join("testdata", "abnormal.json"), expectedUsername: "local", expectedPassword: "host-5000", }, { name: "use system context", hostname: "example.org", path: filepath.Join("testdata", "example.json"), expectedUsername: "foo", expectedPassword: "bar", sys: &types.SystemContext{ DockerAuthConfig: &types.DockerAuthConfig{ Username: "foo", Password: "bar", }, }, }, } { if tc.path == "" { if err := os.RemoveAll(configPath); err != nil { t.Fatal(err) } } t.Run(tc.name, func(t *testing.T) { if tc.path != "" { contents, err := ioutil.ReadFile(tc.path) if err != nil { t.Fatal(err) } if err := ioutil.WriteFile(configPath, contents, 0640); err != nil { t.Fatal(err) } } var sys *types.SystemContext if tc.sys != nil { sys = tc.sys } username, password, err := GetAuthentication(sys, tc.hostname) assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedUsername, username) assert.Equal(t, tc.expectedPassword, password) }) } } } func TestGetAuthFromLegacyFile(t *testing.T) { origHomeDir := homedir.Get() tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth") if err != nil { t.Fatal(err) } t.Logf("using temporary home directory: %q", tmpDir) // override homedir os.Setenv(homedir.Key(), tmpDir) defer func() { err := os.RemoveAll(tmpDir) if err != nil { t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err) } os.Setenv(homedir.Key(), origHomeDir) }() configPath := filepath.Join(tmpDir, ".dockercfg") contents, err := ioutil.ReadFile(filepath.Join("testdata", "legacy.json")) if err != nil { t.Fatal(err) } for _, tc := range []struct { name string hostname string expectedUsername string expectedPassword string expectedError error }{ { name: "normalize registry", hostname: "https://docker.io/v1", expectedUsername: "docker", expectedPassword: "io-legacy", }, { name: "ignore schema and path", hostname: 
"http://index.docker.io/v1", expectedUsername: "docker", expectedPassword: "io-legacy", }, } { t.Run(tc.name, func(t *testing.T) { if err := ioutil.WriteFile(configPath, contents, 0640); err != nil { t.Fatal(err) } username, password, err := GetAuthentication(nil, tc.hostname) assert.Equal(t, tc.expectedError, err) assert.Equal(t, tc.expectedUsername, username) assert.Equal(t, tc.expectedPassword, password) }) } } func TestGetAuthPreferNewConfig(t *testing.T) { origHomeDir := homedir.Get() tmpDir, err := ioutil.TempDir("", "test_docker_client_get_auth") if err != nil { t.Fatal(err) } t.Logf("using temporary home directory: %q", tmpDir) // override homedir os.Setenv(homedir.Key(), tmpDir) defer func() { err := os.RemoveAll(tmpDir) if err != nil { t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir, err) } os.Setenv(homedir.Key(), origHomeDir) }() configDir := filepath.Join(tmpDir, ".docker") if err := os.Mkdir(configDir, 0750); err != nil { t.Fatal(err) } for _, data := range []struct { source string target string }{ { source: filepath.Join("testdata", "full.json"), target: filepath.Join(configDir, "config.json"), }, { source: filepath.Join("testdata", "legacy.json"), target: filepath.Join(tmpDir, ".dockercfg"), }, } { contents, err := ioutil.ReadFile(data.source) if err != nil { t.Fatal(err) } if err := ioutil.WriteFile(data.target, contents, 0640); err != nil { t.Fatal(err) } } username, password, err := GetAuthentication(nil, "docker.io") assert.Equal(t, nil, err) assert.Equal(t, "docker", username) assert.Equal(t, "io", password) } func TestGetAuthFailsOnBadInput(t *testing.T) { origXDG := os.Getenv("XDG_RUNTIME_DIR") tmpDir1, err := ioutil.TempDir("", "test_docker_client_get_auth") if err != nil { t.Fatal(err) } t.Logf("using temporary XDG_RUNTIME_DIR directory: %q", tmpDir1) // override homedir os.Setenv("XDG_RUNTIME_DIR", tmpDir1) defer func() { err := os.RemoveAll(tmpDir1) if err != nil { t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir1, err) } os.Setenv("XDG_RUNTIME_DIR", origXDG) }() origHomeDir := homedir.Get() tmpDir2, err := ioutil.TempDir("", "test_docker_client_get_auth") if err != nil { t.Fatal(err) } t.Logf("using temporary home directory: %q", tmpDir2) // override homedir os.Setenv(homedir.Key(), tmpDir2) defer func() { err := os.RemoveAll(tmpDir2) if err != nil { t.Logf("failed to cleanup temporary home directory %q: %v", tmpDir2, err) } os.Setenv(homedir.Key(), origHomeDir) }() configDir := filepath.Join(tmpDir1, "containers") if err := os.Mkdir(configDir, 0750); err != nil { t.Fatal(err) } configPath := filepath.Join(configDir, "auth.json") // no config file present username, password, err := GetAuthentication(nil, "index.docker.io") if err != nil { t.Fatalf("got unexpected error: %#+v", err) } if len(username) > 0 || len(password) > 0 { t.Fatalf("got unexpected not empty username/password: %q/%q", username, password) } if err := ioutil.WriteFile(configPath, []byte("Json rocks! 
Unless it doesn't."), 0640); err != nil { t.Fatalf("failed to write file %q: %v", configPath, err) } username, password, err = GetAuthentication(nil, "index.docker.io") if err == nil { t.Fatalf("got unexpected non-error: username=%q, password=%q", username, password) } if _, ok := errors.Cause(err).(*json.SyntaxError); !ok { t.Fatalf("expected JSON syntax error, not: %#+v", err) } // remove the invalid config file os.RemoveAll(configPath) // no config file present username, password, err = GetAuthentication(nil, "index.docker.io") if err != nil { t.Fatalf("got unexpected error: %#+v", err) } if len(username) > 0 || len(password) > 0 { t.Fatalf("got unexpected not empty username/password: %q/%q", username, password) } configPath = filepath.Join(tmpDir2, ".dockercfg") if err := ioutil.WriteFile(configPath, []byte("I'm certainly not a json string."), 0640); err != nil { t.Fatalf("failed to write file %q: %v", configPath, err) } username, password, err = GetAuthentication(nil, "index.docker.io") if err == nil { t.Fatalf("got unexpected non-error: username=%q, password=%q", username, password) } if _, ok := errors.Cause(err).(*json.SyntaxError); !ok { t.Fatalf("expected JSON syntax error, not: %#+v", err) } } image-4.0.1/pkg/docker/config/config_unsupported.go000066400000000000000000000006451354546467100223630ustar00rootroot00000000000000// +build !linux // +build !386 !amd64 package config func getAuthFromKernelKeyring(registry string) (string, string, error) { return "", "", ErrNotSupported } func deleteAuthFromKernelKeyring(registry string) error { return ErrNotSupported } func setAuthToKernelKeyring(registry, username, password string) error { return ErrNotSupported } func removeAllAuthFromKernelKeyring() error { return ErrNotSupported } image-4.0.1/pkg/docker/config/testdata/000077500000000000000000000000001354546467100177235ustar00rootroot00000000000000image-4.0.1/pkg/docker/config/testdata/abnormal.json000066400000000000000000000006771354546467100224230ustar00rootroot00000000000000{ "auths": { "example.org": { "auth": "ZXhhbXBsZTpvcmc=" }, "https://index.docker.io/v1": { "auth": "aW5kZXg6ZG9ja2VyLmlv" }, "https://127.0.0.1:5000": { "auth": "MTI3LjA6MC4xLTUwMDA=" }, "http://localhost": { "auth": "bG9jYWw6aG9zdA==" }, "https://localhost:5001": { "auth": "bG9jYWw6aG9zdC01MDAx" }, "localhost:5000": { "auth": "bG9jYWw6aG9zdC01MDAw" } } } image-4.0.1/pkg/docker/config/testdata/example.json000066400000000000000000000001211354546467100222430ustar00rootroot00000000000000{ "auths": { "example.org": { "auth": "ZXhhbXBsZTpvcmc=" } } } image-4.0.1/pkg/docker/config/testdata/full.json000066400000000000000000000007231354546467100215620ustar00rootroot00000000000000{ "auths": { "example.org": { "auth": "ZXhhbXBsZTpvcmc=" }, "index.docker.io": { "auth": "aW5kZXg6ZG9ja2VyLmlv" }, "docker.io": { "auth": "ZG9ja2VyOmlv" }, "localhost": { "auth": "bG9jYWw6aG9zdA==" }, "localhost:5000": { "auth": "bG9jYWw6aG9zdC01MDAw" }, "10.10.30.45": { "auth": "MTAuMTA6MzAuNDU=" }, "10.10.30.45:5000": { "auth": "MTAuMTA6MzAuNDUtNTAwMA==" } } } image-4.0.1/pkg/docker/config/testdata/legacy.json000066400000000000000000000002131354546467100220560ustar00rootroot00000000000000{ "docker.io/v2": { "auth": "ZG9ja2VyOmlvLWxlZ2FjeQ==" }, "https://localhost/v1": { "auth": "bG9jYWw6aG9zdC1sZWdhY3k=" } } image-4.0.1/pkg/strslice/000077500000000000000000000000001354546467100152265ustar00rootroot00000000000000image-4.0.1/pkg/strslice/README.md000066400000000000000000000002171354546467100165050ustar00rootroot00000000000000This package was 
replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice). image-4.0.1/pkg/strslice/strslice.go000066400000000000000000000014041354546467100174040ustar00rootroot00000000000000package strslice import "encoding/json" // StrSlice represents a string or an array of strings. // We need to override the json decoder to accept both options. type StrSlice []string // UnmarshalJSON decodes the byte slice whether it's a string or an array of // strings. This method is needed to implement json.Unmarshaler. func (e *StrSlice) UnmarshalJSON(b []byte) error { if len(b) == 0 { // With no input, we preserve the existing value by returning nil and // leaving the target alone. This allows defining default values for // the type. return nil } p := make([]string, 0, 1) if err := json.Unmarshal(b, &p); err != nil { var s string if err := json.Unmarshal(b, &s); err != nil { return err } p = append(p, s) } *e = p return nil } image-4.0.1/pkg/strslice/strslice_test.go000066400000000000000000000036421354546467100204510ustar00rootroot00000000000000package strslice import ( "encoding/json" "reflect" "testing" ) func TestStrSliceMarshalJSON(t *testing.T) { for _, testcase := range []struct { input StrSlice expected string }{ // MADNESS(stevvooe): No clue why nil would be "" but empty would be // "null". Had to make a change here that may affect compatibility. {input: nil, expected: "null"}, {StrSlice{}, "[]"}, {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, } { data, err := json.Marshal(testcase.input) if err != nil { t.Fatal(err) } if string(data) != testcase.expected { t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) } } } func TestStrSliceUnmarshalJSON(t *testing.T) { parts := map[string][]string{ "": {"default", "values"}, "[]": {}, `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, } for json, expectedParts := range parts { strs := StrSlice{"default", "values"} if err := strs.UnmarshalJSON([]byte(json)); err != nil { t.Fatal(err) } actualParts := []string(strs) if !reflect.DeepEqual(actualParts, expectedParts) { t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) } } } func TestStrSliceUnmarshalString(t *testing.T) { var e StrSlice echo, err := json.Marshal("echo") if err != nil { t.Fatal(err) } if err := json.Unmarshal(echo, &e); err != nil { t.Fatal(err) } if len(e) != 1 { t.Fatalf("expected 1 element after unmarshal: %q", e) } if e[0] != "echo" { t.Fatalf("expected `echo`, got: %q", e[0]) } } func TestStrSliceUnmarshalSlice(t *testing.T) { var e StrSlice echo, err := json.Marshal([]string{"echo"}) if err != nil { t.Fatal(err) } if err := json.Unmarshal(echo, &e); err != nil { t.Fatal(err) } if len(e) != 1 { t.Fatalf("expected 1 element after unmarshal: %q", e) } if e[0] != "echo" { t.Fatalf("expected `echo`, got: %q", e[0]) } } image-4.0.1/pkg/sysregistriesv2/000077500000000000000000000000001354546467100165655ustar00rootroot00000000000000image-4.0.1/pkg/sysregistriesv2/system_registries_v2.go000066400000000000000000000371241354546467100233160ustar00rootroot00000000000000package sysregistriesv2 import ( "fmt" "io/ioutil" "os" "path/filepath" "regexp" "strings" "sync" "github.com/BurntSushi/toml" "github.com/containers/image/v4/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/containers/image/v4/docker/reference" ) // systemRegistriesConfPath is the path to the system-wide registry // configuration file and is used to add/subtract potential 
registries for // obtaining images. You can override this at build time with // -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path' var systemRegistriesConfPath = builtinRegistriesConfPath // builtinRegistriesConfPath is the path to the registry configuration file. // DO NOT change this, instead see systemRegistriesConfPath above. const builtinRegistriesConfPath = "/etc/containers/registries.conf" // Endpoint describes a remote location of a registry. type Endpoint struct { // The endpoint's remote location. Location string `toml:"location,omitempty"` // If true, certs verification will be skipped and HTTP (non-TLS) // connections will be allowed. Insecure bool `toml:"insecure,omitempty"` } // rewriteReference substitutes the provided reference `prefix` with the // endpoint's `location` in `ref` and creates a new named reference from it. // The function errors if the newly created reference is not parsable. func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) { refString := ref.String() if !refMatchesPrefix(refString, prefix) { return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString) } newNamedRef := strings.Replace(refString, prefix, e.Location, 1) newParsedRef, err := reference.ParseNamed(newNamedRef) if err != nil { return nil, errors.Wrapf(err, "error rewriting reference") } logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String()) return newParsedRef, nil } // Registry represents a registry. type Registry struct { // Prefix is used for matching images, and to translate one namespace to // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"` // and we pull from "example.com/bar/myimage:latest", the image will // effectively be pulled from "example.com/foo/bar/myimage:latest". // If no Prefix is specified, it defaults to the specified location. Prefix string `toml:"prefix"` // A registry is an Endpoint too Endpoint // The registry's mirrors. Mirrors []Endpoint `toml:"mirror,omitempty"` // If true, pulling from the registry will be blocked. Blocked bool `toml:"blocked,omitempty"` // If true, mirrors will only be used for digest pulls. Pulling images by // tag can potentially yield different images, depending on which endpoint // we pull from. Forcing digest-pulls for mirrors avoids that issue. MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` } // PullSource consists of an Endpoint and a Reference. Note that the reference is // rewritten according to the registry's prefix and the Endpoint's location. type PullSource struct { Endpoint Endpoint Reference reference.Named } // PullSourcesFromReference returns a slice of PullSources based on the passed // reference. func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { var endpoints []Endpoint if r.MirrorByDigestOnly { // Only use mirrors when the reference is a digest one. 
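// E.g. with mirror-by-digest-only set, example.com/foo@sha256:... may be
// served from a mirror, while example.com/foo:latest is pulled only from
// the primary endpoint.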
if _, isDigested := ref.(reference.Canonical); isDigested { endpoints = append(r.Mirrors, r.Endpoint) } else { endpoints = []Endpoint{r.Endpoint} } } else { endpoints = append(r.Mirrors, r.Endpoint) } sources := []PullSource{} for _, ep := range endpoints { rewritten, err := ep.rewriteReference(ref, r.Prefix) if err != nil { return nil, err } sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) } return sources, nil } // V1TOMLregistries is for backwards compatibility to sysregistries v1 type V1TOMLregistries struct { Registries []string `toml:"registries"` } // V1TOMLConfig is for backwards compatibility to sysregistries v1 type V1TOMLConfig struct { Search V1TOMLregistries `toml:"search"` Insecure V1TOMLregistries `toml:"insecure"` Block V1TOMLregistries `toml:"block"` } // V1RegistriesConf is the sysregistries v1 configuration format. type V1RegistriesConf struct { V1TOMLConfig `toml:"registries"` } // Nonempty returns true if config contains at least one configuration entry. func (config *V1RegistriesConf) Nonempty() bool { return (len(config.V1TOMLConfig.Search.Registries) != 0 || len(config.V1TOMLConfig.Insecure.Registries) != 0 || len(config.V1TOMLConfig.Block.Registries) != 0) } // V2RegistriesConf is the sysregistries v2 configuration format. type V2RegistriesConf struct { Registries []Registry `toml:"registry"` // An array of host[:port] (not prefix!) entries to use for resolving unqualified image references UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"` } // Nonempty returns true if config contains at least one configuration entry. func (config *V2RegistriesConf) Nonempty() bool { return (len(config.Registries) != 0 || len(config.UnqualifiedSearchRegistries) != 0) } // tomlConfig is the data type used to unmarshal the toml config. type tomlConfig struct { V2RegistriesConf V1RegistriesConf // for backwards compatibility with sysregistries v1 } // InvalidRegistries represents an invalid registry configuration. An example // is when "registry.com" is defined multiple times in the configuration but // with conflicting security settings. type InvalidRegistries struct { s string } // Error returns the error string. func (e *InvalidRegistries) Error() string { return e.s } // parseLocation parses the input string, performs some sanity checks and returns // the sanitized input string. An error is returned if the input string is // empty or if it contains an "http{s,}://" prefix. func parseLocation(input string) (string, error) { trimmed := strings.TrimRight(input, "/") if trimmed == "" { return "", &InvalidRegistries{s: "invalid location: cannot be empty"} } if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input) return "", &InvalidRegistries{s: msg} } return trimmed, nil } // ConvertToV2 returns a v2 config corresponding to a v1 one. func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) { regMap := make(map[string]*Registry) // The order of the registries is not really important, but make it deterministic (the same for the same config file) // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. 
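// As an illustration, a v1 configuration such as
//
//	[registries.search]
//	registries = ['registry.example.com']
//	[registries.insecure]
//	registries = ['insecure.example.com']
//
// is converted into the v2 equivalent of
//
//	unqualified-search-registries = ['registry.example.com']
//	[[registry]]
//	location = "insecure.example.com"
//	insecure = true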
registryOrder := []string{} getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object var err error location, err = parseLocation(location) if err != nil { return nil, err } reg, exists := regMap[location] if !exists { reg = &Registry{ Endpoint: Endpoint{Location: location}, Mirrors: []Endpoint{}, Prefix: location, } regMap[location] = reg registryOrder = append(registryOrder, location) } return reg, nil } for _, blocked := range config.V1TOMLConfig.Block.Registries { reg, err := getRegistry(blocked) if err != nil { return nil, err } reg.Blocked = true } for _, insecure := range config.V1TOMLConfig.Insecure.Registries { reg, err := getRegistry(insecure) if err != nil { return nil, err } reg.Insecure = true } res := &V2RegistriesConf{ UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, } for _, location := range registryOrder { reg := regMap[location] res.Registries = append(res.Registries, *reg) } return res, nil } // anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. var anchoredDomainRegexp = regexp.MustCompile("^" + reference.DomainRegexp.String() + "$") // postProcess checks the consistency of all the configuration, looks for conflicts, // and normalizes the configuration (e.g., sets the Prefix to Location if not set). func (config *V2RegistriesConf) postProcess() error { regMap := make(map[string][]*Registry) for i := range config.Registries { reg := &config.Registries[i] // make sure Location and Prefix are valid var err error reg.Location, err = parseLocation(reg.Location) if err != nil { return err } if reg.Prefix == "" { reg.Prefix = reg.Location } else { reg.Prefix, err = parseLocation(reg.Prefix) if err != nil { return err } } // make sure mirrors are valid for _, mir := range reg.Mirrors { mir.Location, err = parseLocation(mir.Location) if err != nil { return err } } regMap[reg.Location] = append(regMap[reg.Location], reg) } // Given a registry can be mentioned multiple times (e.g., to have // multiple prefixes backed by different mirrors), we need to make sure // there are no conflicts among them. // // Note: we need to iterate over the registries array to ensure a // deterministic behavior which is not guaranteed by maps. for _, reg := range config.Registries { others, _ := regMap[reg.Location] for _, other := range others { if reg.Insecure != other.Insecure { msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location) return &InvalidRegistries{s: msg} } if reg.Blocked != other.Blocked { msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location) return &InvalidRegistries{s: msg} } } } for i := range config.UnqualifiedSearchRegistries { registry, err := parseLocation(config.UnqualifiedSearchRegistries[i]) if err != nil { return err } if !anchoredDomainRegexp.MatchString(registry) { return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)} } config.UnqualifiedSearchRegistries[i] = registry } return nil } // ConfigPath returns the path to the system-wide registry configuration file. 
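// With a nil ctx (or no overrides set) this is the built-in
// /etc/containers/registries.conf; ctx.SystemRegistriesConfPath replaces the
// path outright, while ctx.RootForImplicitAbsolutePaths only prefixes the
// default, e.g. (illustrative)
//
//	ConfigPath(&types.SystemContext{RootForImplicitAbsolutePaths: "/sysroot"})
//	// -> "/sysroot/etc/containers/registries.conf"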
func ConfigPath(ctx *types.SystemContext) string { confPath := systemRegistriesConfPath if ctx != nil { if ctx.SystemRegistriesConfPath != "" { confPath = ctx.SystemRegistriesConfPath } else if ctx.RootForImplicitAbsolutePaths != "" { confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath) } } return confPath } // configMutex is used to synchronize concurrent accesses to configCache. var configMutex = sync.Mutex{} // configCache caches already loaded configs with config paths as keys and is // used to avoid redundantly parsing configs. Concurrent accesses to the cache // are synchronized via configMutex. var configCache = make(map[string]*V2RegistriesConf) // InvalidateCache invalidates the registry cache. This function is meant to be // used for long-running processes that need to reload potential changes made to // the cached registry config files. func InvalidateCache() { configMutex.Lock() defer configMutex.Unlock() configCache = make(map[string]*V2RegistriesConf) } // getConfig returns the config object corresponding to ctx, loading it if it is not yet cached. func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) { configPath := ConfigPath(ctx) configMutex.Lock() // if the config has already been loaded, return the cached registries if config, inCache := configCache[configPath]; inCache { configMutex.Unlock() return config, nil } configMutex.Unlock() return TryUpdatingCache(ctx) } // TryUpdatingCache loads the configuration from the provided `SystemContext` // without using the internal cache. On success, the loaded configuration will // be added into the internal registry cache. func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { configPath := ConfigPath(ctx) configMutex.Lock() defer configMutex.Unlock() // load the config config, err := loadRegistryConf(configPath) if err != nil { // Return an empty []Registry if we use the default config, // which implies that the config path of the SystemContext // isn't set. Note: if ctx.SystemRegistriesConfPath points to // the default config, we will still return an error. if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { return &V2RegistriesConf{Registries: []Registry{}}, nil } return nil, err } v2Config := &config.V2RegistriesConf // backwards compatibility for v1 configs if config.V1RegistriesConf.Nonempty() { if config.V2RegistriesConf.Nonempty() { return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"} } v2, err := config.V1RegistriesConf.ConvertToV2() if err != nil { return nil, err } v2Config = v2 } if err := v2Config.postProcess(); err != nil { return nil, err } // populate the cache configCache[configPath] = v2Config return v2Config, nil } // GetRegistries loads and returns the registries specified in the config. // Note the parsed content of registry config files is cached. For reloading, // use `InvalidateCache` and re-call `GetRegistries`. 
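// A minimal usage sketch (hypothetical caller; error handling elided):
//
//	sys := &types.SystemContext{SystemRegistriesConfPath: "/etc/containers/registries.conf"}
//	regs, _ := sysregistriesv2.GetRegistries(sys)
//	for _, r := range regs {
//		fmt.Println(r.Prefix, r.Location, r.Blocked)
//	}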
func GetRegistries(ctx *types.SystemContext) ([]Registry, error) { config, err := getConfig(ctx) if err != nil { return nil, err } return config.Registries, nil } // UnqualifiedSearchRegistries returns a list of host[:port] entries to try // for unqualified image search, in the returned order. func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) { config, err := getConfig(ctx) if err != nil { return nil, err } return config.UnqualifiedSearchRegistries, nil } // refMatchesPrefix returns true iff ref, // which is a registry, repository namespace, repository or image reference (as formatted by // reference.Domain(), reference.Named.Name() or reference.Reference.String() // — note that this requires the name to start with an explicit hostname!), // matches a Registry.Prefix value. // (This is split from the caller primarily to make testing easier.) func refMatchesPrefix(ref, prefix string) bool { switch { case len(ref) < len(prefix): return false case len(ref) == len(prefix): return ref == prefix case len(ref) > len(prefix): if !strings.HasPrefix(ref, prefix) { return false } c := ref[len(prefix)] // This allows "example.com:5000" to match "example.com", // which is unintended; that will get fixed eventually, DON'T RELY // ON THE CURRENT BEHAVIOR. return c == ':' || c == '/' || c == '@' default: panic("Internal error: impossible comparison outcome") } } // FindRegistry returns the Registry with the longest prefix for ref, // which is a registry, repository namespace, repository or image reference (as formatted by // reference.Domain(), reference.Named.Name() or reference.Reference.String() // — note that this requires the name to start with an explicit hostname!). // If no Registry prefixes the image, nil is returned. func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) { config, err := getConfig(ctx) if err != nil { return nil, err } reg := Registry{} prefixLen := 0 for _, r := range config.Registries { if refMatchesPrefix(ref, r.Prefix) { length := len(r.Prefix) if length > prefixLen { reg = r prefixLen = length } } } if prefixLen != 0 { return &reg, nil } return nil, nil } // Loads the registry configuration file from the filesystem and then unmarshals // it. Returns the unmarshalled object. 
func loadRegistryConf(configPath string) (*tomlConfig, error) { config := &tomlConfig{} configBytes, err := ioutil.ReadFile(configPath) if err != nil { return nil, err } err = toml.Unmarshal(configBytes, &config) return config, err } image-4.0.1/pkg/sysregistriesv2/system_registries_v2_test.go000066400000000000000000000430201354546467100243450ustar00rootroot00000000000000package sysregistriesv2 import ( "fmt" "io/ioutil" "os" "path/filepath" "testing" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/containers/image/v4/docker/reference" ) func TestParseLocation(t *testing.T) { var err error var location string // invalid locations _, err = parseLocation("https://example.com") assert.NotNil(t, err) assert.Contains(t, err.Error(), "invalid location 'https://example.com': URI schemes are not supported") _, err = parseLocation("john.doe@example.com") assert.Nil(t, err) // valid locations location, err = parseLocation("example.com") assert.Nil(t, err) assert.Equal(t, "example.com", location) location, err = parseLocation("example.com/") // trailing slashes are stripped assert.Nil(t, err) assert.Equal(t, "example.com", location) location, err = parseLocation("example.com//////") // trailing slashes are stripped assert.Nil(t, err) assert.Equal(t, "example.com", location) location, err = parseLocation("example.com:5000/with/path") assert.Nil(t, err) assert.Equal(t, "example.com:5000/with/path", location) } func TestEmptyConfig(t *testing.T) { registries, err := GetRegistries(&types.SystemContext{SystemRegistriesConfPath: "testdata/empty.conf"}) assert.Nil(t, err) assert.Equal(t, 0, len(registries)) // When SystemRegistriesConfPath is not explicitly specified (but RootForImplicitAbsolutePaths might be), a missing file is treated // the same as an empty one, without reporting an error.
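// (This matches the TryUpdatingCache behavior above of returning an empty V2RegistriesConf when the implicit default path does not exist.)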
nonexistentRoot, err := filepath.Abs("testdata/this-does-not-exist") require.NoError(t, err) registries, err = GetRegistries(&types.SystemContext{RootForImplicitAbsolutePaths: nonexistentRoot}) assert.Nil(t, err) assert.Equal(t, 0, len(registries)) } func TestMirrors(t *testing.T) { sys := &types.SystemContext{SystemRegistriesConfPath: "testdata/mirrors.conf"} registries, err := GetRegistries(sys) assert.Nil(t, err) assert.Equal(t, 2, len(registries)) reg, err := FindRegistry(sys, "registry.com/image:tag") assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, 2, len(reg.Mirrors)) assert.Equal(t, "mirror-1.registry.com", reg.Mirrors[0].Location) assert.False(t, reg.Mirrors[0].Insecure) assert.Equal(t, "mirror-2.registry.com", reg.Mirrors[1].Location) assert.True(t, reg.Mirrors[1].Insecure) } func TestRefMatchesPrefix(t *testing.T) { for _, c := range []struct { ref, prefix string expected bool }{ // Prefix is a reference.Domain() value {"docker.io", "docker.io", true}, {"docker.io", "example.com", false}, {"example.com:5000", "example.com:5000", true}, {"example.com:50000", "example.com:5000", false}, {"example.com:5000", "example.com", true}, // FIXME FIXME This is unintended and undocumented, don't rely on this behavior {"example.com/foo", "example.com", true}, {"example.com/foo/bar", "example.com", true}, {"example.com/foo/bar:baz", "example.com", true}, {"example.com/foo/bar@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com", true}, // Prefix is a reference.Named.Name() value or a repo namespace {"docker.io", "docker.io/library", false}, {"docker.io/library", "docker.io/library", true}, {"example.com/library", "docker.io/library", false}, {"docker.io/libraryy", "docker.io/library", false}, {"docker.io/library/busybox", "docker.io/library", true}, {"docker.io", "docker.io/library/busybox", false}, {"docker.io/library/busybox", "docker.io/library/busybox", true}, {"example.com/library/busybox", "docker.io/library/busybox", false}, {"docker.io/library/busybox2", "docker.io/library/busybox", false}, // Prefix is a single image {"example.com", "example.com/foo:bar", false}, {"example.com/foo", "example.com/foo:bar", false}, {"example.com/foo:bar", "example.com/foo:bar", true}, {"example.com/foo:bar2", "example.com/foo:bar", false}, {"example.com", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", false}, {"example.com/foo", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", false}, {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", true}, {"example.com/foo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", false}, } { res := refMatchesPrefix(c.ref, c.prefix) assert.Equal(t, c.expected, res, fmt.Sprintf("%s vs. %s", c.ref, c.prefix)) } } func TestConfigPath(t *testing.T) { const nondefaultPath = "/this/is/not/the/default/registries.conf" const variableReference = "$HOME" const rootPrefix = "/root/prefix" for _, c := range []struct { sys *types.SystemContext expected string }{ // The common case {nil, systemRegistriesConfPath}, // There is a context, but it does not override the path. 
{&types.SystemContext{}, systemRegistriesConfPath}, // Path overridden {&types.SystemContext{SystemRegistriesConfPath: nondefaultPath}, nondefaultPath}, // Root overridden { &types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix}, filepath.Join(rootPrefix, systemRegistriesConfPath), }, // Root and path overrides present simultaneously, { &types.SystemContext{ RootForImplicitAbsolutePaths: rootPrefix, SystemRegistriesConfPath: nondefaultPath, }, nondefaultPath, }, // No environment expansion happens in the overridden paths {&types.SystemContext{SystemRegistriesConfPath: variableReference}, variableReference}, } { path := ConfigPath(c.sys) assert.Equal(t, c.expected, path) } } func TestFindRegistry(t *testing.T) { sys := &types.SystemContext{SystemRegistriesConfPath: "testdata/find-registry.conf"} registries, err := GetRegistries(sys) assert.Nil(t, err) assert.Equal(t, 5, len(registries)) reg, err := FindRegistry(sys, "simple-prefix.com/foo/bar:latest") assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "simple-prefix.com", reg.Prefix) assert.Equal(t, reg.Location, "registry.com:5000") // path match reg, err = FindRegistry(sys, "simple-prefix.com/") assert.Nil(t, err) assert.NotNil(t, reg) // hostname match reg, err = FindRegistry(sys, "simple-prefix.com") assert.Nil(t, err) assert.NotNil(t, reg) // invalid match reg, err = FindRegistry(sys, "simple-prefix.comx") assert.Nil(t, err) assert.Nil(t, reg) reg, err = FindRegistry(sys, "complex-prefix.com:4000/with/path/and/beyond:tag") assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "complex-prefix.com:4000/with/path", reg.Prefix) assert.Equal(t, "another-registry.com:5000", reg.Location) reg, err = FindRegistry(sys, "no-prefix.com/foo:tag") assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "no-prefix.com", reg.Prefix) assert.Equal(t, "no-prefix.com", reg.Location) reg, err = FindRegistry(sys, "empty-prefix.com/foo:tag") assert.Nil(t, err) assert.NotNil(t, reg) assert.Equal(t, "empty-prefix.com", reg.Prefix) assert.Equal(t, "empty-prefix.com", reg.Location) _, err = FindRegistry(&types.SystemContext{SystemRegistriesConfPath: "testdata/this-does-not-exist.conf"}, "example.com") assert.Error(t, err) } func assertRegistryLocationsEqual(t *testing.T, expected []string, regs []Registry) { // verify the expected registries and their order names := []string{} for _, r := range regs { names = append(names, r.Location) } assert.Equal(t, expected, names) } func TestFindUnqualifiedSearchRegistries(t *testing.T) { sys := &types.SystemContext{SystemRegistriesConfPath: "testdata/unqualified-search.conf"} registries, err := GetRegistries(sys) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) unqRegs, err := UnqualifiedSearchRegistries(sys) assert.Nil(t, err) assert.Equal(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, unqRegs) _, err = UnqualifiedSearchRegistries(&types.SystemContext{SystemRegistriesConfPath: "testdata/invalid-search.conf"}) assert.Error(t, err) } func TestInvalidV2Configs(t *testing.T) { for _, c := range []struct{ path, errorSubstring string }{ {"testdata/insecure-conflicts.conf", "registry 'registry.com' is defined multiple times with conflicting 'insecure' setting"}, {"testdata/blocked-conflicts.conf", "registry 'registry.com' is defined multiple times with conflicting 'blocked' setting"}, {"testdata/missing-registry-location.conf", "invalid location"}, {"testdata/missing-mirror-location.conf", "invalid location"}, {"testdata/invalid-prefix.conf", "invalid location"}, 
{"testdata/this-does-not-exist.conf", "no such file or directory"}, } { _, err := GetRegistries(&types.SystemContext{SystemRegistriesConfPath: c.path}) assert.Error(t, err, c.path) if c.errorSubstring != "" { assert.Contains(t, err.Error(), c.errorSubstring, c.path) } } } func TestUnmarshalConfig(t *testing.T) { registries, err := GetRegistries(&types.SystemContext{SystemRegistriesConfPath: "testdata/unmarshal.conf"}) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) } func TestV1BackwardsCompatibility(t *testing.T) { sys := &types.SystemContext{SystemRegistriesConfPath: "testdata/v1-compatibility.conf"} registries, err := GetRegistries(sys) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) unqRegs, err := UnqualifiedSearchRegistries(sys) assert.Nil(t, err) assert.Equal(t, []string{"registry-a.com", "registry-c.com", "registry-d.com"}, unqRegs) // check if merging works reg, err := FindRegistry(sys, "registry-b.com/bar/foo/barfoo:latest") assert.Nil(t, err) assert.NotNil(t, reg) assert.True(t, reg.Insecure) assert.True(t, reg.Blocked) for _, c := range []string{"testdata/v1-invalid-block.conf", "testdata/v1-invalid-insecure.conf", "testdata/v1-invalid-search.conf"} { _, err := GetRegistries(&types.SystemContext{SystemRegistriesConfPath: c}) assert.Error(t, err, c) } } func TestMixingV1andV2(t *testing.T) { _, err := GetRegistries(&types.SystemContext{SystemRegistriesConfPath: "testdata/mixing-v1-v2.conf"}) assert.NotNil(t, err) assert.Contains(t, err.Error(), "mixing sysregistry v1/v2 is not supported") } func TestConfigCache(t *testing.T) { configFile, err := ioutil.TempFile("", "sysregistriesv2-test") require.NoError(t, err) defer os.Remove(configFile.Name()) defer configFile.Close() err = ioutil.WriteFile(configFile.Name(), []byte(` [[registry]] location = "registry.com" [[registry.mirror]] location = "mirror-1.registry.com" [[registry.mirror]] location = "mirror-2.registry.com" [[registry]] location = "blocked.registry.com" blocked = true [[registry]] location = "insecure.registry.com" insecure = true [[registry]] location = "untrusted.registry.com" insecure = true`), 0600) require.NoError(t, err) ctx := &types.SystemContext{SystemRegistriesConfPath: configFile.Name()} configCache = make(map[string]*V2RegistriesConf) registries, err := GetRegistries(ctx) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) // empty the config, but use the same SystemContext to show that the // previously specified registries are in the cache err = ioutil.WriteFile(configFile.Name(), []byte{}, 0600) require.NoError(t, err) registries, err = GetRegistries(ctx) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) } func TestInvalidateCache(t *testing.T) { ctx := &types.SystemContext{SystemRegistriesConfPath: "testdata/invalidate-cache.conf"} configCache = make(map[string]*V2RegistriesConf) registries, err := GetRegistries(ctx) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) assertRegistryLocationsEqual(t, []string{"registry.com", "blocked.registry.com", "insecure.registry.com", "untrusted.registry.com"}, registries) // invalidate the cache, make sure it's empty and reload InvalidateCache() assert.Equal(t, 0, len(configCache)) registries, err = GetRegistries(ctx) assert.Nil(t, err) assert.Equal(t, 4, len(registries)) assertRegistryLocationsEqual(t, []string{"registry.com", "blocked.registry.com", "insecure.registry.com", "untrusted.registry.com"}, registries) } func toNamedRef(t *testing.T, ref string) reference.Named { parsedRef, err := reference.ParseNamed(ref) require.NoError(t, 
err) return parsedRef } func TestRewriteReferenceSuccess(t *testing.T) { for _, c := range []struct{ inputRef, prefix, location, expected string }{ // Standard use cases {"example.com/image", "example.com", "example.com", "example.com/image"}, {"example.com/image:latest", "example.com", "example.com", "example.com/image:latest"}, {"example.com:5000/image", "example.com:5000", "example.com:5000", "example.com:5000/image"}, {"example.com:5000/image:latest", "example.com:5000", "example.com:5000", "example.com:5000/image:latest"}, // Separator test ('/', '@', ':') {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com", "example.com", "example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {"example.com/foo/image:latest", "example.com/foo", "example.com", "example.com/image:latest"}, {"example.com/foo/image:latest", "example.com/foo", "example.com/path", "example.com/path/image:latest"}, // Docker examples {"docker.io/library/image:latest", "docker.io", "docker.io", "docker.io/library/image:latest"}, {"docker.io/library/image", "docker.io/library", "example.com", "example.com/image"}, {"docker.io/library/image", "docker.io", "example.com", "example.com/library/image"}, {"docker.io/library/prefix/image", "docker.io/library/prefix", "example.com", "example.com/image"}, } { ref := toNamedRef(t, c.inputRef) testEndpoint := Endpoint{Location: c.location} out, err := testEndpoint.rewriteReference(ref, c.prefix) require.NoError(t, err) assert.Equal(t, c.expected, out.String()) } } func TestRewriteReferenceFailedDuringParseNamed(t *testing.T) { for _, c := range []struct{ inputRef, prefix, location string }{ // Invalid reference format {"example.com/foo/image:latest", "example.com/foo", "example.com/path/"}, {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com/foo", "example.com"}, {"example.com:5000/image:latest", "example.com", ""}, {"example.com:5000/image:latest", "example.com", "example.com:5000"}, // Malformed prefix {"example.com/foo/image:latest", "example.com//foo", "example.com/path"}, {"example.com/image:latest", "image", "anotherimage"}, {"example.com/foo/image:latest", "example.com/foo/", "example.com"}, {"example.com/foo/image", "example.com/fo", "example.com/foo"}, {"example.com/foo:latest", "example.com/fo", "example.com/foo"}, {"example.com/foo@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "example.com/fo", "example.com/foo"}, {"docker.io/library/image", "example.com", "example.com"}, } { ref := toNamedRef(t, c.inputRef) testEndpoint := Endpoint{Location: c.location} out, err := testEndpoint.rewriteReference(ref, c.prefix) assert.NotNil(t, err) assert.Nil(t, out) } } func TestPullSourcesFromReference(t *testing.T) { sys := &types.SystemContext{SystemRegistriesConfPath: "testdata/pull-sources-from-reference.conf"} registries, err := GetRegistries(sys) assert.Nil(t, err) assert.Equal(t, 2, len(registries)) // Registry A allowing any kind of pull from mirrors registryA, err := FindRegistry(sys, "registry-a.com/foo/image:latest") assert.Nil(t, err) assert.NotNil(t, registryA) // Digest referenceADigest := toNamedRef(t, "registry-a.com/foo/image@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") pullSources, err := registryA.PullSourcesFromReference(referenceADigest) assert.Nil(t, err) assert.Equal(t, 3, len(pullSources)) assert.Equal(t, 
"mirror-1.registry-a.com/image@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", pullSources[0].Reference.String()) assert.True(t, pullSources[1].Endpoint.Insecure) // Tag referenceATag := toNamedRef(t, "registry-a.com/foo/image:aaa") pullSources, err = registryA.PullSourcesFromReference(referenceATag) assert.Nil(t, err) assert.Equal(t, 3, len(pullSources)) assert.Equal(t, "registry-a.com/bar/image:aaa", pullSources[2].Reference.String()) // Registry B allowing digests pull only from mirrors registryB, err := FindRegistry(sys, "registry-b.com/foo/image:latest") assert.Nil(t, err) assert.NotNil(t, registryB) // Digest referenceBDigest := toNamedRef(t, "registry-b.com/foo/image@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") pullSources, err = registryB.PullSourcesFromReference(referenceBDigest) assert.Nil(t, err) assert.Equal(t, 3, len(pullSources)) assert.Equal(t, "registry-b.com/bar", pullSources[2].Endpoint.Location) assert.Equal(t, "registry-b.com/bar/image@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", pullSources[2].Reference.String()) // Tag referenceBTag := toNamedRef(t, "registry-b.com/foo/image:aaa") pullSources, err = registryB.PullSourcesFromReference(referenceBTag) assert.Nil(t, err) assert.Equal(t, 1, len(pullSources)) } func TestTryUpdatingCache(t *testing.T) { ctx := &types.SystemContext{ SystemRegistriesConfPath: "testdata/try-update-cache-valid.conf", } configCache = make(map[string]*V2RegistriesConf) registries, err := TryUpdatingCache(ctx) assert.Nil(t, err) assert.Equal(t, 1, len(registries.Registries)) assert.Equal(t, 1, len(configCache)) ctxInvalid := &types.SystemContext{ SystemRegistriesConfPath: "testdata/try-update-cache-invalid.conf", } registries, err = TryUpdatingCache(ctxInvalid) assert.NotNil(t, err) assert.Nil(t, registries) assert.Equal(t, 1, len(configCache)) } image-4.0.1/pkg/sysregistriesv2/testdata/000077500000000000000000000000001354546467100203765ustar00rootroot00000000000000image-4.0.1/pkg/sysregistriesv2/testdata/blocked-conflicts.conf000066400000000000000000000003171354546467100246330ustar00rootroot00000000000000[[registry]] location = "registry.com" [[registry.mirror]] location = "mirror-1.registry.com" [[registry.mirror]] location = "mirror-2.registry.com" [[registry]] location = "registry.com" blocked = true image-4.0.1/pkg/sysregistriesv2/testdata/empty.conf000066400000000000000000000000001354546467100223710ustar00rootroot00000000000000image-4.0.1/pkg/sysregistriesv2/testdata/find-registry.conf000066400000000000000000000005311354546467100240320ustar00rootroot00000000000000[[registry]] location = "registry.com:5000" prefix = "simple-prefix.com" [[registry]] location = "another-registry.com:5000" prefix = "complex-prefix.com:4000/with/path" [[registry]] location = "registry.com:5000" prefix = "another-registry.com" [[registry]] location = "no-prefix.com" [[registry]] location = "empty-prefix.com" prefix = "" image-4.0.1/pkg/sysregistriesv2/testdata/insecure-conflicts.conf000066400000000000000000000003201354546467100250370ustar00rootroot00000000000000[[registry]] location = "registry.com" [[registry.mirror]] location = "mirror-1.registry.com" [[registry.mirror]] location = "mirror-2.registry.com" [[registry]] location = "registry.com" insecure = true image-4.0.1/pkg/sysregistriesv2/testdata/invalid-prefix.conf000066400000000000000000000005441354546467100241710ustar00rootroot00000000000000[[registry]] location = "registry.com:5000" prefix = "http://schema-is-invalid.com" 
[[registry]] location = "another-registry.com:5000" prefix = "complex-prefix.com:4000/with/path" [[registry]] location = "registry.com:5000" prefix = "another-registry.com" [[registry]] location = "no-prefix.com" [[registry]] location = "empty-prefix.com" prefix = "" image-4.0.1/pkg/sysregistriesv2/testdata/invalid-search.conf000066400000000000000000000001121354546467100241300ustar00rootroot00000000000000unqualified-search-registries = ["registry-a.com/namespace-is-forbidden"] image-4.0.1/pkg/sysregistriesv2/testdata/invalidate-cache.conf000066400000000000000000000005341354546467100244300ustar00rootroot00000000000000[[registry]] location = "registry.com" [[registry.mirror]] location = "mirror-1.registry.com" [[registry.mirror]] location = "mirror-2.registry.com" [[registry]] location = "blocked.registry.com" blocked = true [[registry]] location = "insecure.registry.com" insecure = true [[registry]] location = "untrusted.registry.com" insecure = true image-4.0.1/pkg/sysregistriesv2/testdata/mirrors.conf000066400000000000000000000003461354546467100227450ustar00rootroot00000000000000[[registry]] location = "registry.com" [[registry.mirror]] location = "mirror-1.registry.com" [[registry.mirror]] location = "mirror-2.registry.com" insecure = true [[registry]] location = "blocked.registry.com" blocked = true image-4.0.1/pkg/sysregistriesv2/testdata/missing-mirror-location.conf000066400000000000000000000003111354546467100260270ustar00rootroot00000000000000unqualified-search-registries = ["registry-a.com"] [[registry]] location = "registry-a.com" [[registry]] location = "registry-b.com" [[registry.mirror]] location = "mirror-b.com" [[registry.mirror]] image-4.0.1/pkg/sysregistriesv2/testdata/missing-registry-location.conf000066400000000000000000000002251354546467100263710ustar00rootroot00000000000000unqualified-search-registries = ["registry-a.com"] [[registry]] location = "registry-a.com" [[registry]] location = "registry-b.com" [[registry]] image-4.0.1/pkg/sysregistriesv2/testdata/mixing-v1-v2.conf000066400000000000000000000006301354546467100234100ustar00rootroot00000000000000unqualified-search-registries = ["registry-a.com", "registry-c.com"] [registries.search] registries = ["registry-a.com", "registry-c.com"] [registries.block] registries = ["registry-b.com"] [registries.insecure] registries = ["registry-d.com", "registry-e.com", "registry-a.com"] [[registry]] location = "registry-a.com" [[registry]] location = "registry-b.com" [[registry]] location = "registry-c.com"image-4.0.1/pkg/sysregistriesv2/testdata/pull-sources-from-reference.conf000066400000000000000000000006541354546467100266040ustar00rootroot00000000000000[[registry]] prefix = "registry-a.com/foo" location = "registry-a.com/bar" [[registry.mirror]] location = "mirror-1.registry-a.com" [[registry.mirror]] location = "mirror-2.registry-a.com" insecure = true [[registry]] prefix = "registry-b.com/foo" location = "registry-b.com/bar" mirror-by-digest-only = true [[registry.mirror]] location = "mirror-1.registry-b.com" [[registry.mirror]] location = "mirror-2.registry-b.com" image-4.0.1/pkg/sysregistriesv2/testdata/try-update-cache-invalid.conf000066400000000000000000000000101354546467100260170ustar00rootroot00000000000000invalid image-4.0.1/pkg/sysregistriesv2/testdata/try-update-cache-valid.conf000066400000000000000000000000471354546467100255020ustar00rootroot00000000000000[[registry]] location = "registry.com" 
image-4.0.1/pkg/sysregistriesv2/testdata/unmarshal.conf000066400000000000000000000005341354546467100232410ustar00rootroot00000000000000[[registry]] location = "registry.com" [[registry.mirror]] location = "mirror-1.registry.com" [[registry.mirror]] location = "mirror-2.registry.com" [[registry]] location = "blocked.registry.com" blocked = true [[registry]] location = "insecure.registry.com" insecure = true [[registry]] location = "untrusted.registry.com" insecure = true image-4.0.1/pkg/sysregistriesv2/testdata/unqualified-search.conf000066400000000000000000000003771354546467100250250ustar00rootroot00000000000000unqualified-search-registries = ["registry-a.com", "registry-c.com", "registry-d.com"] [[registry]] location = "registry-a.com" [[registry]] location = "registry-b.com" [[registry]] location = "registry-c.com" [[registry]] location = "registry-d.com" image-4.0.1/pkg/sysregistriesv2/testdata/v1-compatibility.conf000066400000000000000000000004011354546467100244350ustar00rootroot00000000000000[registries.search] registries = ["registry-a.com////", "registry-c.com", "registry-d.com"] [registries.block] registries = ["registry-b.com"] [registries.insecure] registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com"] image-4.0.1/pkg/sysregistriesv2/testdata/v1-invalid-block.conf000066400000000000000000000005011354546467100243030ustar00rootroot00000000000000[registries.search] registries = ["registry-a.com////", "registry-c.com", "registry-d.com", "http://schema-is-invalid.com"] [registries.block] registries = ["registry-b.com", "http://schema-is-invalid.com"] [registries.insecure] registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com"] image-4.0.1/pkg/sysregistriesv2/testdata/v1-invalid-insecure.conf000066400000000000000000000004411354546467100250310ustar00rootroot00000000000000[registries.search] registries = ["registry-a.com////", "registry-c.com", "registry-d.com"] [registries.block] registries = ["registry-b.com"] [registries.insecure] registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com", "http://schema-is-invalid.com"] image-4.0.1/pkg/sysregistriesv2/testdata/v1-invalid-search.conf000066400000000000000000000004411354546467100244610ustar00rootroot00000000000000[registries.search] registries = ["registry-a.com////", "registry-c.com", "registry-d.com", "http://schema-is-invalid.com"] [registries.block] registries = ["registry-b.com"] [registries.insecure] registries = ["registry-b.com////", "registry-d.com", "registry-e.com", "registry-a.com"] image-4.0.1/pkg/tlsclientconfig/000077500000000000000000000000001354546467100165655ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/000077500000000000000000000000001354546467100203765ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/full/000077500000000000000000000000001354546467100213405ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/full/ca-cert-1.crt000066400000000000000000000033711354546467100235320ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE+jCCAuKgAwIBAgIJAOwwZJiWYGNwMA0GCSqGSIb3DQEBCwUAMDExLzAtBgNV BAMMJmNvbnRhaW5lcnMvaW1hZ2UgdGVzdCBDQSBjZXJ0aWZpY2F0ZSAxMCAXDTE4 MDgyODE2MDQ0NVoYDzIxMTgwODA0MTYwNDQ1WjAxMS8wLQYDVQQDDCZjb250YWlu ZXJzL2ltYWdlIHRlc3QgQ0EgY2VydGlmaWNhdGUgMTCCAiIwDQYJKoZIhvcNAQEB BQADggIPADCCAgoCggIBAMhS5F4PXlmXAtnxqdbxVRv/YYqKqg+/YV7y2hRuRZk2 DVhcc2qCr80he6V9He9esAlGTAk/tLES4HasB27ry4ExZvAivNhf4VLSso8LANf6 
/mFDAK3+istdlZ4hb2f4eAKmKaCEB3GxB1EMxBWB8BiSZhzSLQfMaWBLOt5HKxNv /7Ha2HOwQqCoqoKR6dg6nt9PV2VLuVsmgI6pKpn9CsXnsplak6hDzUtT71HH80De GsZsfSApRB/iSWlJie80hDKyP5HK5//qFfRAhlrfdb7wuqrsjdafO4WYskVFtvJy 1eU2jmI/EPO83dWhyW/COiMJNHh+8IPYlDP8tCbm8tdGnqF+pZTe5hlXEXvwJwF0 jxWlx6MhiDLX2T2Tq/ypOEsrAWFfRtKY+W1Hbm6w+i9vKKhpxkGFvg7Js/oKPif9 QqKKY6bpERQG9tNZzpU+PcX3y0AyQU1mk4WmlF07p40U2lGddvXwUokEunbvSIKp W3PINodekHuHdDVMA4bMS1SucJtp4MIPw2so83rfcwY0x2rc8ONWd97rJybqZtyf DThWjnCUa/QDuAe2G2dVN3O6h0CZD1h9/faWecu7ppRN1x7HMfgjT5p1y+ZvObJS fQr1AHZFx8lwRBBrAES0ygeuBIzgnCFo5pdeTE7cVbgCb1+5FeLiIhVXjZ9ZESJl AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAKdM SjM6m8+v/FwNboB+y0LC5Nu3BOLbSqo4V/OmNh94Lnk3gsGbJTEmNWfZ6Xa6svgx IcR7ayZz/y2iXF45rbxk1b/UGPe4SR8YqO4z4NnGVjC37HAW1AtoYXsdQTQMjg39 oGJSfRTn58BQbVj2DMYFK7hZhnn1a/9zQgz5wKlxEYqmF2UPHbERf0X6Nj8aj2gQ rHmCx6erAKHDtqRA6WBIEC+01bdeq5Ds+i4x5E1XJtkugNY/MWrid1NXPaiWeJeh HpebfxKXHqY8BKhTothBVJR5N+8YkJSFLCcSIrkZSvdu7vk8ZtuTKVHkjPPeCwKj iIP0/SDLDE2FIH3VXpkuT4FutNkut8P3DAVVgpKEj3SyzSbPchrbVsYdRJk/iqLO bRZot5V6W351U/GPvdyedpzykd9NWqVc1j624M8OXzrso48BKuhd2NlzeCHiev2d VPKkYQzhxrHfQICbLgwVTf9BRPQDhjgzbXVzcEMQEt0eM9bFRhbunlPkcK6zSTeH q6A2XEXuF4Y5f1azJNPX4x6RsPTRt1JmNUUOowcC+ilW3ALIlIQszzmUzKBaIVko 5A6Z2eih1Fj7AwzjnqErVGMhwHIzHkRc/NxE1s//oxwunqWCapduCHxvH7T5k6Dk donX0wDmrj980cDo5+X8ZjjroGJvoraSl0QV2e+g -----END CERTIFICATE----- image-4.0.1/pkg/tlsclientconfig/testdata/full/ca-cert-2.crt000066400000000000000000000033711354546467100235330ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE+jCCAuKgAwIBAgIJAPazFn6MWm78MA0GCSqGSIb3DQEBCwUAMDExLzAtBgNV BAMMJmNvbnRhaW5lcnMvaW1hZ2UgdGVzdCBDQSBjZXJ0aWZpY2F0ZSAyMCAXDTE4 MDgyODE2MDUyMVoYDzIxMTgwODA0MTYwNTIxWjAxMS8wLQYDVQQDDCZjb250YWlu ZXJzL2ltYWdlIHRlc3QgQ0EgY2VydGlmaWNhdGUgMjCCAiIwDQYJKoZIhvcNAQEB BQADggIPADCCAgoCggIBAKOEn+tOrG03S5LMxH/5LBCdQJ/JYVJCw9daPG1rEZSQ RavX6NBZ+JhGg9Gb2d724y595UQ81IuiD6VIz8q0p1Pz09kW6AwQHLcMi64Xg9eN AJ2SnEdpO2mYxydEpReWlkg82ZAgFHGMBUkie2Q07ascxo+MpXd8D4Q12uGVrjbA BUcVz5xVarhX024hYgbuhvaItD2Bg1bwtZ9uAI6pKbIddkEW4rr4K3xLb5IitOk3 Gr3Eg9e1ZIX+ZXbgGOgENlXZ4LouwK0aER9TjGIJ/KRTKZxbGG7lZhQ7ycamtjaX Al1k4zhZ/OkEJF9lsFfQ1KUcPoG6leQw0hMR+6j3iNDHXs3rEhj4brgNIGK5ou0n XKIaz0uaocYBnUPV0Bd3mHFuReouQIBFezoMGw2f9/SPq+aRwW5z0+xuMTJeDuX4 J4mr7+Cm+vLLc3hdTOtw8+oHkKIHwVpC6VWEKXLDzfXCTGbq5+at2qR9edd1DVLc o5wVKh31Oawd/0OxeUN+KRW+txBDLmpIsWxQqTt6S+qRhE3AoOO/3n8jzF/0G6LC raLxIk/NCFaVMQYxK2PCVaQ/qJA5sNGsiHtcC/xmmKW/+0IGN1WOnJBWWCFoy/3s CCXICmHEOT01aO3tTfLt67KyiDY9BCDhWTVtcrknturf/52qQAOuSXllwrIf+9QT AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAB/G UlpC8F1hq9t25fSA5Jf2P6lJXFX2dm+oJDR/dFHwj/tOU2ladVSae0mWzWnCsTyq sCbNcdUlJ1X6rQoVFDeMsnooWFo52P56dNRlPCbboyd1mKBQZffR5ASbmMkdcDbM ZYS5s6XXh80o9HoHKFF95XlBpnGY4GKuvm/LA6EikDcrcDrzwokfNJYEwjTKblh/ QIEdnAeNNohpSIm4flDmJriUwnZxKjEzLcxPbII7kWJwwa/EAySxCfstGJ4KY6Cl gE8a/bJ/R3t/g9ZrHyFxvKkVJ8ZdOE4KX3zbc/Hn2VVlL0/VBbenRuyt9DgSHY6I ghPIpixxI35h4HcmDKD+3Ga6tKO3WNZgTLF66+4UdtvQ9XEGI3l1u9HqNsnYkapi uzPHtIw7MZvNTj1ado4mun5k63Qgq0AsAF6Zffgvl7SNg7LyezMrs4+pythfvm8a c/blO06Ue2hIasjFGDyYGVGXnv1xuQE8zgQ/Ye4e5VRoa0bNJqLPtnH3s9S6+i8E sCtZYk4AZDMSRPE3jlUWdJ6yLEgdrqvtSuPZEX8sc0XnhPnM/OtzfGpRIID+IYVc hqDdB6zpnJ6D4ficmWVRY+HLI3XLKHcTfg1JDfXHK9guNXTd+h1fwtk64XBhVLaf YK5NHbrYyeuaY40cP2SqiDAhoSzzPpuronDjz4Nh -----END CERTIFICATE----- image-4.0.1/pkg/tlsclientconfig/testdata/full/client-cert-1.cert000066400000000000000000000033411354546467100245670ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE6DCCAtACCQCF7Uo5GTGOezANBgkqhkiG9w0BAQsFADA1MTMwMQYDVQQDDCpj 
b250YWluZXJzL2ltYWdlIHRlc3QgY2xpZW50IGNlcnRpZmljYXRlIDEwIBcNMTgw ODI4MTUzNjI5WhgPMjExODA4MDQxNTM2MjlaMDUxMzAxBgNVBAMMKmNvbnRhaW5l cnMvaW1hZ2UgdGVzdCBjbGllbnQgY2VydGlmaWNhdGUgMTCCAiIwDQYJKoZIhvcN AQEBBQADggIPADCCAgoCggIBAKEhMzeyArbV+VuNVP0ZeOrEDNzHooXY9yST0Ftl blXifm8uFrCx+QytGJEFuM8K/Z1AR6MH0x6yXtYsqyCkM3+tFHlErdjm9Zxe+4Oh e+3/NImmnyfMt7N7solyfoGm8RTu5/NhGCjUTVJxS5xwO5rg7UXNHNBSVVyW4VSQ cAUs/j8zqajdAOP+3rE1A9rPLRZTUkuuLZqSPvlth2d1EMeQmvi2EUXoArQ1JKDm FxdpeUJ7qoUyrGuvaSY9jTyvAaqRzBXA6bHZXUbeaCrB1feOY0NEmQdN0+knV/+s an7MVpTrIRtsL+RraGCJX8hJFoTKcc7SiFKUK7Kv5KeqP6GQkv347HlXLZ9Ay+Do b4bRyBu7tRnkoBlCtXSF+7MEFd82le0K9PRqqf4g4riSvPvWdTrvWxJEv1ntD8Mr DRtBfw8MoMTdqTkrUCKzoHKzl2BHb9RzjOuRQpcT4tfNHn0UIn280CbDqzeNnqfp x+1dXLTPiEVRz/BSekcjYcXjqrZPJZ6pm/f7IA041nPq4L/pzZH8NSyMf2K5CjRQ P+ThhHqY2oYQRk73PezZVLFUk1hIuiuyQaoQqLplvNNzeXslyoY02UgjUYm4VHQz ctHD5sETbNJO4oLm4hCVkGZUzogMd3Rki+f6TM0jL0LqCkWzPIsvkdVLrKk2xZOF YPH1AgMBAAEwDQYJKoZIhvcNAQELBQADggIBAG2SYSh6nYy6BR8XpdprRUwyUmDt v2Ee14ItrBFVjlyzJV+hVG9rjB4IUu+Xmj6J78m0y3ONrYS1bXfkIifJULVZFnBw e3Y4l2ke1K0GaMZNvr8JhKB4uX9P/WL5kmYCV6F7YgFqCBr+YcQHRBHYbpVmkc1S YFDzXmBPTI4OoYrNBHyjlF9zgLY40ZsL1Af/j6P11ufdNrqdiCJ6RcfCdsTMORd0 H+xyCjIX54A+V5CWhUix53gQEuN8W6zPyRtRzV+zLX05bYIC7gYqQ+j6qvh6dEkF zNr0YGLw2ecM/KLgocPPsaRGeAnXadnIP4Zt0YynAme0jSqYHK3JJD8qWZdj1QOd bJ9twiO+4G+UC2cMZ/OaujVpHr8QjSppHEb+uw4mUqiQtzXBH42DjKeUZFA3MXbp PWg8xmeuxS0uhb/j6Ookg9wREjcdb9dja7Ym/qslH5aix9CbULr4H6vllwMnFgiN cKXuqupnvCihxVe2n1RHQetvgacOyMoi9/1AwJ6WLnHU+8KHNSdlxD9JrzYQ+WeZ N82yqBZkKbmESj9BZuRT1Pl7y0qWAPmB9HiAr9A1LenoH/ZG2JBSCGiraUb6zxvg Ros7TQYAh1C3dgdwyiISVvCblVQdn4nFbYCBwFWbPrpMM/PNKQ8Hmdj/2rRKPX6q Ho8jfpXhdO8eMcbJ -----END CERTIFICATE----- image-4.0.1/pkg/tlsclientconfig/testdata/full/client-cert-1.key000066400000000000000000000063101354546467100244210ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQChITM3sgK21flb jVT9GXjqxAzcx6KF2Pckk9BbZW5V4n5vLhawsfkMrRiRBbjPCv2dQEejB9Mesl7W LKsgpDN/rRR5RK3Y5vWcXvuDoXvt/zSJpp8nzLeze7KJcn6BpvEU7ufzYRgo1E1S cUuccDua4O1FzRzQUlVcluFUkHAFLP4/M6mo3QDj/t6xNQPazy0WU1JLri2akj75 bYdndRDHkJr4thFF6AK0NSSg5hcXaXlCe6qFMqxrr2kmPY08rwGqkcwVwOmx2V1G 3mgqwdX3jmNDRJkHTdPpJ1f/rGp+zFaU6yEbbC/ka2hgiV/ISRaEynHO0ohSlCuy r+Snqj+hkJL9+Ox5Vy2fQMvg6G+G0cgbu7UZ5KAZQrV0hfuzBBXfNpXtCvT0aqn+ IOK4krz71nU671sSRL9Z7Q/DKw0bQX8PDKDE3ak5K1Ais6Bys5dgR2/Uc4zrkUKX E+LXzR59FCJ9vNAmw6s3jZ6n6cftXVy0z4hFUc/wUnpHI2HF46q2TyWeqZv3+yAN ONZz6uC/6c2R/DUsjH9iuQo0UD/k4YR6mNqGEEZO9z3s2VSxVJNYSLorskGqEKi6 ZbzTc3l7JcqGNNlII1GJuFR0M3LRw+bBE2zSTuKC5uIQlZBmVM6IDHd0ZIvn+kzN Iy9C6gpFszyLL5HVS6ypNsWThWDx9QIDAQABAoICABfPxTbk2pvUcT5DW5zvp7vh 7xitc+odzyAbq/3ltnGAwDiD8hx4oJpr9iZwVwhZ0nLrrep83Ik7yOITxdJUEbw7 grwNFzfnocJTEw43zg202jnBYuHJ0hf3HsJLJkDYv+XdDHAeGCjofujBD3B18YkI 1merUSfEExNUxMXvdm59YLFMXDU1O811u8kqUYCDf2E+PPosKlceZ6oKsqjqVul3 CD/bACB5kfS5qckRV7ZBAwd9KQz4GRzs1jgtfnLKVg+z7hoE4UREIBG6CmDPNmSY KTkmqQq4SKm7A7kn7LECV4U4XjMkQMubx9gEVSBPFMHY/QqYeEbj2LVWYw8YDMEC dWrAR9+kg5Y5LLlxTEHv3nJSAwWf2MmSxZGO3Gav9vxMv0+lUC90A874y61bnebf ombV+WfrpVrSQd3UvDSwGxbKIdIsqz/6VTd1qrURIRNiL57JfbeQVo7p/cRjFsk1 yzhPnFqVjgC3xt2bpzVMhFl1jovpk7YHctm305Oh1L5+O/OTILyuzVQOLVlmQlSh 69amtdWioUD+kuzAyxplue/achpk9ylB5/p2I5yHx7i4UOs8SVBHIgSi+6y8CMPi 1f9U8r+MbziLPP8fX4KqltZ15gMJRQ5yAjiaLOEq1ysvcdNxnSEHpDppN2XqDZXX th5ma+zuN/Y5eXLouIv9AoIBAQDV8sG8lskAr6aOlPhWyPgrOSrIH0Lvcc2/+Pn+ SkEry5HsZKXQhxbLb3WGGyVrocwv4RIRrsfqMGUN0HMbJbZLVTUTJ6uRDZe7zdP8 FlY8q453XClOXxkXjtqg/ekstm7e0ZtsDXICzajDKZBt9iH1RtWshKlGncBE8w47 gaMthbw8a2694ov34XC2A8AtrI/NOwXT1j2hhZ5oBVx8sDfNPgL/Tbklv90vWWQ8 
uTOlrQCekLvcdAH+SnyBhq/9sOKmsR8O+Cq8hrKeRyMCRe50612wO3V3w6Ug76R0 4qH93hHH5fF/tT3ix4nUemjDC2fFt0dHBd8u0FvqzxFADuJTAoIBAQDAzMTLbKkq FEY6tsXxDcv/Vd5AY7TjuXOe4hwner25KUfTSEpo+0KQGi4wZcUhC9edqea4trvu 8aTD2E1t7HwU9qC3MLfbCsoFTC3PuI9zZuBYBQ1QJSab5xaLCSKEg/Xf+uVcFSTv lTRYHvAIUFQtH6A6xuGjOFSa3d0xbgOJYlGDSsdZ6eeEocpU86X6YEfJ4PU9RCXb 6XfFqxG4qBhEnJiGVaqNSFkrlyEScS5ZOZeNJfxsxaHtddopPsoZXKWeG5enPQ+S i2BGZEVd73vgASYz8fqb9j8zwcUrL6ycSiWvWx2r94DPZ9DKIBzAKNF4X0dUkXn7 NvRCloWYyGGXAoIBAC4AKw2scf1Wxv4DI74tKcbJXNx4dEDdfDqZgs7dElQjSfXE 0i4azZjiFyfFcG9K84cb4nGw3cUJsMCeoBEnM6HQ6T98GRRwEr7Li5e5CcOzs0cQ psT2B4QcL//LeDYn54C7GqrJ3UijBXUo26f48uY0275jK65GPs/UXqwGvJoOFiH7 i20CZ8vdSgmolsp7PtQGq8MXXlr7Ssrc1Dzu+qCDg8t236cxMJJ8quOvgl5N60Ms pWfJ/Z+6TjjfR7nJLYJftOjYDQBKCd+kNe/IL3QuIw/ASQp+I2QIgpirHd9ivvn3 A8zMoEjBVG49/4ZoTmChfo7TwV/kZs7+xJu8V4cCggEAMIsePa3lRfAG4r+kRRZZ N7fyFrpEEFZgUy/RMOuJm3ZWXE9GVPp2fvvoh00IflKR/mOJ8RYpaXc0Kg9rJ3M0 pr6WJqnAkQk9ZmoQ2s04aTEM6XyUJorEFtrvZSBurXjgTn3IhA/a1ev2Wv2nKCC5 oQbmfAYZR9RmEPwttkVh5JR5u5n3aZ8oKM/nts4GC210hdL7TGq9MYquGnoBI0JH ofbfvGsTXzFJKl2J+S/AIL0MeICI9wYnyDc1L89caVhXZuDBpV0Nb7NiETcr9APZ Z8RCTy/tDpnIvgpaz43YYx2UMXu6k9LkftQ/0LSXKJbebvVma1eZo/PpOl7V0msZ jwKCAQEAljJQnDUWiK73B1vMLjeEwuy/p86p43xCXGMW1rX6+AayYt3/ODNmKh8s AhOF2Tl0dDJTBibJcpcoGCKm4nL3k9tO23U9omrBz5obRDl4To7l+ALv3x+tf+Fq b/nVkZKhyUh99RneOjOfpEI6Cd4ffQkXA05/bFGdVVaMJ0yzeC8qQ/QGbyAwpZiL c1e7Kju5uv+rT4czqKmQ+YsKpSM1Xjz9Mzoxs5E3OCdXSYsv7oo/sS1aXunQTZ6L xv1M+F9YlCtgo8+1IdlYvcFb4WusAIDf3xjO1bDCvlYzv5JBKWtyO3BVKkfWzSx1 yKIoxKzIpzNh5dHk6iIDjp1B/YU6fA== -----END PRIVATE KEY----- image-4.0.1/pkg/tlsclientconfig/testdata/full/client-cert-2.cert000066400000000000000000000033411354546467100245700ustar00rootroot00000000000000-----BEGIN CERTIFICATE----- MIIE6DCCAtACCQCZd3nR9D1jPzANBgkqhkiG9w0BAQsFADA1MTMwMQYDVQQDDCpj b250YWluZXJzL2ltYWdlIHRlc3QgY2xpZW50IGNlcnRpZmljYXRlIDIwIBcNMTgw ODI4MTUzNzAyWhgPMjExODA4MDQxNTM3MDJaMDUxMzAxBgNVBAMMKmNvbnRhaW5l cnMvaW1hZ2UgdGVzdCBjbGllbnQgY2VydGlmaWNhdGUgMjCCAiIwDQYJKoZIhvcN AQEBBQADggIPADCCAgoCggIBAPQITxwSAieNkNlDDcZUlGeiQmm7tEF9TKqCF06j LvM+O8nA/DmNIIw84mAeuLH2/vv7jkS3Hz5MEj1duaiAAzhjRrcHTLj63ttcpomP yc5OyW1GiuVYWKLKdZWRPaiSBeIV+uCGMaxvAzPivYiNz1GhYYt0Jf84OZm8oCZT HEI/jNRp56nYQDIAW3dwf6LpQ2dyc3n1O2eysDp02AvWfIr9w8agUgMykWziN2ZX c3fs/8UJ1Ta81YTuuuwTfEB2TtUmzxYql4WjxzXOL/rN7PqlwG7SZ1q6hoACP4K7 v4tu7cXq79F16xHoKolJaIHnCznZrK3Hmp846/+x0VWn4Ic05f3qB89WSl7i6oa2 pdgDE5bteSFtEhNXvzy+eobcKqhhqT3TF6oQiD9ufz2s5eTRJmu31sVUOW2+2LaZ QipQu7G+awpqKh/k95GZNTXhWrDP1OQTtzYZui+l9Ri4lqM1LAlLb35zo6rqUj1A HfyQrwADFzRQRY/zPijI8I8fnfChGLuW3q/WC0CyrTIkviE/JGY7XmmyT/KxABhY rDCHWHXQ4nz14PulRBtvTJfMhHBcMM8guYzleeeGrcHX5y5IG7JR1QL+Y+h2zW0d 29RbusXKAkaN7SqlAGB8NIwU5x/Y9UWD0ar9IEXQT2bDkXHWTk2qvbAXwvqkv3r5 qbB9AgMBAAEwDQYJKoZIhvcNAQELBQADggIBAD0T3FqFhuJ5OGbYaay5Iw5EKLS+ njNIDNvsy9Q8+F1aa/F27KJfi8x8EQj1YabwOt1gS201jwJshC+AirrJUxdGBJqC cVDPGeqY+zMKd+17ZgWKza81Y9qYBjx01v//Et5YYKmeS8q3xTsvbloJJ5D7vx5y VcNwnO7yx5/IMDWCIAbw5j2BikILW0gMCfBK90o4z7Le7kPFLreLiUCfXYZjfbT7 bT2v8Oy0OISVNLQjajxepK5+C9Qupaj5nL0GtTj37FOs6rulcWEWqX+kGXSctvrA nuzcjGUkuQBOcMjEUaYRKLZ+Tghla4pFgJLrfKQgW+5Mahbnz3ehvzDc3LcScYCj u0qyP+w5rW8/Tm9vE9QqwblUX7wZ4/zqTDSv5spdHi5x4Q77MomjDEfP83QnEAhg Y4wixJBas64227rxJJQT30C2QcuwYMz4STQgjSGDPfomUr8tVPM8JcU4pq+fg7g9 T0MvfNAWgMhUZllhxTntbHVbv43A2p3eEE0fuW5SOJyAvt9ROZMvQcDWJfw0TDey dn9+Bz7CMbJvZAoV8P4Gm1+iuWZhUWBG7FdrXEzhbDh2GkSmd3jmUSISVA1G061b 5QVkW5NAr2jZrWhyTXiL0AFbD7QNKTJHma/IcRoJlxQ9rncIdgPLMNBbjridb4dn PllRbfAWuBgV7vLu -----END CERTIFICATE----- 
image-4.0.1/pkg/tlsclientconfig/testdata/full/client-cert-2.key000066400000000000000000000063101354546467100244220ustar00rootroot00000000000000-----BEGIN PRIVATE KEY----- MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQD0CE8cEgInjZDZ Qw3GVJRnokJpu7RBfUyqghdOoy7zPjvJwPw5jSCMPOJgHrix9v77+45Etx8+TBI9 XbmogAM4Y0a3B0y4+t7bXKaJj8nOTsltRorlWFiiynWVkT2okgXiFfrghjGsbwMz 4r2Ijc9RoWGLdCX/ODmZvKAmUxxCP4zUaeep2EAyAFt3cH+i6UNncnN59TtnsrA6 dNgL1nyK/cPGoFIDMpFs4jdmV3N37P/FCdU2vNWE7rrsE3xAdk7VJs8WKpeFo8c1 zi/6zez6pcBu0mdauoaAAj+Cu7+Lbu3F6u/RdesR6CqJSWiB5ws52aytx5qfOOv/ sdFVp+CHNOX96gfPVkpe4uqGtqXYAxOW7XkhbRITV788vnqG3CqoYak90xeqEIg/ bn89rOXk0SZrt9bFVDltvti2mUIqULuxvmsKaiof5PeRmTU14Vqwz9TkE7c2Gbov pfUYuJajNSwJS29+c6Oq6lI9QB38kK8AAxc0UEWP8z4oyPCPH53woRi7lt6v1gtA sq0yJL4hPyRmO15psk/ysQAYWKwwh1h10OJ89eD7pUQbb0yXzIRwXDDPILmM5Xnn hq3B1+cuSBuyUdUC/mPods1tHdvUW7rFygJGje0qpQBgfDSMFOcf2PVFg9Gq/SBF 0E9mw5Fx1k5Nqr2wF8L6pL96+amwfQIDAQABAoICAENiu/2jV2b2p4XnBzm6B1Tq sG4j/+2JnQ8EZ002sHNIvxfCK6P0vYUcFuK1+QhjjRul5i5BZuY7+VlqtSa7WqhK ea9224/E7p8iYXZg9zf595MuRJJ6J9ekEn4foigXUnqQ2TsAs1zLNtYwCWhYyGPh LV9lzkxAiV9Tf+G1V3tPsyLAr9DtvyRPhvJfZU9mHE/Hge2ucx36cMFjnHkAXFnB IzuI77ykTRYFDYk8Is19607MnUzFLizMA1/HLatbP/+J/OtBDaBAjnTkH8mg7Yx1 EsNHZHTgRt24QTNnhsgI9K2PZ3OunR/PvVc1px8f3rC11AIUuTS4ciqkPorplY0W Wcgy9+saF2EL6Peb5zaWTAiCewrcI2ME9G6ytZtWovHzJqyE4iJMaQtLeczoaTk5 lkl8r+zkAWGnxb6Xz1qd1UuAyXfZO3Ux7shxDJKxoQrgkHvAyEHkyBhJe3ghKElh C+mGr1z15R+buL8daUC+SG2z0H9gTcYiOcOznGUhtOgR2EPalAC/xKEtYjsEHf3F VtljEu29KPYcLokBFcI35Tij9KVfGJePMAEji8lLqrImwpPzsoPEdUVx7jvdy6gt hCsgIo8Vpd/3D12Q4ul+E7ztsWX6wYoxIYGK2/KSAjd0fpla/UFwkrpSSnqkGLR0 qhoxZz1158vuAtQCb7DdAoIBAQD8bHdBmWovRRywjPaLGPngVAkR4z5yo1OM9lRE So6O9zyZvk7cJ4JQYpS3o1nXY9mw5YAG2qPrArXBIWwHq0vi7gkhng7gbmn/1j7o ZrgEE5z//tt0Gl5Yqs35uiMcOlTrQ/1DsHWfJDw6udkQG5EwyrNRvcxaeXGx9dWc Yzfq93WODSFWhzhgy0CG0q2Zj/VtgoO6gwkZI9KX9X7ELKyCfIqczqMYge5uxp0l dTS6kNrhUACGCU5yBREKX/XwNxreFS6AuCG0KhBjwX3NeuBfKC2yGAOxr91Yz3l4 o91CKFXDVXWtpvJCW/zIaeSfByqhgBYUpZPg4VniNqMheB87AoIBAQD3fWiLbidV lRQ0O4jjAxtsUBKC95oxz4ipDerLlEAL7WA8h0eU05BLv8Nn5Ql5N7m/LTM6XWsg cWMObk6UDS4N1pR7Sg1sPAlzK52x8CYLDswnYWjeejCP5yY8v8u3NCqCPuxvDlPp 0InR82xRunFEEG9ukC06krGDyMWCwaRbQRJBkRagBvUJA3/A+ZAC2vdN5WhPtZT3 LT01T2Olk19URLgFTo1hia2o6L5cq7TXeNsZoNfLlaSV33/+fnYqb3F0SmDX+mOV 8zRV4bp9Qdc2vNkdzzC8s2EXr3UBl4miT/ovV0X5v+KPyfH2Rf/Hf0AoBhiHWRIO NT9L912QFOOnAoIBABzXR8j2/mroOp7xfDnoYKSA9YhVrozqnGE+w+IJosAy/3mR hPEikoEcwmE5CMrTXcwYbMhbst7nMF0gtHcr2z/Scriklo6ODw5sLEPheKT+mLGn LOvXF3CKE361Bc3z3EAFRKq3PrkwKrGLCoIMpEou3s44IWE0/wiWThHQRFNUctoI Jgb87DQjBPxilfM1v5UDlIl6708wCJ/ULOe9Mvi1wiCoe3oRXmzJxKrC+YNXiaq0 uVqXNZ9RdOD6ld8cbLVzNhz+7Nro83ZyZS1VHM3CiXYPyFxE+8Vp7zcZge5NLX9k BE4TBRsP55H+h2CkMPrC58L0KDFJjjuKgpkQYIECggEAb5scNBRepJdv3wYh+s2B 5lxNnMXvwRqntCTUhy25nCdVyLXwr8qgPaiihA2jMgjROMc3XNCX6K95Th6sTNOM uyzTFK4WU4LXeBppKL71SPNJWVDyK7HKiHpioe6T/XAG42lg0cwSR1SFcipl5I1B WsJWnfNikhFo/9bgStDsP0Ke6vZ0z4GTqpbrW6ivKrp336beXWOzY6wA+DNu6lIF IUlD+xCrbRrbN1qNzdiY2rpjg7Em32YCLJv3alq5CvXqodiQx5Tgp9Re+4Opx6aT WNncxzaR8eaqmDXFfAxMQufyGLswkSnZD6Kv/LEgYWSfF+13zkF6hPG6M5W/maPx 7QKCAQEA+EH/bZrfFIthC4pj2PZ7yR6AjvB0CUEc+s2Vk60kGCx7Ykl4OZfxvHHf A/4890Lb1+lEEo7+mBqq3IOOWKKYz+K7a81G668X3KltmJd3twMWzDy9+RN2sGbY ww3GQqS2B99N/E5e1N3TLm01aeqEUJqyYqsT0cx+vGQyiIqPF/1SLSqy0ZAl4ZrV Zb+Zl/EJxi+wPYuax90HFgHRc1RAkN5YDicLlmiPCUWjx9gPN5LOXKMQmh7J7cs1 n1YP3erz76BQA9q0dkJmHr54FXxkBaFR3SGvIjxjArkNSvl2GuT73kgfbVFfT5QB Kesl5Q7sHOaJXry12QuWE5/Kj4absA== -----END PRIVATE KEY----- 
image-4.0.1/pkg/tlsclientconfig/testdata/missing-cert/000077500000000000000000000000001354546467100230025ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/missing-cert/client-cert-1.key000077700000000000000000000000001354546467100323142../full/client-cert-1.keyustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/missing-key/000077500000000000000000000000001354546467100226355ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/missing-key/client-cert-1.cert000077700000000000000000000000001354546467100324612../full/client-cert-1.certustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-ca/000077500000000000000000000000001354546467100230615ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-ca/unreadable.crt000077700000000000000000000000001354546467100315272/this/does/not/existustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-cert/000077500000000000000000000000001354546467100234335ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.cert000077700000000000000000000000001354546467100325132/this/does/not/existustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-cert/client-cert-1.key000077700000000000000000000000001354546467100327452../full/client-cert-1.keyustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-key/000077500000000000000000000000001354546467100232665ustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.cert000077700000000000000000000000001354546467100331122../full/client-cert-1.certustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/testdata/unreadable-key/client-cert-1.key000077700000000000000000000000001354546467100322012/this/does/not/existustar00rootroot00000000000000image-4.0.1/pkg/tlsclientconfig/tlsclientconfig.go000066400000000000000000000057611354546467100223140ustar00rootroot00000000000000package tlsclientconfig import ( "crypto/tls" "io/ioutil" "net" "net/http" "os" "path/filepath" "strings" "time" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc func SetupCertificates(dir string, tlsc *tls.Config) error { logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) fs, err := ioutil.ReadDir(dir) if err != nil { if os.IsNotExist(err) { return nil } if os.IsPermission(err) { logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) return nil } return err } for _, f := range fs { fullPath := filepath.Join(dir, f.Name()) if strings.HasSuffix(f.Name(), ".crt") { logrus.Debugf(" crt: %s", fullPath) data, err := ioutil.ReadFile(fullPath) if err != nil { if os.IsNotExist(err) { // Dangling symbolic link? // Race with someone who deleted the // file after we read the directory's // list of contents? 
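// Either way, treat this as non-fatal for the whole scan: warn and skip just this file.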
logrus.Warnf("error reading certificate %q: %v", fullPath, err) continue } return err } if tlsc.RootCAs == nil { systemPool, err := tlsconfig.SystemCertPool() if err != nil { return errors.Wrap(err, "unable to get system cert pool") } tlsc.RootCAs = systemPool } tlsc.RootCAs.AppendCertsFromPEM(data) } if strings.HasSuffix(f.Name(), ".cert") { certName := f.Name() keyName := certName[:len(certName)-5] + ".key" logrus.Debugf(" cert: %s", fullPath) if !hasFile(fs, keyName) { return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) } cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) if err != nil { return err } tlsc.Certificates = append(tlsc.Certificates, cert) } if strings.HasSuffix(f.Name(), ".key") { keyName := f.Name() certName := keyName[:len(keyName)-4] + ".cert" logrus.Debugf(" key: %s", fullPath) if !hasFile(fs, certName) { return errors.Errorf("missing client certificate %s for key %s", certName, keyName) } } } return nil } func hasFile(files []os.FileInfo, name string) bool { for _, f := range files { if f.Name() == name { return true } } return false } // NewTransport Creates a default transport func NewTransport() *http.Transport { direct := &net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, } tr := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: direct.Dial, TLSHandshakeTimeout: 10 * time.Second, // TODO(dmcgowan): Call close idle connections when complete and use keep alive DisableKeepAlives: true, } proxyDialer, err := sockets.DialerFromEnvironment(direct) if err == nil { tr.Dial = proxyDialer.Dial } return tr } image-4.0.1/pkg/tlsclientconfig/tlsclientconfig_test.go000066400000000000000000000062411354546467100233450ustar00rootroot00000000000000package tlsclientconfig import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "io/ioutil" "os" "sort" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSetupCertificates(t *testing.T) { // Success tlsc := tls.Config{} err := SetupCertificates("testdata/full", &tlsc) require.NoError(t, err) require.NotNil(t, tlsc.RootCAs) // RootCAs include SystemCertPool loadedSubjectBytes := map[string]struct{}{} for _, s := range tlsc.RootCAs.Subjects() { loadedSubjectBytes[string(s)] = struct{}{} } systemCertPool, err := x509.SystemCertPool() require.NoError(t, err) for _, s := range systemCertPool.Subjects() { _, ok := loadedSubjectBytes[string(s)] assert.True(t, ok) } // RootCAs include our certificates loadedSubjectCNs := map[string]struct{}{} for _, s := range tlsc.RootCAs.Subjects() { subjectRDN := pkix.RDNSequence{} rest, err := asn1.Unmarshal(s, &subjectRDN) require.NoError(t, err) require.Empty(t, rest) subject := pkix.Name{} subject.FillFromRDNSequence(&subjectRDN) loadedSubjectCNs[subject.CommonName] = struct{}{} } _, ok := loadedSubjectCNs["containers/image test CA certificate 1"] assert.True(t, ok) _, ok = loadedSubjectCNs["containers/image test CA certificate 2"] assert.True(t, ok) // Certificates include our certificates require.Len(t, tlsc.Certificates, 2) names := []string{} for _, c := range tlsc.Certificates { require.Len(t, c.Certificate, 1) parsed, err := x509.ParseCertificate(c.Certificate[0]) require.NoError(t, err) names = append(names, parsed.Subject.CommonName) } sort.Strings(names) assert.Equal(t, []string{ "containers/image test client certificate 1", "containers/image test client certificate 2", 
}, names) // Directory does not exist tlsc = tls.Config{} err = SetupCertificates("/this/does/not/exist", &tlsc) require.NoError(t, err) assert.Equal(t, &tls.Config{}, &tlsc) // Directory not accessible unreadableDir, err := ioutil.TempDir("", "containers-image-tlsclientconfig") require.NoError(t, err) defer func() { _ = os.Chmod(unreadableDir, 0700) _ = os.Remove(unreadableDir) }() err = os.Chmod(unreadableDir, 000) require.NoError(t, err) tlsc = tls.Config{} err = SetupCertificates(unreadableDir, &tlsc) assert.NoError(t, err) assert.Equal(t, &tls.Config{}, &tlsc) // Other error reading the directory tlsc = tls.Config{} err = SetupCertificates("/dev/null/is/not/a/directory", &tlsc) assert.Error(t, err) // Unreadable system cert pool untested // Unreadable CA certificate tlsc = tls.Config{} err = SetupCertificates("testdata/unreadable-ca", &tlsc) assert.NoError(t, err) assert.Nil(t, tlsc.RootCAs) // Missing key file tlsc = tls.Config{} err = SetupCertificates("testdata/missing-key", &tlsc) assert.Error(t, err) // Missing certificate file tlsc = tls.Config{} err = SetupCertificates("testdata/missing-cert", &tlsc) assert.Error(t, err) // Unreadable key file tlsc = tls.Config{} err = SetupCertificates("testdata/unreadable-key", &tlsc) assert.Error(t, err) // Unreadable certificate file tlsc = tls.Config{} err = SetupCertificates("testdata/unreadable-cert", &tlsc) assert.Error(t, err) } image-4.0.1/registries.conf000066400000000000000000000072111354546467100156450ustar00rootroot00000000000000# For more information on this configuration file, see containers-registries.conf(5). # # There are multiple versions of the configuration syntax available, where the # second iteration is backwards compatible with the first one. Mixing up both # formats will result in a runtime error. # # The initial configuration format looks like this: # # Registries to search for images that are not fully-qualified. # i.e. foobar.com/my_image:latest vs my_image:latest [registries.search] registries = [] # Registries that do not use TLS when pulling images or use self-signed # certificates. [registries.insecure] registries = [] # Blocked Registries blocks the `docker daemon` from pulling from the blocked registry. If you specify # "*", then the docker daemon will only be allowed to pull from registries listed above in the search # registries. Blocked Registries is deprecated because other container runtimes and tools will not use it. # It is recommended that you use the trust policy file /etc/containers/policy.json to control which # registries you want to allow users to pull and push from. policy.json gives greater flexibility, and # supports all container runtimes and tools including the docker daemon, cri-o, buildah ... # The atomic CLI `atomic trust` can be used to easily configure the policy.json file. [registries.block] registries = [] # The second version of the configuration format allows specifying registry # mirrors: # # # An array of host[:port] registries to try when pulling an unqualified image, in order. # unqualified-search-registries = ["example.com"] # # [[registry]] # # The "prefix" field is used to choose the relevant [[registry]] TOML table; # # (only) the TOML table with the longest match for the input image name # # (taking into account namespace/repo/tag/digest separators) is used. # # # # If the prefix field is missing, it defaults to be the same as the "location" field.
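# # (Prefix matching is namespace-aware: "example.com/foo" matches "example.com/foo/image" but not "example.com/foobar".)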
# prefix = "example.com/foo" # # # If true, unencrypted HTTP as well as TLS connections with untrusted # # certificates are allowed. # insecure = false # # # If true, pulling images with matching names is forbidden. # blocked = false # # # The physical location of the "prefix"-rooted namespace. # # # # By default, this is equal to "prefix" (in which case "prefix" can be omitted # # and the [[registry]] TOML table can only specify "location"). # # # # Example: Given # # prefix = "example.com/foo" # # location = "internal-registry-for-example.net/bar" # # requests for the image example.com/foo/myimage:latest will actually work with the # # internal-registry-for-example.net/bar/myimage:latest image. # location = "internal-registry-for-example.net/bar" # # # (Possibly-partial) mirrors for the "prefix"-rooted namespace. # # # # The mirrors are attempted in the specified order; the first one that can be # # contacted and contains the image will be used (and if none of the mirrors contains the image, # # the primary location specified by the "registry.location" field, or using the unmodified # # user-specified reference, is tried last). # # # # Each TOML table in the "mirror" array can contain the following fields, with the same semantics # # as if specified in the [[registry]] TOML table directly: # # - location # # - insecure # [[registry.mirror]] # location = "example-mirror-0.local/mirror-for-foo" # [[registry.mirror]] # location = "example-mirror-1.local/mirrors/foo" # insecure = true # # Given the above, a pull of example.com/foo/image:latest will try: # # 1. example-mirror-0.local/mirror-for-foo/image:latest # # 2. example-mirror-1.local/mirrors/foo/image:latest # # 3. internal-registry-for-example.net/bar/image:latest # # in order, and use the first one that exists. image-4.0.1/signature/000077500000000000000000000000001354546467100146165ustar00rootroot00000000000000image-4.0.1/signature/docker.go000066400000000000000000000046771354546467100164300ustar00rootroot00000000000000// Note: Consider the API unstable until the code supports at least three different image formats or transports. package signature import ( "fmt" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/manifest" "github.com/opencontainers/go-digest" ) // SignDockerManifest returns a signature for manifest as the specified dockerReference, // using mech and keyIdentity. func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { manifestDigest, err := manifest.Digest(m) if err != nil { return nil, err } sig := newUntrustedSignature(manifestDigest, dockerReference) return sig.sign(mech, keyIdentity) } // VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, // using mech.
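// A minimal verification sketch (hypothetical caller code; the reference and fingerprint are placeholder assumptions): // mech, err := NewGPGSigningMechanism() // uses the default GPG home // defer mech.Close() // sig, err := VerifyDockerManifestSignature(sigBlob, manifestBlob, "docker.io/library/busybox:latest", mech, "<key fingerprint>") // on success, sig.DockerReference and sig.DockerManifestDigest describe what was actually signed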
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) if err != nil { return nil, err } sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { if keyIdentity != expectedKeyIdentity { return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} } return nil }, validateSignedDockerReference: func(signedDockerReference string) error { signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) if err != nil { return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} } if signedRef.String() != expectedRef.String() { return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", signedDockerReference, expectedDockerReference)} } return nil }, validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) if err != nil { return err } if !matches { return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} } return nil }, }) if err != nil { return nil, err } return sig, nil } image-4.0.1/signature/docker_test.go000066400000000000000000000102551354546467100174560ustar00rootroot00000000000000package signature import ( "io/ioutil" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSignDockerManifest(t *testing.T) { mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { t.Skipf("Signing not supported: %v", err) } manifest, err := ioutil.ReadFile("fixtures/image.manifest.json") require.NoError(t, err) // Successful signing signature, err := SignDockerManifest(manifest, TestImageSignatureReference, mech, TestKeyFingerprint) require.NoError(t, err) verified, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) assert.NoError(t, err) assert.Equal(t, TestImageSignatureReference, verified.DockerReference) assert.Equal(t, TestImageManifestDigest, verified.DockerManifestDigest) // Error computing Docker manifest invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json") require.NoError(t, err) _, err = SignDockerManifest(invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint) assert.Error(t, err) // Error creating blob to sign _, err = SignDockerManifest(manifest, "", mech, TestKeyFingerprint) assert.Error(t, err) // Error signing _, err = SignDockerManifest(manifest, TestImageSignatureReference, mech, "this fingerprint doesn't exist") assert.Error(t, err) } func TestVerifyDockerManifestSignature(t *testing.T) { mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) require.NoError(t, err) defer mech.Close() manifest, err := ioutil.ReadFile("fixtures/image.manifest.json") require.NoError(t, err) signature, err := ioutil.ReadFile("fixtures/image.signature") require.NoError(t, err) // Successful verification sig, err := VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, 
TestKeyFingerprint) require.NoError(t, err) assert.Equal(t, TestImageSignatureReference, sig.DockerReference) assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest) // Verification using a different canonicalization of TestImageSignatureReference sig, err = VerifyDockerManifestSignature(signature, manifest, "docker.io/"+TestImageSignatureReference, mech, TestKeyFingerprint) require.NoError(t, err) assert.Equal(t, TestImageSignatureReference, sig.DockerReference) assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest) // For extra paranoia, test that we return nil data on error. // Invalid docker reference on input sig, err = VerifyDockerManifestSignature(signature, manifest, "UPPERCASEISINVALID", mech, TestKeyFingerprint) assert.Error(t, err) assert.Nil(t, sig) // Error computing Docker manifest invalidManifest, err := ioutil.ReadFile("fixtures/v2s1-invalid-signatures.manifest.json") require.NoError(t, err) sig, err = VerifyDockerManifestSignature(signature, invalidManifest, TestImageSignatureReference, mech, TestKeyFingerprint) assert.Error(t, err) assert.Nil(t, sig) // Error verifying signature corruptSignature, err := ioutil.ReadFile("fixtures/corrupt.signature") sig, err = VerifyDockerManifestSignature(corruptSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) assert.Error(t, err) assert.Nil(t, sig) // Key fingerprint mismatch sig, err = VerifyDockerManifestSignature(signature, manifest, TestImageSignatureReference, mech, "unexpected fingerprint") assert.Error(t, err) assert.Nil(t, sig) // Invalid reference in the signature invalidReferenceSignature, err := ioutil.ReadFile("fixtures/invalid-reference.signature") sig, err = VerifyDockerManifestSignature(invalidReferenceSignature, manifest, TestImageSignatureReference, mech, TestKeyFingerprint) assert.Error(t, err) assert.Nil(t, sig) // Docker reference mismatch sig, err = VerifyDockerManifestSignature(signature, manifest, "example.com/doesnt/match", mech, TestKeyFingerprint) assert.Error(t, err) assert.Nil(t, sig) // Docker manifest digest mismatch sig, err = VerifyDockerManifestSignature(signature, []byte("unexpected manifest"), TestImageSignatureReference, mech, TestKeyFingerprint) assert.Error(t, err) assert.Nil(t, sig) } image-4.0.1/signature/fixtures/000077500000000000000000000000001354546467100164675ustar00rootroot00000000000000image-4.0.1/signature/fixtures/.gitignore000066400000000000000000000001411354546467100204530ustar00rootroot00000000000000/*.gpg~ /.gpg-v21-migrated /private-keys-v1.d /random_seed /gnupg_spawn_agent_sentinel.lock /.#* image-4.0.1/signature/fixtures/corrupt.signature000066400000000000000000000006341354546467100221130ustar00rootroot00000000000000xD$1Z)($391GɪZ)3%5$NON--JMK-JKNUR*I-.KMLju2sStS2AJ3Lͬ Ҍ SSLL͓- ,,SRS--M͓R ͌,- R S R LR LSMMATܑXWZPXRZ R_PPrQ*PqBА\s ML-- kk;00r02…SZQW~:k?g^WeĈ`v喙X,U,23|`*;Ϯ 2㢌{ߗ=s&%Rwշ6i̯kG1?cSajl[]űurK{ ljA_S~1/oy 
image-4.0.1/signature/fixtures/dir-img-manifest-digest-error/000077500000000000000000000000001354546467100242275ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-manifest-digest-error/manifest.json000077700000000000000000000000001354546467100362372../v2s1-invalid-signatures.manifest.jsonustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-manifest-digest-error/signature-1000077700000000000000000000000001354546467100332262../dir-img-valid/signature-1ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-mixed/000077500000000000000000000000001354546467100211235ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-mixed/manifest.json000077700000000000000000000000001354546467100311602../dir-img-valid/manifest.jsonustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-mixed/signature-1000077700000000000000000000000001354546467100277002../invalid-blob.signatureustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-mixed/signature-2000077700000000000000000000000001354546467100301232../dir-img-valid/signature-1ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-modified-manifest/000077500000000000000000000000001354546467100234015ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-modified-manifest/manifest.json000066400000000000000000000020241354546467100261000ustar00rootroot00000000000000{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": 7023, "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 32654, "digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 16724, "digest": "sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b" }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 73109, "digest": "sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736" } ], "extra": "this manifest has been modified" } 
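The fixture above differs from the validly signed manifest only by the trailing "extra" member. A minimal standalone sketch (not part of the archive; the path assumes the archive's layout) of how that edit shows up as a different manifest digest, which is why the reused signature-1 no longer verifies:

package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"
)

func main() {
	// Any added member, such as "extra" above, yields a different digest,
	// so a signature made over the original manifest can no longer match.
	m, err := ioutil.ReadFile("signature/fixtures/dir-img-modified-manifest/manifest.json")
	if err != nil {
		panic(err)
	}
	fmt.Printf("sha256:%x\n", sha256.Sum256(m))
}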
image-4.0.1/signature/fixtures/dir-img-modified-manifest/signature-1000077700000000000000000000000001354546467100324002../dir-img-valid/signature-1ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-no-manifest/000077500000000000000000000000001354546467100222355ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-no-manifest/signature-1000077700000000000000000000000001354546467100312342../dir-img-valid/signature-1ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-unsigned/000077500000000000000000000000001354546467100216315ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-unsigned/manifest.json000077700000000000000000000000001354546467100316662../dir-img-valid/manifest.jsonustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-valid-2/000077500000000000000000000000001354546467100212535ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-valid-2/manifest.json000077700000000000000000000000001354546467100313102../dir-img-valid/manifest.jsonustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-valid-2/signature-1000077700000000000000000000000001354546467100302522../dir-img-valid/signature-1ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-valid-2/signature-2000066400000000000000000000006511354546467100233400ustar00rootroot00000000000000xD$&1,2VJ.,LNQVLI+,SStRRRSJRK2s2ӀlDRRfnbz*6ݔt+D#S3+#4#S$cK #䔤d sKDDC3#3KKôD3T$T43#TSSe% $f&+$$f)g%dA\ T\cgghZ4.3%+C3 4005 (.NX)eaX!A tf?+ۦR]r]iL]Rlζd1L%K˵rRw+&xeU͕1v#+[[5:va;L\yUimage-4.0.1/signature/fixtures/dir-img-valid/000077500000000000000000000000001354546467100211145ustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-valid/manifest.json000077700000000000000000000000001354546467100276052../image.manifest.jsonustar00rootroot00000000000000image-4.0.1/signature/fixtures/dir-img-valid/signature-1000066400000000000000000000006531354546467100232020ustar00rootroot00000000000000xD$&1{|ɪVJ.,LNQVLI+,SStRRRSJRK2s2ӀlDRRfnbz*6ݔt+D#S3+#4#S$cK #䔤d sKDDC3#3KKôD3T$T43#TSSe% $f&+$$f)g%dA\ T\cgghZ4.3%+C3cK KSڎ9,  lLb_0ygeps8}N;+LYb*2o-B7)\c=톆6ogsJM N$W"rR9 ۴~rSLʰ+M+^LgKZimage-4.0.1/signature/fixtures/double.signature000066400000000000000000000014661354546467100216730ustar00rootroot00000000000000xD$1Z)($391GɪZ)3%5$NON--JMK-JKNUR*I-.KMLju2sStS2AJ3Lͬ Ҍ SSLL͓- ,,SRS--M͓R ͌,- R S R LR LSMMATܑXWZPXRZ R_PPrQ*PqBА\s ML-- kk;00r02…SZQW~:k?g^WeĈ`v喙X,U,23|`*;Ϯ 2㢌{ߗ=s&%Rwշ6i̯kG1?cSjl[]ű.J:JH:r2RKtS2ӁP_qFARajyerJRjjiyRjaZJqAaIyy)ȲKKs3J3R3KJRA J2 ^J.J*.BQ tXbnqm-WF&6V&P0pq `!tF]A ,*ȣ??37Į4d ZTBPyKO5uևC[XN:-'z5~=ϱl"aie+d/;/{;?>2ݚK5image-4.0.1/signature/fixtures/no-optional-fields.signature000066400000000000000000000005771354546467100241260ustar00rootroot00000000000000xD'1DuW+%ed&'(YU+edT)٩EEiEyɩJVJ%%yyi@RRfnbz*znJf:HRqFARajyerJRjjiyRjaZJqAaIyy)Ȳ;Ks3J3R3KJRA J2.,L3=zڑ11S+閵u7&!H!c,TE/uLgv.}yuõYY[>$.|?Qs8kPZvOimage-4.0.1/signature/fixtures/policy.json000066400000000000000000000060231354546467100206620ustar00rootroot00000000000000{ "default": [ { "type": "reject" } ], "transports": { "dir": { "": [ { "type": "insecureAcceptAnything" } ] }, "docker": { "example.com/playground": [ { "type": "insecureAcceptAnything" } ], "example.com/production": [ { "type": "signedBy", "keyType": "GPGKeys", "keyPath": "/keys/employee-gpg-keyring" } ], "example.com/hardened": [ { "type": "signedBy", "keyType": "GPGKeys", "keyPath": "/keys/employee-gpg-keyring", "signedIdentity": { "type": "matchRepository" } }, { "type": "signedBy", "keyType": "signedByGPGKeys", "keyPath": 
"/keys/public-key-signing-gpg-keyring", "signedIdentity": { "type": "matchExact" } }, { "type": "signedBaseLayer", "baseLayerIdentity": { "type": "exactRepository", "dockerRepository": "registry.access.redhat.com/rhel7/rhel" } } ], "example.com/hardened-x509": [ { "type": "signedBy", "keyType": "X509Certificates", "keyPath": "/keys/employee-cert-file", "signedIdentity": { "type": "matchRepository" } }, { "type": "signedBy", "keyType": "signedByX509CAs", "keyPath": "/keys/public-key-signing-ca-file" } ], "registry.access.redhat.com": [ { "type": "signedBy", "keyType": "signedByGPGKeys", "keyPath": "/keys/RH-key-signing-key-gpg-keyring", "signedIdentity": { "type": "matchRepoDigestOrExact" } } ], "bogus/key-data-example": [ { "type": "signedBy", "keyType": "signedByGPGKeys", "keyData": "bm9uc2Vuc2U=" } ], "bogus/signed-identity-example": [ { "type": "signedBaseLayer", "baseLayerIdentity": { "type": "exactReference", "dockerReference": "registry.access.redhat.com/rhel7/rhel:latest" } } ] } } }image-4.0.1/signature/fixtures/public-key.gpg000066400000000000000000000017271354546467100212410ustar00rootroot00000000000000-----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mI0EVurzqQEEAL3qkFq4K2URtSWVDYnQUNA9HdM9sqS2eAWfqUFMrkD5f+oN+LBL tPyaE5GNLA0vXY7nHAM2TeM8ijZ/eMP17Raj64JL8GhCymL3wn2jNvb9XaF0R0s6 H0IaRPPu45A3SnxLwm4Orc/9Z7/UxtYjKSg9xOaTiVPzJgaf5Vm4J4ApABEBAAG0 EnNrb3BlbyB0ZXN0aW5nIGtleYi4BBMBAgAiBQJW6vOpAhsDBgsJCAcDAgYVCAIJ CgsEFgIDAQIeAQIXgAAKCRDbcvIYi7RsyBbOBACgJFiKDlQ1UyvsNmGqJ7D0OpbS 1OppJlradKgZXyfahFswhFI+7ZREvELLHbinq3dBy5cLXRWzQKdJZNHknSN5Tjf2 0ipVBQuqpcBo+dnKiG4zH6fhTri7yeTZksIDfsqlI6FXDOdKLUSnahagEBn4yU+x jHPvZk5SuuZv56A45biNBFbq86kBBADIC/9CsAlOmRALuYUmkhcqEjuFwn3wKz2d IBjzgvro7zcVNNCgxQfMEjcUsvEh5cx13G3QQHcwOKy3M6Bv6VMhfZjd+1P1el4P 0fJS8GFmhWRBknMN8jFsgyohQeouQ798RFFv94KszfStNnr/ae8oao5URmoUXSCa /MdUxn0YKwARAQABiJ8EGAECAAkFAlbq86kCGwwACgkQ23LyGIu0bMjUywQAq0dn lUpDNSoLTcpNWuVvHQ7c/qmnE4TyiSLiRiAywdEWA6gMiyhUUucuGsEhMFP1WX1k UNwArZ6UG7BDOUsvngP7jKGNqyUOQrq1s/r8D+0MrJGOWErGLlfttO2WeoijECkI 5qm8cXzAra3Xf/Z3VjxYTKSnNu37LtZkakdTdYE= =tJAt -----END PGP PUBLIC KEY BLOCK----- image-4.0.1/signature/fixtures/pubring.gpg000066400000000000000000000012251354546467100206340ustar00rootroot00000000000000VZ+e% P==xAL@ K, /]6M<6xKhBb}6]tGK:BD7J|Kng#)(=擉S&Y')skopeo testing key"V   rl$XT5S+6a':i&Zt_'ڄ[0R>DBwA˗ ]@Id#yN7*U hʈn3Nْ~ʥ#W J-DjOsfNRo8V B N &*;}+= 74Р7!um@w083oS!}Sz^RafdAs 1l*!A.C|DQo6zi(jTFj] T}+ V rlGgJC5* MMZo"F 2 (TR.!0SY}dPC9K/%B XJ.Wz)橼q|wVDBwA˗ ]@Id#yN7*U hʈn3Nْ~ʥ#W J-DjOsfNRo8V B N &*;}+= 74Р7!um@w083oS!}Sz^RafdAs 1l*!A.C|DQo6zi(jTFj] T}+95'h*咎~ $l~DJkhni6#} `@g6.m~"&)fNp]I}aJӭ{8ꪁJS`  8Jv`aȣ31e0>qr˛O'YJ"jʷGJ&. 8Ir ]>=3v͗M6H5]V3S 6Np bն(uՊ,.)53M"ԋ8Tzl=0E9B&E>OaN6M&m9 iР V rlGgJC5* MMZo"F 2 (TR.!0SY}dPC9K/%B XJ.Wz)橼q|wV'\%c vaxQC9F 0Ͷgrl! 
vaxQC9Fimage-4.0.1/signature/fixtures/unknown-key.signature000066400000000000000000000010601354546467100226740ustar00rootroot00000000000000ĸuD%1DV~V\YdUWYR b'g%*Y)d&eJ:JHar) %VJFfVFIiF)&IƖF)I斦IfFfi)fƩ)I&if&F橦 J* @H,LVH+IK-R(LK,)-J)/(σx((@PX7%L h\f.ЉJV&f&f&&2, L lL bLaN֟soj=wy*aE#,gڴffORvqo)b 0]gܢuwj~_s*{lڦ./"uדO朳cr}YO\hR߫F/uopւ_v_[irn~{eߣ֟|OiVv;Y rcԏSnK_̷Oe"lj&_oykܯ㓼tm̭q2image-4.0.1/signature/fixtures/unknown-key.signature-v3000066400000000000000000000010451354546467100232250ustar00rootroot00000000000000ĸuD%1DV~ԨVJ.,LNQVLI+,SStRRRSJRK2s2ӀlZT$09ݔt+D#S3+#4#S$cK #䔤d sKDDC3#3KKôD3T$T43#TSSe% w$f&+$$f)g%dA<\ T\cgghZ4.3%+C33ccsNFQfVpËiZ2C%w5Zt8%싼8cso#0)9`tβ?KU|rtm IvWmfkwU/Y-F]k0qlc)Ecnѽ-^dָ5xʹ{l-{f5 O\cyYzɯk`_rxeM -imkKԤqG#<ͪB=+$.]5.(k'}j1ܒgn^image-4.0.1/signature/fixtures/unsigned-encrypted.signature000066400000000000000000000003511354546467100242200ustar00rootroot00000000000000#̢U*E@u@w I9u2QG _`C,fnjfYǻ݋^hE꺻CN2zsqMVbv0j0aRЃ=8W#g 0 && len(keyData) > 0 { return nil, InvalidPolicyFormatError("keyType and keyData cannot be used simultaneously") } if signedIdentity == nil { return nil, InvalidPolicyFormatError("signedIdentity not specified") } return &prSignedBy{ prCommon: prCommon{Type: prTypeSignedBy}, KeyType: keyType, KeyPath: keyPath, KeyData: keyData, SignedIdentity: signedIdentity, }, nil } // newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type. func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { return newPRSignedBy(keyType, keyPath, nil, signedIdentity) } // NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { return newPRSignedByKeyPath(keyType, keyPath, signedIdentity) } // newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type. func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { return newPRSignedBy(keyType, "", keyData, signedIdentity) } // NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) { return newPRSignedByKeyData(keyType, keyData, signedIdentity) } // Compile-time check that prSignedBy implements json.Unmarshaler. var _ json.Unmarshaler = (*prSignedBy)(nil) // UnmarshalJSON implements the json.Unmarshaler interface. 
func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
	*pr = prSignedBy{}
	var tmp prSignedBy
	var gotKeyPath, gotKeyData = false, false
	var signedIdentity json.RawMessage
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "keyType":
			return &tmp.KeyType
		case "keyPath":
			gotKeyPath = true
			return &tmp.KeyPath
		case "keyData":
			gotKeyData = true
			return &tmp.KeyData
		case "signedIdentity":
			return &signedIdentity
		default:
			return nil
		}
	}); err != nil {
		return err
	}
	if tmp.Type != prTypeSignedBy {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	if signedIdentity == nil {
		tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
	} else {
		si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
		if err != nil {
			return err
		}
		tmp.SignedIdentity = si
	}
	var res *prSignedBy
	var err error
	switch {
	case gotKeyPath && gotKeyData:
		return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
	case gotKeyPath && !gotKeyData:
		res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
	case !gotKeyPath && gotKeyData:
		res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
	case !gotKeyPath && !gotKeyData:
		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
	default: // Coverage: This should never happen
		return errors.Errorf("Impossible keyPath/keyData presence combination!?")
	}
	if err != nil {
		return err
	}
	*pr = *res
	return nil
}

// IsValid returns true iff kt is a recognized value
func (kt sbKeyType) IsValid() bool {
	switch kt {
	case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
		SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
		return true
	default:
		return false
	}
}

// Compile-time check that sbKeyType implements json.Unmarshaler.
var _ json.Unmarshaler = (*sbKeyType)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
	*kt = sbKeyType("")
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	if !sbKeyType(s).IsValid() {
		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
	}
	*kt = sbKeyType(s)
	return nil
}

// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
	if baseLayerIdentity == nil {
		return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
	}
	return &prSignedBaseLayer{
		prCommon:          prCommon{Type: prTypeSignedBaseLayer},
		BaseLayerIdentity: baseLayerIdentity,
	}, nil
}

// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
	return newPRSignedBaseLayer(baseLayerIdentity)
}
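A minimal sketch of how a caller outside this package might combine the exported constructors above into a policy and serialize it; the keyring path and scope name are hypothetical examples, not values mandated by the library:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/image/v4/signature"
)

func main() {
	// Reject by default; require GPG signatures for one docker scope.
	req, err := signature.NewPRSignedByKeyPath(signature.SBKeyTypeGPGKeys,
		"/keys/employee-gpg-keyring", signature.NewPRMMatchRepoDigestOrExact())
	if err != nil {
		panic(err)
	}
	policy := &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				"example.com/production": signature.PolicyRequirements{req},
			},
		},
	}
	encoded, err := json.MarshalIndent(policy, "", "    ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(encoded))
}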
// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
	*pr = prSignedBaseLayer{}
	var tmp prSignedBaseLayer
	var baseLayerIdentity json.RawMessage
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type":              &tmp.Type,
		"baseLayerIdentity": &baseLayerIdentity,
	}); err != nil {
		return err
	}
	if tmp.Type != prTypeSignedBaseLayer {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
	if err != nil {
		return err
	}
	res, err := newPRSignedBaseLayer(bli)
	if err != nil { // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
		return err
	}
	*pr = *res
	return nil
}

// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
	var typeField prmCommon
	if err := json.Unmarshal(data, &typeField); err != nil {
		return nil, err
	}
	var res PolicyReferenceMatch
	switch typeField.Type {
	case prmTypeMatchExact:
		res = &prmMatchExact{}
	case prmTypeMatchRepoDigestOrExact:
		res = &prmMatchRepoDigestOrExact{}
	case prmTypeMatchRepository:
		res = &prmMatchRepository{}
	case prmTypeExactReference:
		res = &prmExactReference{}
	case prmTypeExactRepository:
		res = &prmExactRepository{}
	default:
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
	}
	if err := json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return res, nil
}

// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
func newPRMMatchExact() *prmMatchExact {
	return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
}

// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
func NewPRMMatchExact() PolicyReferenceMatch {
	return newPRMMatchExact()
}

// Compile-time check that prmMatchExact implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchExact)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
	*prm = prmMatchExact{}
	var tmp prmMatchExact
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
	if tmp.Type != prmTypeMatchExact {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*prm = *newPRMMatchExact()
	return nil
}

// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
	return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
}

// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
	return newPRMMatchRepoDigestOrExact()
}

// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
	*prm = prmMatchRepoDigestOrExact{}
	var tmp prmMatchRepoDigestOrExact
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
	if tmp.Type != prmTypeMatchRepoDigestOrExact {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*prm = *newPRMMatchRepoDigestOrExact()
	return nil
}

// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
func newPRMMatchRepository() *prmMatchRepository {
	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
}

// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
func NewPRMMatchRepository() PolicyReferenceMatch {
	return newPRMMatchRepository()
}

// Compile-time check that prmMatchRepository implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchRepository)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
	*prm = prmMatchRepository{}
	var tmp prmMatchRepository
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
	if tmp.Type != prmTypeMatchRepository {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	*prm = *newPRMMatchRepository()
	return nil
}

// newPRMExactReference is NewPRMExactReference, except it returns the private type.
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
	ref, err := reference.ParseNormalizedNamed(dockerReference)
	if err != nil {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
	}
	if reference.IsNameOnly(ref) {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
	}
	return &prmExactReference{
		prmCommon:       prmCommon{Type: prmTypeExactReference},
		DockerReference: dockerReference,
	}, nil
}

// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
	return newPRMExactReference(dockerReference)
}

// Compile-time check that prmExactReference implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmExactReference)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
	*prm = prmExactReference{}
	var tmp prmExactReference
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type":            &tmp.Type,
		"dockerReference": &tmp.DockerReference,
	}); err != nil {
		return err
	}
	if tmp.Type != prmTypeExactReference {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	res, err := newPRMExactReference(tmp.DockerReference)
	if err != nil {
		return err
	}
	*prm = *res
	return nil
}
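To illustrate how newPolicyReferenceMatchFromJSON dispatches on the "type" member, a small sketch that parses a policy using one of these reference-match objects through the public API (the reference matches the fixture policy above, but the snippet itself is illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v4/signature"
)

func main() {
	// "exactReference" is decoded by prmExactReference.UnmarshalJSON above.
	policy, err := signature.NewPolicyFromBytes([]byte(`{
	    "default": [
	        {
	            "type": "signedBaseLayer",
	            "baseLayerIdentity": {
	                "type": "exactReference",
	                "dockerReference": "registry.access.redhat.com/rhel7/rhel:latest"
	            }
	        }
	    ]
	}`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed policy with %d default requirement(s)\n", len(policy.Default))
}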
// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
	}
	return &prmExactRepository{
		prmCommon:        prmCommon{Type: prmTypeExactRepository},
		DockerRepository: dockerRepository,
	}, nil
}

// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
	return newPRMExactRepository(dockerRepository)
}

// Compile-time check that prmExactRepository implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmExactRepository)(nil)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
	*prm = prmExactRepository{}
	var tmp prmExactRepository
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type":             &tmp.Type,
		"dockerRepository": &tmp.DockerRepository,
	}); err != nil {
		return err
	}
	if tmp.Type != prmTypeExactRepository {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	res, err := newPRMExactRepository(tmp.DockerRepository)
	if err != nil {
		return err
	}
	*prm = *res
	return nil
}
image-4.0.1/signature/policy_config_test.go000066400000000000000000001136651354546467100210420ustar00rootroot00000000000000package signature

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"path/filepath"
	"testing"

	"github.com/containers/image/v4/directory"
	"github.com/containers/image/v4/docker"
	"github.com/pkg/errors"

	// this import is needed where we use the "atomic" transport in TestPolicyUnmarshalJSON
	_ "github.com/containers/image/v4/openshift"
	"github.com/containers/image/v4/types"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// policyFixtureContents is a data structure equal to the contents of "fixtures/policy.json"
var policyFixtureContents = &Policy{
	Default: PolicyRequirements{NewPRReject()},
	Transports: map[string]PolicyTransportScopes{
		"dir": {
			"": PolicyRequirements{NewPRInsecureAcceptAnything()},
		},
		"docker": {
			"example.com/playground": {
				NewPRInsecureAcceptAnything(),
			},
			"example.com/production": {
				xNewPRSignedByKeyPath(SBKeyTypeGPGKeys,
					"/keys/employee-gpg-keyring",
					NewPRMMatchRepoDigestOrExact()),
			},
			"example.com/hardened": {
				xNewPRSignedByKeyPath(SBKeyTypeGPGKeys,
					"/keys/employee-gpg-keyring",
					NewPRMMatchRepository()),
				xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
					"/keys/public-key-signing-gpg-keyring",
					NewPRMMatchExact()),
				xNewPRSignedBaseLayer(xNewPRMExactRepository("registry.access.redhat.com/rhel7/rhel")),
			},
			"example.com/hardened-x509": {
				xNewPRSignedByKeyPath(SBKeyTypeX509Certificates,
					"/keys/employee-cert-file",
					NewPRMMatchRepository()),
				xNewPRSignedByKeyPath(SBKeyTypeSignedByX509CAs,
					"/keys/public-key-signing-ca-file",
					NewPRMMatchRepoDigestOrExact()),
			},
			"registry.access.redhat.com": {
				xNewPRSignedByKeyPath(SBKeyTypeSignedByGPGKeys,
					"/keys/RH-key-signing-key-gpg-keyring",
					NewPRMMatchRepoDigestOrExact()),
			},
			"bogus/key-data-example": {
				xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys,
					[]byte("nonsense"),
					NewPRMMatchRepoDigestOrExact()),
			},
			"bogus/signed-identity-example": {
				xNewPRSignedBaseLayer(xNewPRMExactReference("registry.access.redhat.com/rhel7/rhel:latest")),
			},
		},
	},
}
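Before the tests, a quick sketch of the call they exercise: loading a policy while overriding the search path through types.SystemContext. This is a minimal usage example, not part of the test file; the path is just an illustration:

package main

import (
	"fmt"

	"github.com/containers/image/v4/signature"
	"github.com/containers/image/v4/types"
)

func main() {
	// With a nil SystemContext (or an empty SignaturePolicyPath), DefaultPolicy
	// falls back to the system default path instead.
	sys := &types.SystemContext{SignaturePolicyPath: "./fixtures/policy.json"}
	policy, err := signature.DefaultPolicy(sys)
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded %d default requirement(s)\n", len(policy.Default))
}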
func TestDefaultPolicy(t *testing.T) {
	// We can't test the actual systemDefaultPolicyPath, so override.
	// TestDefaultPolicyPath below tests that we handle the overrides and defaults
	// correctly.

	// Success
	policy, err := DefaultPolicy(&types.SystemContext{SignaturePolicyPath: "./fixtures/policy.json"})
	require.NoError(t, err)
	assert.Equal(t, policyFixtureContents, policy)

	for _, path := range []string{
		"/this/doesnt/exist", // Error reading file
		"/dev/null",          // A failure case; most are tested in the individual method unit tests.
	} {
		policy, err := DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
		assert.Error(t, err)
		assert.Nil(t, policy)
	}
}

func TestDefaultPolicyPath(t *testing.T) {
	const nondefaultPath = "/this/is/not/the/default/path.json"
	const variableReference = "$HOME"
	const rootPrefix = "/root/prefix"

	for _, c := range []struct {
		sys      *types.SystemContext
		expected string
	}{
		// The common case
		{nil, systemDefaultPolicyPath},
		// There is a context, but it does not override the path.
		{&types.SystemContext{}, systemDefaultPolicyPath},
		// Path overridden
		{&types.SystemContext{SignaturePolicyPath: nondefaultPath}, nondefaultPath},
		// Root overridden
		{
			&types.SystemContext{RootForImplicitAbsolutePaths: rootPrefix},
			filepath.Join(rootPrefix, systemDefaultPolicyPath),
		},
		// Root and path overrides present simultaneously,
		{
			&types.SystemContext{
				RootForImplicitAbsolutePaths: rootPrefix,
				SignaturePolicyPath:          nondefaultPath,
			},
			nondefaultPath,
		},
		// No environment expansion happens in the overridden paths
		{&types.SystemContext{SignaturePolicyPath: variableReference}, variableReference},
	} {
		path := defaultPolicyPath(c.sys)
		assert.Equal(t, c.expected, path)
	}
}

func TestNewPolicyFromFile(t *testing.T) {
	// Success
	policy, err := NewPolicyFromFile("./fixtures/policy.json")
	require.NoError(t, err)
	assert.Equal(t, policyFixtureContents, policy)

	// Error reading file
	_, err = NewPolicyFromFile("/this/doesnt/exist")
	assert.Error(t, err)

	// A failure case; most are tested in the individual method unit tests.
	_, err = NewPolicyFromFile("/dev/null")
	require.Error(t, err)
	assert.IsType(t, InvalidPolicyFormatError(""), errors.Cause(err))
}

func TestNewPolicyFromBytes(t *testing.T) {
	// Success
	bytes, err := ioutil.ReadFile("./fixtures/policy.json")
	require.NoError(t, err)
	policy, err := NewPolicyFromBytes(bytes)
	require.NoError(t, err)
	assert.Equal(t, policyFixtureContents, policy)

	// A failure case; most are tested in the individual method unit tests.
	_, err = NewPolicyFromBytes([]byte(""))
	require.Error(t, err)
	assert.IsType(t, InvalidPolicyFormatError(""), err)
}

// FIXME? There is quite a bit of duplication below. Factor some of it out?

// testInvalidJSONInput verifies that obviously invalid input is rejected for dest.
func testInvalidJSONInput(t *testing.T, dest json.Unmarshaler) {
	// Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our
	// UnmarshalJSON implementation; so test that first, then test our error handling for completeness.
	err := json.Unmarshal([]byte("&"), dest)
	assert.Error(t, err)
	err = dest.UnmarshalJSON([]byte("&"))
	assert.Error(t, err)

	// Not an object/array/string
	err = json.Unmarshal([]byte("1"), dest)
	assert.Error(t, err)
}

// addExtraJSONMember adds an additional member "$name": $extra,
// possibly with a duplicate name, to encoded.
// Errors, if any, are reported through t.
func addExtraJSONMember(t *testing.T, encoded []byte, name string, extra interface{}) []byte { extraJSON, err := json.Marshal(extra) require.NoError(t, err) require.True(t, bytes.HasSuffix(encoded, []byte("}"))) preservedLen := len(encoded) - 1 return bytes.Join([][]byte{encoded[:preservedLen], []byte(`,"`), []byte(name), []byte(`":`), extraJSON, []byte("}")}, nil) } func TestInvalidPolicyFormatError(t *testing.T) { // A stupid test just to keep code coverage s := "test" err := InvalidPolicyFormatError(s) assert.Equal(t, s, err.Error()) } // Return the result of modifying validJSON with fn and unmarshaling it into *p func tryUnmarshalModifiedPolicy(t *testing.T, p *Policy, validJSON []byte, modifyFn func(mSI)) error { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) modifyFn(tmp) testJSON, err := json.Marshal(tmp) require.NoError(t, err) *p = Policy{} return json.Unmarshal(testJSON, p) } // xNewPRSignedByKeyPath is like NewPRSignedByKeyPath, except it must not fail. func xNewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) PolicyRequirement { pr, err := NewPRSignedByKeyPath(keyType, keyPath, signedIdentity) if err != nil { panic("xNewPRSignedByKeyPath failed") } return pr } // xNewPRSignedByKeyData is like NewPRSignedByKeyData, except it must not fail. func xNewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) PolicyRequirement { pr, err := NewPRSignedByKeyData(keyType, keyData, signedIdentity) if err != nil { panic("xNewPRSignedByKeyData failed") } return pr } func TestPolicyUnmarshalJSON(t *testing.T) { var p Policy testInvalidJSONInput(t, &p) // Start with a valid JSON. validPolicy := Policy{ Default: []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("abc"), NewPRMMatchRepoDigestOrExact()), }, Transports: map[string]PolicyTransportScopes{ "docker": { "docker.io/library/busybox": []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()), }, "registry.access.redhat.com": []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()), }, }, "atomic": { "registry.access.redhat.com/rhel7": []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RHatomic"), NewPRMMatchRepository()), }, }, "unknown": { "registry.access.redhat.com/rhel7": []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RHatomic"), NewPRMMatchRepository()), }, }, }, } validJSON, err := json.Marshal(validPolicy) require.NoError(t, err) // Success p = Policy{} err = json.Unmarshal(validJSON, &p) require.NoError(t, err) assert.Equal(t, validPolicy, p) // Various ways to corrupt the JSON breakFns := []func(mSI){ // The "default" field is missing func(v mSI) { delete(v, "default") }, // Extra top-level sub-object func(v mSI) { v["unexpected"] = 1 }, // "default" not an array func(v mSI) { v["default"] = 1 }, func(v mSI) { v["default"] = mSI{} }, // "transports" not an object func(v mSI) { v["transports"] = 1 }, func(v mSI) { v["transports"] = []string{} }, // "default" is an invalid PolicyRequirements func(v mSI) { v["default"] = PolicyRequirements{} }, } for _, fn := range breakFns { err = tryUnmarshalModifiedPolicy(t, &p, validJSON, fn) assert.Error(t, err) } // Duplicated fields for _, field := range []string{"default", "transports"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, 
tmp[field]) p = Policy{} err = json.Unmarshal(testJSON, &p) assert.Error(t, err) } // Various allowed modifications to the policy allowedModificationFns := []func(mSI){ // Delete the map of transport-specific scopes func(v mSI) { delete(v, "transports") }, // Use an empty map of transport-specific scopes func(v mSI) { v["transports"] = map[string]PolicyTransportScopes{} }, } for _, fn := range allowedModificationFns { err = tryUnmarshalModifiedPolicy(t, &p, validJSON, fn) require.NoError(t, err) } } func TestPolicyTransportScopesUnmarshalJSON(t *testing.T) { var pts PolicyTransportScopes // Start with a valid JSON. validPTS := PolicyTransportScopes{ "": []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("global"), NewPRMMatchRepoDigestOrExact()), }, } validJSON, err := json.Marshal(validPTS) require.NoError(t, err) // Nothing can be unmarshaled directly into PolicyTransportScopes pts = PolicyTransportScopes{} err = json.Unmarshal(validJSON, &pts) assert.Error(t, err) } // Return the result of modifying validJSON with fn and unmarshaling it into *pts // using transport. func tryUnmarshalModifiedPTS(t *testing.T, pts *PolicyTransportScopes, transport types.ImageTransport, validJSON []byte, modifyFn func(mSI)) error { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) modifyFn(tmp) testJSON, err := json.Marshal(tmp) require.NoError(t, err) *pts = PolicyTransportScopes{} dest := policyTransportScopesWithTransport{ transport: transport, dest: pts, } return json.Unmarshal(testJSON, &dest) } func TestPolicyTransportScopesWithTransportUnmarshalJSON(t *testing.T) { var pts PolicyTransportScopes dest := policyTransportScopesWithTransport{ transport: docker.Transport, dest: &pts, } testInvalidJSONInput(t, &dest) // Start with a valid JSON. 
validPTS := PolicyTransportScopes{
		"docker.io/library/busybox": []PolicyRequirement{
			xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()),
		},
		"registry.access.redhat.com": []PolicyRequirement{
			xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
		},
		"": []PolicyRequirement{
			xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("global"), NewPRMMatchRepoDigestOrExact()),
		},
	}
	validJSON, err := json.Marshal(validPTS)
	require.NoError(t, err)

	// Success
	pts = PolicyTransportScopes{}
	dest = policyTransportScopesWithTransport{
		transport: docker.Transport,
		dest:      &pts,
	}
	err = json.Unmarshal(validJSON, &dest)
	require.NoError(t, err)
	assert.Equal(t, validPTS, pts)

	// Various ways to corrupt the JSON
	breakFns := []func(mSI){
		// A scope is not an array
		func(v mSI) { v["docker.io/library/busybox"] = 1 },
		func(v mSI) { v["docker.io/library/busybox"] = mSI{} },
		func(v mSI) { v[""] = 1 },
		func(v mSI) { v[""] = mSI{} },
		// A scope is an invalid PolicyRequirements
		func(v mSI) { v["docker.io/library/busybox"] = PolicyRequirements{} },
		func(v mSI) { v[""] = PolicyRequirements{} },
	}
	for _, fn := range breakFns {
		err = tryUnmarshalModifiedPTS(t, &pts, docker.Transport, validJSON, fn)
		assert.Error(t, err)
	}

	// Duplicated fields
	for _, field := range []string{"docker.io/library/busybox", ""} {
		var tmp mSI
		err := json.Unmarshal(validJSON, &tmp)
		require.NoError(t, err)

		testJSON := addExtraJSONMember(t, validJSON, field, tmp[field])

		pts = PolicyTransportScopes{}
		dest := policyTransportScopesWithTransport{
			transport: docker.Transport,
			dest:      &pts,
		}
		err = json.Unmarshal(testJSON, &dest)
		assert.Error(t, err)
	}

	// Scope rejected by transport: the Docker scopes we use as valid are rejected by directory.Transport
	// as relative paths.
	err = tryUnmarshalModifiedPTS(t, &pts, directory.Transport, validJSON, func(v mSI) {})
	assert.Error(t, err)

	// Various allowed modifications to the policy
	allowedModificationFns := []func(mSI){
		// The "" scope is missing
		func(v mSI) { delete(v, "") },
		// The policy is completely empty
		func(v mSI) {
			for key := range v {
				delete(v, key)
			}
		},
	}
	for _, fn := range allowedModificationFns {
		err = tryUnmarshalModifiedPTS(t, &pts, docker.Transport, validJSON, fn)
		require.NoError(t, err)
	}
}

func TestPolicyRequirementsUnmarshalJSON(t *testing.T) {
	var reqs PolicyRequirements
	testInvalidJSONInput(t, &reqs)

	// Start with a valid JSON.
	validReqs := PolicyRequirements{
		xNewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("def"), NewPRMMatchRepoDigestOrExact()),
		xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()),
	}
	validJSON, err := json.Marshal(validReqs)
	require.NoError(t, err)

	// Success
	reqs = PolicyRequirements{}
	err = json.Unmarshal(validJSON, &reqs)
	require.NoError(t, err)
	assert.Equal(t, validReqs, reqs)

	for _, invalid := range [][]interface{}{
		// No requirements
		{},
		// A member is not an object
		{1},
		// A member has an invalid type
		{prSignedBy{prCommon: prCommon{Type: "this is invalid"}}},
		// A member has a valid type but invalid contents
		{prSignedBy{
			prCommon: prCommon{Type: prTypeSignedBy},
			KeyType:  "this is invalid",
		}},
	} {
		testJSON, err := json.Marshal(invalid)
		require.NoError(t, err)

		reqs = PolicyRequirements{}
		err = json.Unmarshal(testJSON, &reqs)
		assert.Error(t, err, string(testJSON))
	}
}

func TestNewPolicyRequirementFromJSON(t *testing.T) {
	// Sample success. Others tested in the individual PolicyRequirement.UnmarshalJSON implementations.
validReq := NewPRInsecureAcceptAnything() validJSON, err := json.Marshal(validReq) require.NoError(t, err) req, err := newPolicyRequirementFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validReq, req) // Invalid for _, invalid := range []interface{}{ // Not an object 1, // Missing type prCommon{}, // Invalid type prCommon{Type: "this is invalid"}, // Valid type but invalid contents prSignedBy{ prCommon: prCommon{Type: prTypeSignedBy}, KeyType: "this is invalid", }, } { testJSON, err := json.Marshal(invalid) require.NoError(t, err) _, err = newPolicyRequirementFromJSON(testJSON) assert.Error(t, err, string(testJSON)) } } func TestNewPRInsecureAcceptAnything(t *testing.T) { _pr := NewPRInsecureAcceptAnything() pr, ok := _pr.(*prInsecureAcceptAnything) require.True(t, ok) assert.Equal(t, &prInsecureAcceptAnything{prCommon{prTypeInsecureAcceptAnything}}, pr) } func TestPRInsecureAcceptAnythingUnmarshalJSON(t *testing.T) { var pr prInsecureAcceptAnything testInvalidJSONInput(t, &pr) // Start with a valid JSON. validPR := NewPRInsecureAcceptAnything() validJSON, err := json.Marshal(validPR) require.NoError(t, err) // Success pr = prInsecureAcceptAnything{} err = json.Unmarshal(validJSON, &pr) require.NoError(t, err) assert.Equal(t, validPR, &pr) // newPolicyRequirementFromJSON recognizes this type _pr, err := newPolicyRequirementFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPR, _pr) for _, invalid := range []mSI{ // Missing "type" field {}, // Wrong "type" field {"type": 1}, {"type": "this is invalid"}, // Extra fields { "type": string(prTypeInsecureAcceptAnything), "unknown": "foo", }, } { testJSON, err := json.Marshal(invalid) require.NoError(t, err) pr = prInsecureAcceptAnything{} err = json.Unmarshal(testJSON, &pr) assert.Error(t, err, string(testJSON)) } // Duplicated fields for _, field := range []string{"type"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) pr = prInsecureAcceptAnything{} err = json.Unmarshal(testJSON, &pr) assert.Error(t, err) } } func TestNewPRReject(t *testing.T) { _pr := NewPRReject() pr, ok := _pr.(*prReject) require.True(t, ok) assert.Equal(t, &prReject{prCommon{prTypeReject}}, pr) } func TestPRRejectUnmarshalJSON(t *testing.T) { var pr prReject testInvalidJSONInput(t, &pr) // Start with a valid JSON. 
validPR := NewPRReject()
	validJSON, err := json.Marshal(validPR)
	require.NoError(t, err)

	// Success
	pr = prReject{}
	err = json.Unmarshal(validJSON, &pr)
	require.NoError(t, err)
	assert.Equal(t, validPR, &pr)

	// newPolicyRequirementFromJSON recognizes this type
	_pr, err := newPolicyRequirementFromJSON(validJSON)
	require.NoError(t, err)
	assert.Equal(t, validPR, _pr)

	for _, invalid := range []mSI{
		// Missing "type" field
		{},
		// Wrong "type" field
		{"type": 1},
		{"type": "this is invalid"},
		// Extra fields
		{
			"type":    string(prTypeReject),
			"unknown": "foo",
		},
	} {
		testJSON, err := json.Marshal(invalid)
		require.NoError(t, err)

		pr = prReject{}
		err = json.Unmarshal(testJSON, &pr)
		assert.Error(t, err, string(testJSON))
	}

	// Duplicated fields
	for _, field := range []string{"type"} {
		var tmp mSI
		err := json.Unmarshal(validJSON, &tmp)
		require.NoError(t, err)

		testJSON := addExtraJSONMember(t, validJSON, field, tmp[field])

		pr = prReject{}
		err = json.Unmarshal(testJSON, &pr)
		assert.Error(t, err)
	}
}

func TestNewPRSignedBy(t *testing.T) {
	const testPath = "/foo/bar"
	testData := []byte("abc")
	testIdentity := NewPRMMatchRepoDigestOrExact()

	// Success
	pr, err := newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, testIdentity)
	require.NoError(t, err)
	assert.Equal(t, &prSignedBy{
		prCommon:       prCommon{prTypeSignedBy},
		KeyType:        SBKeyTypeGPGKeys,
		KeyPath:        testPath,
		KeyData:        nil,
		SignedIdentity: testIdentity,
	}, pr)
	pr, err = newPRSignedBy(SBKeyTypeGPGKeys, "", testData, testIdentity)
	require.NoError(t, err)
	assert.Equal(t, &prSignedBy{
		prCommon:       prCommon{prTypeSignedBy},
		KeyType:        SBKeyTypeGPGKeys,
		KeyPath:        "",
		KeyData:        testData,
		SignedIdentity: testIdentity,
	}, pr)

	// Invalid keyType
	pr, err = newPRSignedBy(sbKeyType(""), testPath, nil, testIdentity)
	assert.Error(t, err)
	pr, err = newPRSignedBy(sbKeyType("this is invalid"), testPath, nil, testIdentity)
	assert.Error(t, err)

	// Both keyPath and keyData specified
	pr, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, testData, testIdentity)
	assert.Error(t, err)

	// Invalid signedIdentity
	pr, err = newPRSignedBy(SBKeyTypeGPGKeys, testPath, nil, nil)
	assert.Error(t, err)
}

func TestNewPRSignedByKeyPath(t *testing.T) {
	const testPath = "/foo/bar"
	_pr, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, testPath, NewPRMMatchRepoDigestOrExact())
	require.NoError(t, err)
	pr, ok := _pr.(*prSignedBy)
	require.True(t, ok)
	assert.Equal(t, testPath, pr.KeyPath)
	// Failure cases tested in TestNewPRSignedBy.
}

func TestNewPRSignedByKeyData(t *testing.T) {
	testData := []byte("abc")
	_pr, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, testData, NewPRMMatchRepoDigestOrExact())
	require.NoError(t, err)
	pr, ok := _pr.(*prSignedBy)
	require.True(t, ok)
	assert.Equal(t, testData, pr.KeyData)
	// Failure cases tested in TestNewPRSignedBy.
}

// Return the result of modifying validJSON with fn and unmarshaling it into *pr
func tryUnmarshalModifiedSignedBy(t *testing.T, pr *prSignedBy, validJSON []byte, modifyFn func(mSI)) error {
	var tmp mSI
	err := json.Unmarshal(validJSON, &tmp)
	require.NoError(t, err)

	modifyFn(tmp)

	testJSON, err := json.Marshal(tmp)
	require.NoError(t, err)

	*pr = prSignedBy{}
	return json.Unmarshal(testJSON, &pr)
}
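The try* helpers above and below share one corruption technique: round-trip the valid JSON through a generic map, mutate it, and re-encode. A standalone sketch of just that pattern, with hypothetical names:

package main

import (
	"encoding/json"
	"fmt"
)

// mutateJSON decodes a JSON object into a generic map, applies fn, and
// re-encodes the result.
func mutateJSON(valid []byte, fn func(map[string]interface{})) []byte {
	var m map[string]interface{}
	if err := json.Unmarshal(valid, &m); err != nil {
		panic(err)
	}
	fn(m)
	out, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	return out
}

func main() {
	src := []byte(`{"type":"signedBy","keyType":"GPGKeys","keyData":"YWJj"}`)
	// Dropping "keyType" produces input the UnmarshalJSON implementations reject.
	fmt.Println(string(mutateJSON(src, func(m map[string]interface{}) { delete(m, "keyType") })))
}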
func TestPRSignedByUnmarshalJSON(t *testing.T) {
	var pr prSignedBy
	testInvalidJSONInput(t, &pr)

	// Start with a valid JSON.
	validPR, err := NewPRSignedByKeyData(SBKeyTypeGPGKeys, []byte("abc"), NewPRMMatchRepoDigestOrExact())
	require.NoError(t, err)
	validJSON, err := json.Marshal(validPR)
	require.NoError(t, err)

	// Success with KeyData
	pr = prSignedBy{}
	err = json.Unmarshal(validJSON, &pr)
	require.NoError(t, err)
	assert.Equal(t, validPR, &pr)

	// Success with KeyPath
	kpPR, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/foo/bar", NewPRMMatchRepoDigestOrExact())
	require.NoError(t, err)
	testJSON, err := json.Marshal(kpPR)
	require.NoError(t, err)
	pr = prSignedBy{}
	err = json.Unmarshal(testJSON, &pr)
	require.NoError(t, err)
	assert.Equal(t, kpPR, &pr)

	// newPolicyRequirementFromJSON recognizes this type
	_pr, err := newPolicyRequirementFromJSON(validJSON)
	require.NoError(t, err)
	assert.Equal(t, validPR, _pr)

	// Various ways to corrupt the JSON
	breakFns := []func(mSI){
		// The "type" field is missing
		func(v mSI) { delete(v, "type") },
		// Wrong "type" field
		func(v mSI) { v["type"] = 1 },
		func(v mSI) { v["type"] = "this is invalid" },
		// Extra top-level sub-object
		func(v mSI) { v["unexpected"] = 1 },
		// The "keyType" field is missing
		func(v mSI) { delete(v, "keyType") },
		// Invalid "keyType" field
		func(v mSI) { v["keyType"] = "this is invalid" },
		// Both "keyPath" and "keyData" are missing
		func(v mSI) { delete(v, "keyData") },
		// Both "keyPath" and "keyData" are present
		func(v mSI) { v["keyPath"] = "/foo/bar" },
		// Invalid "keyPath" field
		func(v mSI) { delete(v, "keyData"); v["keyPath"] = 1 },
		func(v mSI) { v["type"] = "this is invalid" },
		// Invalid "keyData" field
		func(v mSI) { v["keyData"] = 1 },
		func(v mSI) { v["keyData"] = "this is invalid base64" },
		// Invalid "signedIdentity" field
		func(v mSI) { v["signedIdentity"] = "this is invalid" },
		// "signedIdentity" an explicit nil
		func(v mSI) { v["signedIdentity"] = nil },
	}
	for _, fn := range breakFns {
		err = tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn)
		assert.Error(t, err, string(testJSON))
	}

	// Duplicated fields
	for _, field := range []string{"type", "keyType", "keyData", "signedIdentity"} {
		var tmp mSI
		err := json.Unmarshal(validJSON, &tmp)
		require.NoError(t, err)

		testJSON := addExtraJSONMember(t, validJSON, field, tmp[field])

		pr = prSignedBy{}
		err = json.Unmarshal(testJSON, &pr)
		assert.Error(t, err)
	}
	// Handle "keyPath", which is not in validJSON, specially
	pathPR, err := NewPRSignedByKeyPath(SBKeyTypeGPGKeys, "/foo/bar", NewPRMMatchRepoDigestOrExact())
	require.NoError(t, err)
	testJSON, err = json.Marshal(pathPR)
	require.NoError(t, err)
	testJSON = addExtraJSONMember(t, testJSON, "keyPath", pr.KeyPath)
	pr = prSignedBy{}
	err = json.Unmarshal(testJSON, &pr)
	assert.Error(t, err)

	// Various allowed modifications to the requirement
	allowedModificationFns := []func(mSI){
		// Delete the signedIdentity field
		func(v mSI) { delete(v, "signedIdentity") },
	}
	for _, fn := range allowedModificationFns {
		err = tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn)
		require.NoError(t, err)
	}

	// Various ways to set signedIdentity to the default value
	signedIdentityDefaultFns := []func(mSI){
		// Set signedIdentity to the default explicitly
		func(v mSI) { v["signedIdentity"] = NewPRMMatchRepoDigestOrExact() },
		// Delete the signedIdentity field
		func(v mSI) { delete(v, "signedIdentity") },
	}
	for _, fn := range signedIdentityDefaultFns {
		err = tryUnmarshalModifiedSignedBy(t, &pr, validJSON, fn)
		require.NoError(t, err)
		assert.Equal(t, NewPRMMatchRepoDigestOrExact(), pr.SignedIdentity)
	}
}
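One of the corruption cases above, the keyPath/keyData conflict, can also be observed through the public API; a minimal sketch (the key path is a hypothetical example):

package main

import (
	"fmt"

	"github.com/containers/image/v4/signature"
)

func main() {
	// A signedBy requirement may carry keyPath or keyData, never both.
	_, err := signature.NewPolicyFromBytes([]byte(`{
	    "default": [
	        {
	            "type": "signedBy",
	            "keyType": "GPGKeys",
	            "keyPath": "/keys/example-keyring",
	            "keyData": "YWJj"
	        }
	    ]
	}`))
	fmt.Println(err) // expected to report that keyPath and keyData conflict
}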
func TestSBKeyTypeIsValid(t *testing.T) {
	// Valid values
	for _, s := range []sbKeyType{
		SBKeyTypeGPGKeys,
		SBKeyTypeSignedByGPGKeys,
		SBKeyTypeX509Certificates,
		SBKeyTypeSignedByX509CAs,
	} {
		assert.True(t, s.IsValid())
	}

	// Invalid values
	for _, s := range []string{"", "this is invalid"} {
		assert.False(t, sbKeyType(s).IsValid())
	}
}

func TestSBKeyTypeUnmarshalJSON(t *testing.T) {
	var kt sbKeyType

	testInvalidJSONInput(t, &kt)

	// Valid values.
	for _, v := range []sbKeyType{
		SBKeyTypeGPGKeys,
		SBKeyTypeSignedByGPGKeys,
		SBKeyTypeX509Certificates,
		SBKeyTypeSignedByX509CAs,
	} {
		kt = sbKeyType("")
		err := json.Unmarshal([]byte(`"`+string(v)+`"`), &kt)
		assert.NoError(t, err)
	}

	// Invalid values
	kt = sbKeyType("")
	err := json.Unmarshal([]byte(`""`), &kt)
	assert.Error(t, err)

	kt = sbKeyType("")
	err = json.Unmarshal([]byte(`"this is invalid"`), &kt)
	assert.Error(t, err)
}

// xNewPRSignedBaseLayer is like NewPRSignedBaseLayer, except it must not fail.
func xNewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) PolicyRequirement {
	pr, err := NewPRSignedBaseLayer(baseLayerIdentity)
	if err != nil {
		panic("xNewPRSignedBaseLayer failed")
	}
	return pr
}

func TestNewPRSignedBaseLayer(t *testing.T) {
	testBLI := NewPRMMatchExact()

	// Success
	_pr, err := NewPRSignedBaseLayer(testBLI)
	require.NoError(t, err)
	pr, ok := _pr.(*prSignedBaseLayer)
	require.True(t, ok)
	assert.Equal(t, &prSignedBaseLayer{
		prCommon:          prCommon{prTypeSignedBaseLayer},
		BaseLayerIdentity: testBLI,
	}, pr)

	// Invalid baseLayerIdentity
	_, err = NewPRSignedBaseLayer(nil)
	assert.Error(t, err)
}

func TestPRSignedBaseLayerUnmarshalJSON(t *testing.T) {
	var pr prSignedBaseLayer

	testInvalidJSONInput(t, &pr)

	// Start with a valid JSON.
	baseIdentity, err := NewPRMExactReference("registry.access.redhat.com/rhel7/rhel:7.2.3")
	require.NoError(t, err)
	validPR, err := NewPRSignedBaseLayer(baseIdentity)
	require.NoError(t, err)
	validJSON, err := json.Marshal(validPR)
	require.NoError(t, err)

	// Success
	pr = prSignedBaseLayer{}
	err = json.Unmarshal(validJSON, &pr)
	require.NoError(t, err)
	assert.Equal(t, validPR, &pr)

	// newPolicyRequirementFromJSON recognizes this type
	_pr, err := newPolicyRequirementFromJSON(validJSON)
	require.NoError(t, err)
	assert.Equal(t, validPR, _pr)

	// Various ways to corrupt the JSON
	breakFns := []func(mSI){
		// The "type" field is missing
		func(v mSI) { delete(v, "type") },
		// Wrong "type" field
		func(v mSI) { v["type"] = 1 },
		func(v mSI) { v["type"] = "this is invalid" },
		// Extra top-level sub-object
		func(v mSI) { v["unexpected"] = 1 },
		// The "baseLayerIdentity" field is missing
		func(v mSI) { delete(v, "baseLayerIdentity") },
		// Invalid "baseLayerIdentity" field
		func(v mSI) { v["baseLayerIdentity"] = "this is invalid" },
		// Invalid "baseLayerIdentity" an explicit nil
		func(v mSI) { v["baseLayerIdentity"] = nil },
	}
	for _, fn := range breakFns {
		var tmp mSI
		err := json.Unmarshal(validJSON, &tmp)
		require.NoError(t, err)

		fn(tmp)

		testJSON, err := json.Marshal(tmp)
		require.NoError(t, err)

		pr = prSignedBaseLayer{}
		err = json.Unmarshal(testJSON, &pr)
		assert.Error(t, err)
	}

	// Duplicated fields
	for _, field := range []string{"type", "baseLayerIdentity"} {
		var tmp mSI
		err := json.Unmarshal(validJSON, &tmp)
		require.NoError(t, err)

		testJSON := addExtraJSONMember(t, validJSON, field, tmp[field])

		pr = prSignedBaseLayer{}
		err = json.Unmarshal(testJSON, &pr)
		assert.Error(t, err)
	}
}

func TestNewPolicyReferenceMatchFromJSON(t *testing.T) {
	// Sample success. Others tested in the individual PolicyReferenceMatch.UnmarshalJSON implementations.
validPRM := NewPRMMatchRepoDigestOrExact() validJSON, err := json.Marshal(validPRM) require.NoError(t, err) prm, err := newPolicyReferenceMatchFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPRM, prm) // Invalid for _, invalid := range []interface{}{ // Not an object 1, // Missing type prmCommon{}, // Invalid type prmCommon{Type: "this is invalid"}, // Valid type but invalid contents prmExactReference{ prmCommon: prmCommon{Type: prmTypeExactReference}, DockerReference: "", }, } { testJSON, err := json.Marshal(invalid) require.NoError(t, err) _, err = newPolicyReferenceMatchFromJSON(testJSON) assert.Error(t, err, string(testJSON)) } } func TestNewPRMMatchExact(t *testing.T) { _prm := NewPRMMatchExact() prm, ok := _prm.(*prmMatchExact) require.True(t, ok) assert.Equal(t, &prmMatchExact{prmCommon{prmTypeMatchExact}}, prm) } func TestPRMMatchExactUnmarshalJSON(t *testing.T) { var prm prmMatchExact testInvalidJSONInput(t, &prm) // Start with a valid JSON. validPR := NewPRMMatchExact() validJSON, err := json.Marshal(validPR) require.NoError(t, err) // Success prm = prmMatchExact{} err = json.Unmarshal(validJSON, &prm) require.NoError(t, err) assert.Equal(t, validPR, &prm) // newPolicyReferenceMatchFromJSON recognizes this type _pr, err := newPolicyReferenceMatchFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPR, _pr) for _, invalid := range []mSI{ // Missing "type" field {}, // Wrong "type" field {"type": 1}, {"type": "this is invalid"}, // Extra fields { "type": string(prmTypeMatchExact), "unknown": "foo", }, } { testJSON, err := json.Marshal(invalid) require.NoError(t, err) prm = prmMatchExact{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err, string(testJSON)) } // Duplicated fields for _, field := range []string{"type"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) prm = prmMatchExact{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } } func TestNewPRMMatchRepoDigestOrExact(t *testing.T) { _prm := NewPRMMatchRepoDigestOrExact() prm, ok := _prm.(*prmMatchRepoDigestOrExact) require.True(t, ok) assert.Equal(t, &prmMatchRepoDigestOrExact{prmCommon{prmTypeMatchRepoDigestOrExact}}, prm) } func TestPRMMatchRepoDigestOrExactUnmarshalJSON(t *testing.T) { var prm prmMatchRepoDigestOrExact testInvalidJSONInput(t, &prm) // Start with a valid JSON. 
validPR := NewPRMMatchRepoDigestOrExact() validJSON, err := json.Marshal(validPR) require.NoError(t, err) // Success prm = prmMatchRepoDigestOrExact{} err = json.Unmarshal(validJSON, &prm) require.NoError(t, err) assert.Equal(t, validPR, &prm) // newPolicyReferenceMatchFromJSON recognizes this type _pr, err := newPolicyReferenceMatchFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPR, _pr) for _, invalid := range []mSI{ // Missing "type" field {}, // Wrong "type" field {"type": 1}, {"type": "this is invalid"}, // Extra fields { "type": string(prmTypeMatchRepoDigestOrExact), "unknown": "foo", }, } { testJSON, err := json.Marshal(invalid) require.NoError(t, err) prm = prmMatchRepoDigestOrExact{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err, string(testJSON)) } // Duplicated fields for _, field := range []string{"type"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) prm = prmMatchRepoDigestOrExact{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } } func TestNewPRMMatchRepository(t *testing.T) { _prm := NewPRMMatchRepository() prm, ok := _prm.(*prmMatchRepository) require.True(t, ok) assert.Equal(t, &prmMatchRepository{prmCommon{prmTypeMatchRepository}}, prm) } func TestPRMMatchRepositoryUnmarshalJSON(t *testing.T) { var prm prmMatchRepository testInvalidJSONInput(t, &prm) // Start with a valid JSON. validPR := NewPRMMatchRepository() validJSON, err := json.Marshal(validPR) require.NoError(t, err) // Success prm = prmMatchRepository{} err = json.Unmarshal(validJSON, &prm) require.NoError(t, err) assert.Equal(t, validPR, &prm) // newPolicyReferenceMatchFromJSON recognizes this type _pr, err := newPolicyReferenceMatchFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPR, _pr) for _, invalid := range []mSI{ // Missing "type" field {}, // Wrong "type" field {"type": 1}, {"type": "this is invalid"}, // Extra fields { "type": string(prmTypeMatchRepository), "unknown": "foo", }, } { testJSON, err := json.Marshal(invalid) require.NoError(t, err) prm = prmMatchRepository{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err, string(testJSON)) } // Duplicated fields for _, field := range []string{"type"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) prm = prmMatchRepository{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } } // xNewPRMExactReference is like NewPRMExactReference, except it must not fail. func xNewPRMExactReference(dockerReference string) PolicyReferenceMatch { pr, err := NewPRMExactReference(dockerReference) if err != nil { panic("xNewPRMExactReference failed") } return pr } func TestNewPRMExactReference(t *testing.T) { const testDR = "library/busybox:latest" // Success _prm, err := NewPRMExactReference(testDR) require.NoError(t, err) prm, ok := _prm.(*prmExactReference) require.True(t, ok) assert.Equal(t, &prmExactReference{ prmCommon: prmCommon{prmTypeExactReference}, DockerReference: testDR, }, prm) // Invalid dockerReference _, err = NewPRMExactReference("") assert.Error(t, err) // Uppercase is invalid in Docker reference components. 
_, err = NewPRMExactReference("INVALIDUPPERCASE:latest") assert.Error(t, err) // Missing tag _, err = NewPRMExactReference("library/busybox") assert.Error(t, err) } func TestPRMExactReferenceUnmarshalJSON(t *testing.T) { var prm prmExactReference testInvalidJSONInput(t, &prm) // Start with a valid JSON. validPRM, err := NewPRMExactReference("library/buxybox:latest") require.NoError(t, err) validJSON, err := json.Marshal(validPRM) require.NoError(t, err) // Success prm = prmExactReference{} err = json.Unmarshal(validJSON, &prm) require.NoError(t, err) assert.Equal(t, validPRM, &prm) // newPolicyReferenceMatchFromJSON recognizes this type _prm, err := newPolicyReferenceMatchFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPRM, _prm) // Various ways to corrupt the JSON breakFns := []func(mSI){ // The "type" field is missing func(v mSI) { delete(v, "type") }, // Wrong "type" field func(v mSI) { v["type"] = 1 }, func(v mSI) { v["type"] = "this is invalid" }, // Extra top-level sub-object func(v mSI) { v["unexpected"] = 1 }, // The "dockerReference" field is missing func(v mSI) { delete(v, "dockerReference") }, // Invalid "dockerReference" field func(v mSI) { v["dockerReference"] = 1 }, } for _, fn := range breakFns { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) fn(tmp) testJSON, err := json.Marshal(tmp) require.NoError(t, err) prm = prmExactReference{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } // Duplicated fields for _, field := range []string{"type", "baseLayerIdentity"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) prm = prmExactReference{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } } // xNewPRMExactRepository is like NewPRMExactRepository, except it must not fail. func xNewPRMExactRepository(dockerRepository string) PolicyReferenceMatch { pr, err := NewPRMExactRepository(dockerRepository) if err != nil { panic("xNewPRMExactRepository failed") } return pr } func TestNewPRMExactRepository(t *testing.T) { const testDR = "library/busybox:latest" // Success _prm, err := NewPRMExactRepository(testDR) require.NoError(t, err) prm, ok := _prm.(*prmExactRepository) require.True(t, ok) assert.Equal(t, &prmExactRepository{ prmCommon: prmCommon{prmTypeExactRepository}, DockerRepository: testDR, }, prm) // Invalid dockerRepository _, err = NewPRMExactRepository("") assert.Error(t, err) // Uppercase is invalid in Docker reference components. _, err = NewPRMExactRepository("INVALIDUPPERCASE") assert.Error(t, err) } func TestPRMExactRepositoryUnmarshalJSON(t *testing.T) { var prm prmExactRepository testInvalidJSONInput(t, &prm) // Start with a valid JSON. 
validPRM, err := NewPRMExactRepository("library/buxybox:latest") require.NoError(t, err) validJSON, err := json.Marshal(validPRM) require.NoError(t, err) // Success prm = prmExactRepository{} err = json.Unmarshal(validJSON, &prm) require.NoError(t, err) assert.Equal(t, validPRM, &prm) // newPolicyReferenceMatchFromJSON recognizes this type _prm, err := newPolicyReferenceMatchFromJSON(validJSON) require.NoError(t, err) assert.Equal(t, validPRM, _prm) // Various ways to corrupt the JSON breakFns := []func(mSI){ // The "type" field is missing func(v mSI) { delete(v, "type") }, // Wrong "type" field func(v mSI) { v["type"] = 1 }, func(v mSI) { v["type"] = "this is invalid" }, // Extra top-level sub-object func(v mSI) { v["unexpected"] = 1 }, // The "dockerRepository" field is missing func(v mSI) { delete(v, "dockerRepository") }, // Invalid "dockerRepository" field func(v mSI) { v["dockerRepository"] = 1 }, } for _, fn := range breakFns { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) fn(tmp) testJSON, err := json.Marshal(tmp) require.NoError(t, err) prm = prmExactRepository{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } // Duplicated fields for _, field := range []string{"type", "baseLayerIdentity"} { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) testJSON := addExtraJSONMember(t, validJSON, field, tmp[field]) prm = prmExactRepository{} err = json.Unmarshal(testJSON, &prm) assert.Error(t, err) } } image-4.0.1/signature/policy_eval.go000066400000000000000000000300401354546467100174500ustar00rootroot00000000000000// This defines the top-level policy evaluation API. // To the extent possible, the interface of the fuctions provided // here is intended to be completely unambiguous, and stable for users // to rely on. package signature import ( "context" "github.com/containers/image/v4/types" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // PolicyRequirementError is an explanatory text for rejecting a signature or an image. type PolicyRequirementError string func (err PolicyRequirementError) Error() string { return string(err) } // signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted. type signatureAcceptanceResult string const ( sarAccepted signatureAcceptanceResult = "sarAccepted" sarRejected signatureAcceptanceResult = "sarRejected" sarUnknown signatureAcceptanceResult = "sarUnknown" ) // PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image. // The type is public, but its definition is private. type PolicyRequirement interface { // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache // costly initialization like creating temporary GPG home directories and reading files. // Setup() (someState, error) // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement. // isSignatureAuthorAccepted, given an image and a signature blob, returns: // - sarAccepted if the signature has been verified against the appropriate public key // (where "appropriate public key" may depend on the contents of the signature); // in that case a parsed Signature should be returned. // - sarRejected if the signature has not been verified; // in that case error must be non-nil, and should be an PolicyRequirementError if evaluation // succeeded but the result was rejection. // - sarUnknown if if this PolicyRequirement does not deal with signatures. 
	// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
	// Returning sarUnknown and a non-nil error value is invalid.
	// WARNING: This makes the signature contents acceptable for further processing,
	// but it does not necessarily mean that the contents of the signature are
	// consistent with local policy.
	// For example:
	// - Do not use a true value to determine whether to run
	//   a container based on this image; use IsRunningImageAllowed instead.
	// - Just because a signature is accepted does not automatically mean the contents of the
	//   signature are authorized to run code as root, or to affect system or cluster configuration.
	isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)

	// isRunningImageAllowed returns true if the requirement allows running an image.
	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
	// succeeded but the result was rejection.
	// WARNING: This validates signatures and the manifest, but does not download or validate the
	// layers. Users must validate that the layers match their expected digests.
	isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error)
}

// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
// The type is public, but its implementation is private.
type PolicyReferenceMatch interface {
	// matchesDockerReference decides whether a specific image identity is accepted for an image
	// (or, usually, for the image's Reference().DockerReference()). Note that
	// image.Reference().DockerReference() may be nil.
	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
}

// PolicyContext encapsulates a policy and possible cached state
// for speeding up its evaluation.
type PolicyContext struct {
	Policy *Policy
	state  policyContextState // Internal consistency checking
}

// policyContextState is used internally to verify the users are not misusing a PolicyContext.
type policyContextState string

const (
	pcInvalid      policyContextState = ""
	pcInitializing policyContextState = "Initializing"
	pcReady        policyContextState = "Ready"
	pcInUse        policyContextState = "InUse"
	pcDestroying   policyContextState = "Destroying"
	pcDestroyed    policyContextState = "Destroyed"
)

// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
	if pc.state != expected {
		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
	}
	pc.state = new
	return nil
}

// NewPolicyContext sets up and initializes a context for the specified policy.
// The policy must not be modified while the context exists. FIXME: make a deep copy?
// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
	pc := &PolicyContext{Policy: policy, state: pcInitializing} // FIXME: initialize
	if err := pc.changeState(pcInitializing, pcReady); err != nil {
		// Huh?! This should never fail, we didn't give the pointer to anybody.
		// Just give up and leave unclean state around.
		return nil, err
	}
	return pc, nil
}

// Destroy should be called when the user of the context is done with it.
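// A typical lifecycle, as an illustrative sketch (error handling elided; the
// policy and unparsedImage values are assumptions of the example, not part of
// this package's API):
//
//	pc, err := NewPolicyContext(policy)
//	defer pc.Destroy()
//	allowed, err := pc.IsRunningImageAllowed(ctx, unparsedImage)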
func (pc *PolicyContext) Destroy() error {
	if err := pc.changeState(pcReady, pcDestroying); err != nil {
		return err
	}
	// FIXME: destroy
	return pc.changeState(pcDestroying, pcDestroyed)
}

// policyIdentityLogName returns a string description of the image identity for policy purposes.
// ONLY use this for log messages, not for any decisions!
func policyIdentityLogName(ref types.ImageReference) string {
	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
}

// requirementsForImageRef selects the appropriate requirements for ref.
func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
	// Do we have a PolicyTransportScopes for this transport?
	transportName := ref.Transport().Name()
	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
		// Look for a full match.
		identity := ref.PolicyConfigurationIdentity()
		if req, ok := transportScopes[identity]; ok {
			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
			return req
		}

		// Look for a match of the possible parent namespaces.
		for _, name := range ref.PolicyConfigurationNamespaces() {
			if req, ok := transportScopes[name]; ok {
				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
				return req
			}
		}

		// Look for a default match for the transport.
		if req, ok := transportScopes[""]; ok {
			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
			return req
		}
	}

	logrus.Debugf(" Using default policy section")
	return pc.Policy.Default
}

// GetSignaturesWithAcceptedAuthor returns those signatures from an image
// for which the policy accepts the author (and which have been successfully
// verified).
// NOTE: This may legitimately return an empty list and no error, if the image
// has no signatures or only invalid signatures.
// WARNING: This makes the signature contents acceptable for further processing,
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use the existence of an accepted signature to determine whether to run
//   a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
//   signature are authorized to run code as root, or to affect system or cluster configuration.
func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, image types.UnparsedImage) (sigs []*Signature, finalErr error) {
	if err := pc.changeState(pcReady, pcInUse); err != nil {
		return nil, err
	}
	defer func() {
		if err := pc.changeState(pcInUse, pcReady); err != nil {
			sigs = nil
			finalErr = err
		}
	}()

	logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
	reqs := pc.requirementsForImageRef(image.Reference())

	// FIXME: rename Signatures to UnverifiedSignatures
	unverifiedSignatures, err := image.Signatures(ctx)
	if err != nil {
		return nil, err
	}

	res := make([]*Signature, 0, len(unverifiedSignatures))
	for sigNumber, sig := range unverifiedSignatures {
		var acceptedSig *Signature // non-nil if accepted
		rejected := false
		// FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
		logrus.Debugf("Evaluating signature %d:", sigNumber)
	interpretingReqs:
		for reqNumber, req := range reqs {
			// FIXME: Log the requirement itself? For now, we use just the number.
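			// A signature is ultimately accepted only if at least one requirement
			// returns sarAccepted and none returns sarRejected; sarUnknown results
			// are skipped and do not affect the outcome either way.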
			// FIXME: supply state
			switch res, as, err := req.isSignatureAuthorAccepted(ctx, image, sig); res {
			case sarAccepted:
				if as == nil { // Coverage: this should never happen
					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
					rejected = true
					break interpretingReqs
				}
				logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
				if acceptedSig == nil {
					acceptedSig = as
				} else if *as != *acceptedSig { // Coverage: this should never happen
					// Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
					logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
					rejected = true
					acceptedSig = nil
					break interpretingReqs
				}
			case sarRejected:
				logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
				rejected = true
				break interpretingReqs
			case sarUnknown:
				if err != nil { // Coverage: this should never happen
					logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
					rejected = true
					break interpretingReqs
				}
				logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
			default: // Coverage: this should never happen
				logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
				rejected = true
				break interpretingReqs
			}
		}
		// This also handles the (invalid) case of empty reqs, by rejecting the signature.
		if acceptedSig != nil && !rejected {
			logrus.Debugf(" Overall: OK, signature accepted")
			res = append(res, acceptedSig)
		} else {
			logrus.Debugf(" Overall: Signature not accepted")
		}
	}
	return res, nil
}

// IsRunningImageAllowed returns true iff the policy allows running the image.
// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// WARNING: This validates signatures and the manifest, but does not download or validate the
// layers. Users must validate that the layers match their expected digests.
func (pc *PolicyContext) IsRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (res bool, finalErr error) {
	if err := pc.changeState(pcReady, pcInUse); err != nil {
		return false, err
	}
	defer func() {
		if err := pc.changeState(pcInUse, pcReady); err != nil {
			res = false
			finalErr = err
		}
	}()

	logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
	reqs := pc.requirementsForImageRef(image.Reference())

	if len(reqs) == 0 {
		return false, PolicyRequirementError("List of verification policy requirements must not be empty")
	}

	for reqNumber, req := range reqs {
		// FIXME: supply state
		allowed, err := req.isRunningImageAllowed(ctx, image)
		if !allowed {
			logrus.Debugf("Requirement %d: denied, done", reqNumber)
			return false, err
		}
		logrus.Debugf(" Requirement %d: allowed", reqNumber)
	}
	// We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
	logrus.Debugf("Overall: allowed")
	return true, nil
}
image-4.0.1/signature/policy_eval_baselayer.go000066400000000000000000000012231354546467100215000ustar00rootroot00000000000000// Policy evaluation for prSignedBaseLayer.
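//
// In policy.json terms, this requirement is expected to be configured roughly as
// follows (an illustrative sketch of the configuration format, not normative
// documentation):
//
//	{"type": "signedBaseLayer", "baseLayerIdentity": {"type": "matchRepository"}}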
package signature import ( "context" "github.com/containers/image/v4/types" "github.com/sirupsen/logrus" ) func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { return sarUnknown, nil, nil } func (pr *prSignedBaseLayer) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { // FIXME? Reject this at policy parsing time already? logrus.Errorf("signedBaseLayer not implemented yet!") return false, PolicyRequirementError("signedBaseLayer not implemented yet!") } image-4.0.1/signature/policy_eval_baselayer_test.go000066400000000000000000000016141354546467100225430ustar00rootroot00000000000000package signature import ( "context" "testing" "github.com/stretchr/testify/require" ) func TestPRSignedBaseLayerIsSignatureAuthorAccepted(t *testing.T) { pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository()) require.NoError(t, err) // Pass nil pointers to, kind of, test that the return value does not depend on the parameters. sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nil, nil) assertSARUnknown(t, sar, parsedSig, err) } func TestPRSignedBaseLayerIsRunningImageAllowed(t *testing.T) { // This will obviously need to change after signedBaseLayer is implemented. pr, err := NewPRSignedBaseLayer(NewPRMMatchRepository()) require.NoError(t, err) // Pass a nil pointer to, kind of, test that the return value does not depend on the image. res, err := pr.isRunningImageAllowed(context.Background(), nil) assertRunningRejectedPolicyRequirement(t, res, err) } image-4.0.1/signature/policy_eval_signedby.go000066400000000000000000000076041354546467100213460ustar00rootroot00000000000000// Policy evaluation for prSignedBy. package signature import ( "context" "fmt" "io/ioutil" "strings" "github.com/pkg/errors" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/opencontainers/go-digest" ) func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { switch pr.KeyType { case SBKeyTypeGPGKeys: case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: // FIXME? Reject this at policy parsing time already? 
		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
	default:
		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
	}

	if pr.KeyPath != "" && pr.KeyData != nil {
		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
	}
	// FIXME: move this to per-context initialization
	var data []byte
	if pr.KeyData != nil {
		data = pr.KeyData
	} else {
		d, err := ioutil.ReadFile(pr.KeyPath)
		if err != nil {
			return sarRejected, nil, err
		}
		data = d
	}

	// FIXME: move this to per-context initialization
	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
	if err != nil {
		return sarRejected, nil, err
	}
	defer mech.Close()
	if len(trustedIdentities) == 0 {
		return sarRejected, nil, PolicyRequirementError("No public keys imported")
	}

	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
		validateKeyIdentity: func(keyIdentity string) error {
			for _, trustedIdentity := range trustedIdentities {
				if keyIdentity == trustedIdentity {
					return nil
				}
			}
			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
			// not be reachable.
			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
		},
		validateSignedDockerReference: func(ref string) error {
			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
			}
			return nil
		},
		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
			m, _, err := image.Manifest(ctx)
			if err != nil {
				return err
			}
			digestMatches, err := manifest.MatchesDigest(m, digest)
			if err != nil {
				return err
			}
			if !digestMatches {
				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
			}
			return nil
		},
	})
	if err != nil {
		return sarRejected, nil, err
	}

	return sarAccepted, signature, nil
}

func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) {
	sigs, err := image.Signatures(ctx)
	if err != nil {
		return false, err
	}
	var rejections []error
	for _, s := range sigs {
		var reason error
		switch res, _, err := pr.isSignatureAuthorAccepted(ctx, image, s); res {
		case sarAccepted:
			// One accepted signature is enough.
			return true, nil
		case sarRejected:
			reason = err
		case sarUnknown:
			// Huh?! This should not happen at all; treat it as any other invalid value.
fallthrough default: reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) } rejections = append(rejections, reason) } var summary error switch len(rejections) { case 0: summary = PolicyRequirementError("A signature was required, but no signature exists") case 1: summary = rejections[0] default: var msgs []string for _, e := range rejections { msgs = append(msgs, e.Error()) } summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", strings.Join(msgs, "; "))) } return false, summary } image-4.0.1/signature/policy_eval_signedby_test.go000066400000000000000000000265261354546467100224110ustar00rootroot00000000000000package signature import ( "context" "io/ioutil" "os" "path" "testing" "github.com/containers/image/v4/directory" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // dirImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference. // The caller must call the returned close callback when done. func dirImageMock(t *testing.T, dir, dockerReference string) (types.UnparsedImage, func() error) { ref, err := reference.ParseNormalizedNamed(dockerReference) require.NoError(t, err) return dirImageMockWithRef(t, dir, refImageReferenceMock{ref}) } // dirImageMockWithRef returns a types.UnparsedImage for a directory, claiming a specified ref. // The caller must call the returned close callback when done. func dirImageMockWithRef(t *testing.T, dir string, ref types.ImageReference) (types.UnparsedImage, func() error) { srcRef, err := directory.NewReference(dir) require.NoError(t, err) src, err := srcRef.NewImageSource(context.Background(), nil) require.NoError(t, err) return image.UnparsedInstance(&dirImageSourceMock{ ImageSource: src, ref: ref, }, nil), src.Close } // dirImageSourceMock inherits dirImageSource, but overrides its Reference method. 
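// This lets a test pair a real on-disk image source with an arbitrary claimed
// reference, so that reference-matching code paths can be exercised independently
// of where the fixture actually lives.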
type dirImageSourceMock struct {
	types.ImageSource
	ref types.ImageReference
}

func (d *dirImageSourceMock) Reference() types.ImageReference {
	return d.ref
}

func TestPRSignedByIsSignatureAuthorAccepted(t *testing.T) {
	ktGPG := SBKeyTypeGPGKeys
	prm := NewPRMMatchExact()

	testImage, closer := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer closer()
	testImageSig, err := ioutil.ReadFile("fixtures/dir-img-valid/signature-1")
	require.NoError(t, err)

	// Successful validation, with KeyData and KeyPath
	pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), testImage, testImageSig)
	assertSARAccepted(t, sar, parsedSig, err, Signature{
		DockerManifestDigest: TestImageManifestDigest,
		DockerReference:      "testing/manifest:latest",
	})

	keyData, err := ioutil.ReadFile("fixtures/public-key.gpg")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyData(ktGPG, keyData, prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), testImage, testImageSig)
	assertSARAccepted(t, sar, parsedSig, err, Signature{
		DockerManifestDigest: TestImageManifestDigest,
		DockerReference:      "testing/manifest:latest",
	})

	// Unimplemented and invalid KeyType values
	for _, keyType := range []sbKeyType{SBKeyTypeSignedByGPGKeys,
		SBKeyTypeX509Certificates,
		SBKeyTypeSignedByX509CAs,
		sbKeyType("This is invalid"),
	} {
		// Do not use NewPRSignedByKeyData, because it would reject invalid values.
		pr := &prSignedBy{
			KeyType:        keyType,
			KeyData:        []byte("abc"),
			SignedIdentity: prm,
		}
		// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
		sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
		assertSARRejected(t, sar, parsedSig, err)
	}

	// Both KeyPath and KeyData set. Do not use NewPRSignedBy*, because it would reject this.
	prSB := &prSignedBy{
		KeyType:        ktGPG,
		KeyPath:        "/foo/bar",
		KeyData:        []byte("abc"),
		SignedIdentity: prm,
	}
	// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
	sar, parsedSig, err = prSB.isSignatureAuthorAccepted(context.Background(), nil, nil)
	assertSARRejected(t, sar, parsedSig, err)

	// Invalid KeyPath
	pr, err = NewPRSignedByKeyPath(ktGPG, "/this/does/not/exist", prm)
	require.NoError(t, err)
	// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
	assertSARRejected(t, sar, parsedSig, err)

	// Errors initializing the temporary GPG directory and mechanism are not obviously easy to reach.

	// KeyData has no public keys.
	pr, err = NewPRSignedByKeyData(ktGPG, []byte{}, prm)
	require.NoError(t, err)
	// Pass nil pointers to, kind of, test that the return value does not depend on the parameters.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, nil)
	assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)

	// A signature which does not GPG verify
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, []byte("invalid signature"))
	assertSARRejected(t, sar, parsedSig, err)

	// A valid signature using an unknown key.
	// (This is (currently?) rejected through the "mech.Verify fails" path, not the "!identityFound" path,
	// because we use a temporary directory and only import the trusted keys.)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sig, err := ioutil.ReadFile("fixtures/unknown-key.signature")
	require.NoError(t, err)
	// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, sig)
	assertSARRejected(t, sar, parsedSig, err)

	// A valid signature of an invalid JSON.
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sig, err = ioutil.ReadFile("fixtures/invalid-blob.signature")
	require.NoError(t, err)
	// Pass a nil pointer to, kind of, test that the return value does not depend on the image parameter.
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), nil, sig)
	assertSARRejected(t, sar, parsedSig, err)
	assert.IsType(t, InvalidSignatureError{}, err)

	// A valid signature with a rejected identity.
	nonmatchingPRM, err := NewPRMExactReference("this/doesnt:match")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", nonmatchingPRM)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), testImage, testImageSig)
	assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)

	// Error reading image manifest
	image, closer := dirImageMock(t, "fixtures/dir-img-no-manifest", "testing/manifest:latest")
	defer closer()
	sig, err = ioutil.ReadFile("fixtures/dir-img-no-manifest/signature-1")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), image, sig)
	assertSARRejected(t, sar, parsedSig, err)

	// Error computing manifest digest
	image, closer = dirImageMock(t, "fixtures/dir-img-manifest-digest-error", "testing/manifest:latest")
	defer closer()
	sig, err = ioutil.ReadFile("fixtures/dir-img-manifest-digest-error/signature-1")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), image, sig)
	assertSARRejected(t, sar, parsedSig, err)

	// A valid signature with a non-matching manifest
	image, closer = dirImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest")
	defer closer()
	sig, err = ioutil.ReadFile("fixtures/dir-img-modified-manifest/signature-1")
	require.NoError(t, err)
	pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm)
	require.NoError(t, err)
	sar, parsedSig, err = pr.isSignatureAuthorAccepted(context.Background(), image, sig)
	assertSARRejectedPolicyRequirement(t, sar, parsedSig, err)
}

// createInvalidSigDir creates a directory suitable for dirImageMock, in which image.Signatures()
// fails.
// The caller should eventually call os.RemoveAll on the returned path.
func createInvalidSigDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "skopeo-test-unreadable-signature")
	require.NoError(t, err)
	err = ioutil.WriteFile(path.Join(dir, "manifest.json"), []byte("{}"), 0644)
	require.NoError(t, err)
	// Creating a 000-permissions file would work for unprivileged accounts, but root (in particular,
	// in the Docker container we use for testing) would still have access. So, create a symlink
	// pointing to itself, to cause an ELOOP.
(Note that a symlink pointing to a nonexistent file would be treated // just like a nonexistent signature file, and not an error.) err = os.Symlink("signature-1", path.Join(dir, "signature-1")) require.NoError(t, err) return dir } func TestPRSignedByIsRunningImageAllowed(t *testing.T) { ktGPG := SBKeyTypeGPGKeys prm := NewPRMMatchExact() // A simple success case: single valid signature. image, closer := dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") defer closer() pr, err := NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err := pr.isRunningImageAllowed(context.Background(), image) assertRunningAllowed(t, allowed, err) // Error reading signatures invalidSigDir := createInvalidSigDir(t) defer os.RemoveAll(invalidSigDir) image, closer = dirImageMock(t, invalidSigDir, "testing/manifest:latest") defer closer() pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err = pr.isRunningImageAllowed(context.Background(), image) assertRunningRejected(t, allowed, err) // No signatures image, closer = dirImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest") defer closer() pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err = pr.isRunningImageAllowed(context.Background(), image) assertRunningRejectedPolicyRequirement(t, allowed, err) // 1 invalid signature: use dir-img-valid, but a non-matching Docker reference image, closer = dirImageMock(t, "fixtures/dir-img-valid", "testing/manifest:notlatest") defer closer() pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err = pr.isRunningImageAllowed(context.Background(), image) assertRunningRejectedPolicyRequirement(t, allowed, err) // 2 valid signatures image, closer = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest") defer closer() pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err = pr.isRunningImageAllowed(context.Background(), image) assertRunningAllowed(t, allowed, err) // One invalid, one valid signature (in this order) image, closer = dirImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest") defer closer() pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err = pr.isRunningImageAllowed(context.Background(), image) assertRunningAllowed(t, allowed, err) // 2 invalid signatures: use dir-img-valid-2, but a non-matching Docker reference image, closer = dirImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:notlatest") defer closer() pr, err = NewPRSignedByKeyPath(ktGPG, "fixtures/public-key.gpg", prm) require.NoError(t, err) allowed, err = pr.isRunningImageAllowed(context.Background(), image) assertRunningRejectedPolicyRequirement(t, allowed, err) } image-4.0.1/signature/policy_eval_simple.go000066400000000000000000000023271354546467100210300ustar00rootroot00000000000000// Policy evaluation for the various simple PolicyRequirement types. package signature import ( "context" "fmt" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" ) func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { // prInsecureAcceptAnything semantics: Every image is allowed to run, // but this does not consider the signature as verified. 
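	// In policy.json, this requirement is written as (an illustrative sketch of
	// the configuration format): {"type": "insecureAcceptAnything"}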
return sarUnknown, nil, nil } func (pr *prInsecureAcceptAnything) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { return true, nil } func (pr *prReject) isSignatureAuthorAccepted(ctx context.Context, image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) } func (pr *prReject) isRunningImageAllowed(ctx context.Context, image types.UnparsedImage) (bool, error) { return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) } image-4.0.1/signature/policy_eval_simple_test.go000066400000000000000000000057521354546467100220740ustar00rootroot00000000000000package signature import ( "context" "testing" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/internal/testing/mocks" "github.com/containers/image/v4/types" ) // nameOnlyImageMock is a mock of types.UnparsedImage which only allows transports.ImageName to work type nameOnlyImageMock struct { forbiddenImageMock } func (nameOnlyImageMock) Reference() types.ImageReference { return nameOnlyImageReferenceMock("== StringWithinTransport mock") } // nameOnlyImageReferenceMock is a mock of types.ImageReference which only allows transports.ImageName to work, returning self. type nameOnlyImageReferenceMock string func (ref nameOnlyImageReferenceMock) Transport() types.ImageTransport { return mocks.NameImageTransport("== Transport mock") } func (ref nameOnlyImageReferenceMock) StringWithinTransport() string { return string(ref) } func (ref nameOnlyImageReferenceMock) DockerReference() reference.Named { panic("unexpected call to a mock function") } func (ref nameOnlyImageReferenceMock) PolicyConfigurationIdentity() string { panic("unexpected call to a mock function") } func (ref nameOnlyImageReferenceMock) PolicyConfigurationNamespaces() []string { panic("unexpected call to a mock function") } func (ref nameOnlyImageReferenceMock) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { panic("unexpected call to a mock function") } func (ref nameOnlyImageReferenceMock) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { panic("unexpected call to a mock function") } func (ref nameOnlyImageReferenceMock) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { panic("unexpected call to a mock function") } func (ref nameOnlyImageReferenceMock) DeleteImage(ctx context.Context, sys *types.SystemContext) error { panic("unexpected call to a mock function") } func TestPRInsecureAcceptAnythingIsSignatureAuthorAccepted(t *testing.T) { pr := NewPRInsecureAcceptAnything() // Pass nil signature to, kind of, test that the return value does not depend on it. sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nameOnlyImageMock{}, nil) assertSARUnknown(t, sar, parsedSig, err) } func TestPRInsecureAcceptAnythingIsRunningImageAllowed(t *testing.T) { pr := NewPRInsecureAcceptAnything() res, err := pr.isRunningImageAllowed(context.Background(), nameOnlyImageMock{}) assertRunningAllowed(t, res, err) } func TestPRRejectIsSignatureAuthorAccepted(t *testing.T) { pr := NewPRReject() // Pass nil signature to, kind of, test that the return value does not depend on it. 
sar, parsedSig, err := pr.isSignatureAuthorAccepted(context.Background(), nameOnlyImageMock{}, nil) assertSARRejectedPolicyRequirement(t, sar, parsedSig, err) } func TestPRRejectIsRunningImageAllowed(t *testing.T) { pr := NewPRReject() res, err := pr.isRunningImageAllowed(context.Background(), nameOnlyImageMock{}) assertRunningRejectedPolicyRequirement(t, res, err) } image-4.0.1/signature/policy_eval_test.go000066400000000000000000000475711354546467100205300ustar00rootroot00000000000000package signature import ( "context" "fmt" "os" "testing" "github.com/containers/image/v4/docker" "github.com/containers/image/v4/docker/policyconfiguration" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/internal/testing/mocks" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPolicyRequirementError(t *testing.T) { // A stupid test just to keep code coverage s := "test" err := PolicyRequirementError(s) assert.Equal(t, s, err.Error()) } func TestPolicyContextChangeState(t *testing.T) { pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}}) require.NoError(t, err) defer pc.Destroy() require.Equal(t, pcReady, pc.state) err = pc.changeState(pcReady, pcInUse) require.NoError(t, err) err = pc.changeState(pcReady, pcInUse) require.Error(t, err) // Return state to pcReady to allow pc.Destroy to clean up. err = pc.changeState(pcInUse, pcReady) require.NoError(t, err) } func TestPolicyContextNewDestroy(t *testing.T) { pc, err := NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}}) require.NoError(t, err) assert.Equal(t, pcReady, pc.state) err = pc.Destroy() require.NoError(t, err) assert.Equal(t, pcDestroyed, pc.state) // Trying to destroy when not pcReady pc, err = NewPolicyContext(&Policy{Default: PolicyRequirements{NewPRReject()}}) require.NoError(t, err) err = pc.changeState(pcReady, pcInUse) require.NoError(t, err) err = pc.Destroy() require.Error(t, err) assert.Equal(t, pcInUse, pc.state) // The state, and hopefully nothing else, has changed. err = pc.changeState(pcInUse, pcReady) require.NoError(t, err) err = pc.Destroy() assert.NoError(t, err) } // pcImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference // and handles PolicyConfigurationIdentity and PolicyConfigurationReference consistently. type pcImageReferenceMock struct { transportName string ref reference.Named } func (ref pcImageReferenceMock) Transport() types.ImageTransport { return mocks.NameImageTransport(ref.transportName) } func (ref pcImageReferenceMock) StringWithinTransport() string { // We use this in error messages, so sadly we must return something. 
return "== StringWithinTransport mock" } func (ref pcImageReferenceMock) DockerReference() reference.Named { return ref.ref } func (ref pcImageReferenceMock) PolicyConfigurationIdentity() string { res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) if res == "" || err != nil { panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) } return res } func (ref pcImageReferenceMock) PolicyConfigurationNamespaces() []string { if ref.ref == nil { panic("unexpected call to a mock function") } return policyconfiguration.DockerReferenceNamespaces(ref.ref) } func (ref pcImageReferenceMock) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { panic("unexpected call to a mock function") } func (ref pcImageReferenceMock) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { panic("unexpected call to a mock function") } func (ref pcImageReferenceMock) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { panic("unexpected call to a mock function") } func (ref pcImageReferenceMock) DeleteImage(ctx context.Context, sys *types.SystemContext) error { panic("unexpected call to a mock function") } func TestPolicyContextRequirementsForImageRefNotRegisteredTransport(t *testing.T) { transports.Delete("docker") assert.Nil(t, transports.Get("docker")) defer func() { assert.Nil(t, transports.Get("docker")) transports.Register(docker.Transport) assert.NotNil(t, transports.Get("docker")) }() pr := []PolicyRequirement{ xNewPRSignedByKeyData(SBKeyTypeSignedByGPGKeys, []byte("RH"), NewPRMMatchRepository()), } policy := &Policy{ Default: PolicyRequirements{NewPRReject()}, Transports: map[string]PolicyTransportScopes{ "docker": { "registry.access.redhat.com": pr, }, }, } pc, err := NewPolicyContext(policy) require.NoError(t, err) ref, err := reference.ParseNormalizedNamed("registry.access.redhat.com/rhel7:latest") require.NoError(t, err) reqs := pc.requirementsForImageRef(pcImageReferenceMock{"docker", ref}) assert.True(t, &(reqs[0]) == &(pr[0])) assert.True(t, len(reqs) == len(pr)) } func TestPolicyContextRequirementsForImageRef(t *testing.T) { ktGPG := SBKeyTypeGPGKeys prm := NewPRMMatchRepoDigestOrExact() policy := &Policy{ Default: PolicyRequirements{NewPRReject()}, Transports: map[string]PolicyTransportScopes{}, } // Just put _something_ into the PolicyTransportScopes map for the keys we care about, and make it pairwise // distinct so that we can compare the values and show them when debugging the tests. 
for _, t := range []struct{ transport, scope string }{ {"docker", ""}, {"docker", "unmatched"}, {"docker", "deep.com"}, {"docker", "deep.com/n1"}, {"docker", "deep.com/n1/n2"}, {"docker", "deep.com/n1/n2/n3"}, {"docker", "deep.com/n1/n2/n3/repo"}, {"docker", "deep.com/n1/n2/n3/repo:tag2"}, {"atomic", "unmatched"}, } { if _, ok := policy.Transports[t.transport]; !ok { policy.Transports[t.transport] = PolicyTransportScopes{} } policy.Transports[t.transport][t.scope] = PolicyRequirements{xNewPRSignedByKeyData(ktGPG, []byte(t.transport+t.scope), prm)} } pc, err := NewPolicyContext(policy) require.NoError(t, err) for _, c := range []struct{ inputTransport, input, matchedTransport, matched string }{ // Full match {"docker", "deep.com/n1/n2/n3/repo:tag2", "docker", "deep.com/n1/n2/n3/repo:tag2"}, // Namespace matches {"docker", "deep.com/n1/n2/n3/repo:nottag2", "docker", "deep.com/n1/n2/n3/repo"}, {"docker", "deep.com/n1/n2/n3/notrepo:tag2", "docker", "deep.com/n1/n2/n3"}, {"docker", "deep.com/n1/n2/notn3/repo:tag2", "docker", "deep.com/n1/n2"}, {"docker", "deep.com/n1/notn2/n3/repo:tag2", "docker", "deep.com/n1"}, // Host name match {"docker", "deep.com/notn1/n2/n3/repo:tag2", "docker", "deep.com"}, // Default {"docker", "this.doesnt/match:anything", "docker", ""}, // No match within a matched transport which doesn't have a "" scope {"atomic", "this.doesnt/match:anything", "", ""}, // No configuration available for this transport at all {"dir", "what/ever", "", ""}, // "what/ever" is not a valid scope for the real "dir" transport, but we only need it to be a valid reference.Named. } { var expected PolicyRequirements if c.matchedTransport != "" { e, ok := policy.Transports[c.matchedTransport][c.matched] require.True(t, ok, fmt.Sprintf("case %s:%s: expected reqs not found", c.inputTransport, c.input)) expected = e } else { expected = policy.Default } ref, err := reference.ParseNormalizedNamed(c.input) require.NoError(t, err) reqs := pc.requirementsForImageRef(pcImageReferenceMock{c.inputTransport, ref}) comment := fmt.Sprintf("case %s:%s: %#v", c.inputTransport, c.input, reqs[0]) // Do not use assert.Equal, which would do a deep contents comparison; we want to compare // the pointers. Also, == does not work on slices; so test that the slices start at the // same element and have the same length. assert.True(t, &(reqs[0]) == &(expected[0]), comment) assert.True(t, len(reqs) == len(expected), comment) } } // pcImageMock returns a types.UnparsedImage for a directory, claiming a specified dockerReference and implementing PolicyConfigurationIdentity/PolicyConfigurationNamespaces. // The caller must call the returned close callback when done. 
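// An illustrative use (the fixture path and reference mirror the calls below):
//
//	img, closer := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
//	defer closer()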
func pcImageMock(t *testing.T, dir, dockerReference string) (types.UnparsedImage, func() error) { ref, err := reference.ParseNormalizedNamed(dockerReference) require.NoError(t, err) return dirImageMockWithRef(t, dir, pcImageReferenceMock{"docker", ref}) } func TestPolicyContextGetSignaturesWithAcceptedAuthor(t *testing.T) { expectedSig := &Signature{ DockerManifestDigest: TestImageManifestDigest, DockerReference: "testing/manifest:latest", } pc, err := NewPolicyContext(&Policy{ Default: PolicyRequirements{NewPRReject()}, Transports: map[string]PolicyTransportScopes{ "docker": { "docker.io/testing/manifest:latest": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()), }, "docker.io/testing/manifest:twoAccepts": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), }, "docker.io/testing/manifest:acceptReject": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), NewPRReject(), }, "docker.io/testing/manifest:acceptUnknown": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), xNewPRSignedBaseLayer(NewPRMMatchRepository()), }, "docker.io/testing/manifest:rejectUnknown": { NewPRReject(), xNewPRSignedBaseLayer(NewPRMMatchRepository()), }, "docker.io/testing/manifest:unknown": { xNewPRSignedBaseLayer(NewPRMMatchRepository()), }, "docker.io/testing/manifest:unknown2": { NewPRInsecureAcceptAnything(), }, "docker.io/testing/manifest:invalidEmptyRequirements": {}, }, }, }) require.NoError(t, err) defer pc.Destroy() // Success img, closer := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") defer closer() sigs, err := pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Equal(t, []*Signature{expectedSig}, sigs) // Two signatures // FIXME? Use really different signatures for this? 
img, closer = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Equal(t, []*Signature{expectedSig, expectedSig}, sigs) // No signatures img, closer = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) // Only invalid signatures img, closer = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) // 1 invalid, 1 valid signature (in this order) img, closer = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Equal(t, []*Signature{expectedSig}, sigs) // Two sarAccepted results for one signature img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:twoAccepts") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Equal(t, []*Signature{expectedSig}, sigs) // sarAccepted+sarRejected for a signature img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptReject") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) // sarAccepted+sarUnknown for a signature img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:acceptUnknown") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Equal(t, []*Signature{expectedSig}, sigs) // sarRejected+sarUnknown for a signature img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:rejectUnknown") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) // sarUnknown only img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:unknown2") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) // Empty list of requirements (invalid) img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) require.NoError(t, err) assert.Empty(t, sigs) // Failures: Make sure we return nil sigs. // Unexpected state (context already destroyed) destroyedPC, err := NewPolicyContext(pc.Policy) require.NoError(t, err) err = destroyedPC.Destroy() require.NoError(t, err) img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") defer closer() sigs, err = destroyedPC.GetSignaturesWithAcceptedAuthor(context.Background(), img) assert.Error(t, err) assert.Nil(t, sigs) // Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement // implementations meddling with the state, or threads. This is for catching trivial programmer // mistakes only, anyway. 
// Error reading signatures. invalidSigDir := createInvalidSigDir(t) defer os.RemoveAll(invalidSigDir) img, closer = pcImageMock(t, invalidSigDir, "testing/manifest:latest") defer closer() sigs, err = pc.GetSignaturesWithAcceptedAuthor(context.Background(), img) assert.Error(t, err) assert.Nil(t, sigs) } func TestPolicyContextIsRunningImageAllowed(t *testing.T) { pc, err := NewPolicyContext(&Policy{ Default: PolicyRequirements{NewPRReject()}, Transports: map[string]PolicyTransportScopes{ "docker": { "docker.io/testing/manifest:latest": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchExact()), }, "docker.io/testing/manifest:twoAllows": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), }, "docker.io/testing/manifest:allowDeny": { xNewPRSignedByKeyPath(SBKeyTypeGPGKeys, "fixtures/public-key.gpg", NewPRMMatchRepository()), NewPRReject(), }, "docker.io/testing/manifest:reject": { NewPRReject(), }, "docker.io/testing/manifest:acceptAnything": { NewPRInsecureAcceptAnything(), }, "docker.io/testing/manifest:invalidEmptyRequirements": {}, }, }, }) require.NoError(t, err) defer pc.Destroy() // Success img, closer := pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest") defer closer() res, err := pc.IsRunningImageAllowed(context.Background(), img) assertRunningAllowed(t, res, err) // Two signatures // FIXME? Use really different signatures for this? img, closer = pcImageMock(t, "fixtures/dir-img-valid-2", "testing/manifest:latest") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningAllowed(t, res, err) // No signatures img, closer = pcImageMock(t, "fixtures/dir-img-unsigned", "testing/manifest:latest") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningRejectedPolicyRequirement(t, res, err) // Only invalid signatures img, closer = pcImageMock(t, "fixtures/dir-img-modified-manifest", "testing/manifest:latest") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningRejectedPolicyRequirement(t, res, err) // 1 invalid, 1 valid signature (in this order) img, closer = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:latest") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningAllowed(t, res, err) // Two allowed results img, closer = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:twoAllows") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningAllowed(t, res, err) // Allow + deny results img, closer = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:allowDeny") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningRejectedPolicyRequirement(t, res, err) // prReject works img, closer = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:reject") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningRejectedPolicyRequirement(t, res, err) // prInsecureAcceptAnything works img, closer = pcImageMock(t, "fixtures/dir-img-mixed", "testing/manifest:acceptAnything") defer closer() res, err = pc.IsRunningImageAllowed(context.Background(), img) assertRunningAllowed(t, res, err) // Empty list of requirements (invalid) img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:invalidEmptyRequirements") defer closer() res, err 
= pc.IsRunningImageAllowed(context.Background(), img)
	assertRunningRejectedPolicyRequirement(t, res, err)

	// Unexpected state (context already destroyed)
	destroyedPC, err := NewPolicyContext(pc.Policy)
	require.NoError(t, err)
	err = destroyedPC.Destroy()
	require.NoError(t, err)
	img, closer = pcImageMock(t, "fixtures/dir-img-valid", "testing/manifest:latest")
	defer closer()
	res, err = destroyedPC.IsRunningImageAllowed(context.Background(), img)
	assertRunningRejected(t, res, err)
	// Not testing the pcInUse->pcReady transition, that would require custom PolicyRequirement
	// implementations meddling with the state, or threads. This is for catching trivial programmer
	// mistakes only, anyway.
}

// Helpers for validating PolicyRequirement.isSignatureAuthorAccepted results:

// assertSARAccepted verifies that isSignatureAuthorAccepted returns a consistent sarAccepted result
// with the expected signature.
func assertSARAccepted(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error, expectedSig Signature) {
	assert.Equal(t, sarAccepted, sar)
	assert.Equal(t, &expectedSig, parsedSig)
	assert.NoError(t, err)
}

// assertSARRejected verifies that isSignatureAuthorAccepted returns a consistent sarRejected result.
func assertSARRejected(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
	assert.Equal(t, sarRejected, sar)
	assert.Nil(t, parsedSig)
	assert.Error(t, err)
}

// assertSARRejectedPolicyRequirement verifies that isSignatureAuthorAccepted returns a consistent sarRejected result,
// and that the returned error is a PolicyRequirementError.
func assertSARRejectedPolicyRequirement(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
	assertSARRejected(t, sar, parsedSig, err)
	assert.IsType(t, PolicyRequirementError(""), err)
}

// assertSARUnknown verifies that isSignatureAuthorAccepted returns a consistent sarUnknown result.
func assertSARUnknown(t *testing.T, sar signatureAcceptanceResult, parsedSig *Signature, err error) {
	assert.Equal(t, sarUnknown, sar)
	assert.Nil(t, parsedSig)
	assert.NoError(t, err)
}

// Helpers for validating PolicyRequirement.isRunningImageAllowed results:

// assertRunningAllowed verifies that isRunningImageAllowed returns a consistent true result
func assertRunningAllowed(t *testing.T, allowed bool, err error) {
	assert.Equal(t, true, allowed)
	assert.NoError(t, err)
}

// assertRunningRejected verifies that isRunningImageAllowed returns a consistent false result
func assertRunningRejected(t *testing.T, allowed bool, err error) {
	assert.Equal(t, false, allowed)
	assert.Error(t, err)
}

// assertRunningRejectedPolicyRequirement verifies that isRunningImageAllowed returns a consistent false result
// and that the returned error is a PolicyRequirementError.
func assertRunningRejectedPolicyRequirement(t *testing.T, allowed bool, err error) {
	assertRunningRejected(t, allowed, err)
	assert.IsType(t, PolicyRequirementError(""), err)
}
image-4.0.1/signature/policy_reference_match.go000066400000000000000000000077311354546467100216460ustar00rootroot00000000000000// PolicyReferenceMatch implementations.

package signature

import (
	"fmt"

	"github.com/containers/image/v4/docker/reference"
	"github.com/containers/image/v4/transports"
	"github.com/containers/image/v4/types"
)

// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
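// Note that the reference string is parsed with reference.ParseNormalizedNamed, so
// short names are normalized the way the docker CLI does it (for example, "busybox"
// is treated as "docker.io/library/busybox").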
func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
	r1 := image.Reference().DockerReference()
	if r1 == nil {
		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity", transports.ImageName(image.Reference())))
	}
	r2, err := reference.ParseNormalizedNamed(s2)
	if err != nil {
		return nil, nil, err
	}
	return r1, r2, nil
}

func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
	if err != nil {
		return false
	}
	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
		return false
	}
	return signature.String() == intended.String()
}

func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
	if err != nil {
		return false
	}

	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
	if reference.IsNameOnly(signature) {
		return false
	}
	switch intended.(type) {
	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
		return signature.String() == intended.String()
	case reference.Canonical:
		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy and in UnparsedImage.Manifest.
		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms).
		return signature.Name() == intended.Name()
	default: // !reference.IsNameOnly(intended)
		return false
	}
}

func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
	if err != nil {
		return false
	}
	return signature.Name() == intended.Name()
}

// parseDockerReferences converts two reference strings into parsed entities, failing on any error
func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
	r1, err := reference.ParseNormalizedNamed(s1)
	if err != nil {
		return nil, nil, err
	}
	r2, err := reference.ParseNormalizedNamed(s2)
	if err != nil {
		return nil, nil, err
	}
	return r1, r2, nil
}

func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
	if err != nil {
		return false
	}
	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference) if err != nil { return false } // prm.DockerReference and signatureDockerReference should be exact; so, verify that now. if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) { return false } return signature.String() == intended.String() } func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference) if err != nil { return false } return signature.Name() == intended.Name() } image-4.0.1/signature/policy_reference_match_test.go000066400000000000000000000360441354546467100227030ustar00rootroot00000000000000package signature import ( "context" "fmt" "testing" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/internal/testing/mocks" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( fullRHELRef = "registry.access.redhat.com/rhel7/rhel:7.2.3" untaggedRHELRef = "registry.access.redhat.com/rhel7/rhel" digestSuffix = "@sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" digestSuffixOther = "@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" ) func TestParseImageAndDockerReference(t *testing.T) { const ( ok1 = "busybox" ok2 = fullRHELRef bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES" bad2 = "" ) // Success ref, err := reference.ParseNormalizedNamed(ok1) require.NoError(t, err) r1, r2, err := parseImageAndDockerReference(refImageMock{ref}, ok2) require.NoError(t, err) assert.Equal(t, ok1, reference.FamiliarString(r1)) assert.Equal(t, ok2, reference.FamiliarString(r2)) // Unidentified images are rejected. _, _, err = parseImageAndDockerReference(refImageMock{nil}, ok2) require.Error(t, err) assert.IsType(t, PolicyRequirementError(""), err) // Failures for _, refs := range [][]string{ {bad1, ok2}, {ok1, bad2}, {bad1, bad2}, } { ref, err := reference.ParseNormalizedNamed(refs[0]) if err == nil { _, _, err := parseImageAndDockerReference(refImageMock{ref}, refs[1]) assert.Error(t, err) } } } // refImageMock is a mock of types.UnparsedImage which returns itself in Reference().DockerReference. type refImageMock struct{ reference.Named } func (ref refImageMock) Reference() types.ImageReference { return refImageReferenceMock{ref.Named} } func (ref refImageMock) Close() error { panic("unexpected call to a mock function") } func (ref refImageMock) Manifest(ctx context.Context) ([]byte, string, error) { panic("unexpected call to a mock function") } func (ref refImageMock) Signatures(context.Context) ([][]byte, error) { panic("unexpected call to a mock function") } func (ref refImageMock) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { panic("unexpected call to a mock function") } // refImageReferenceMock is a mock of types.ImageReference which returns itself in DockerReference. type refImageReferenceMock struct{ reference.Named } func (ref refImageReferenceMock) Transport() types.ImageTransport { // We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that. if ref.Named == nil { return mocks.NameImageTransport("== Transport mock") } panic("unexpected call to a mock function") } func (ref refImageReferenceMock) StringWithinTransport() string { // We use this in error messages, so sadly we must return something. But right now we do so only when DockerReference is nil, so restrict to that.
if ref.Named == nil { return "== StringWithinTransport for an image with no Docker support" } panic("unexpected call to a mock function") } func (ref refImageReferenceMock) DockerReference() reference.Named { return ref.Named } func (ref refImageReferenceMock) PolicyConfigurationIdentity() string { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) PolicyConfigurationNamespaces() []string { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { panic("unexpected call to a mock function") } func (ref refImageReferenceMock) DeleteImage(ctx context.Context, sys *types.SystemContext) error { panic("unexpected call to a mock function") } type prmSymmetricTableTest struct { refA, refB string result bool } // Test cases for exact reference match. The behavior is supposed to be symmetric. var prmExactMatchTestTable = []prmSymmetricTableTest{ // Success, simple matches {"busybox:latest", "busybox:latest", true}, {fullRHELRef, fullRHELRef, true}, {"busybox" + digestSuffix, "busybox" + digestSuffix, true}, // NOTE: This is not documented; signing digests is not recommended at this time. // Non-canonical reference format is canonicalized {"library/busybox:latest", "busybox:latest", true}, {"docker.io/library/busybox:latest", "busybox:latest", true}, {"library/busybox" + digestSuffix, "busybox" + digestSuffix, true}, // Mismatch {"busybox:latest", "busybox:notlatest", false}, {"busybox:latest", "notbusybox:latest", false}, {"busybox:latest", "hostname/library/busybox:notlatest", false}, {"hostname/library/busybox:latest", "busybox:notlatest", false}, {"busybox:latest", fullRHELRef, false}, {"busybox" + digestSuffix, "notbusybox" + digestSuffix, false}, {"busybox:latest", "busybox" + digestSuffix, false}, {"busybox" + digestSuffix, "busybox" + digestSuffixOther, false}, // NameOnly references {"busybox", "busybox:latest", false}, {"busybox", "busybox" + digestSuffix, false}, {"busybox", "busybox", false}, // References with both tags and digests: We match them exactly (requiring BOTH to match) // NOTE: Again, this is not documented behavior; the recommendation is to sign tags, not digests, and then tag-and-digest references won’t match the signed identity. {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffix, true}, {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, false}, {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffix, false}, {"busybox:latest" + digestSuffix, "busybox" + digestSuffix, false}, {"busybox:latest" + digestSuffix, "busybox:latest", false}, // Invalid format {"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false}, {"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false}, // Even if they are exactly equal, invalid values are rejected. {"INVALID", "INVALID", false}, } // Test cases for repository-only reference match. The behavior is supposed to be symmetric. 
var prmRepositoryMatchTestTable = []prmSymmetricTableTest{ // Success, simple matches {"busybox:latest", "busybox:latest", true}, {fullRHELRef, fullRHELRef, true}, {"busybox" + digestSuffix, "busybox" + digestSuffix, true}, // NOTE: This is not documented; signing digests is not recommended at this time. // Non-canonical reference format is canonicalized {"library/busybox:latest", "busybox:latest", true}, {"docker.io/library/busybox:latest", "busybox:latest", true}, {"library/busybox" + digestSuffix, "busybox" + digestSuffix, true}, // The same as above, but with mismatching tags {"busybox:latest", "busybox:notlatest", true}, {fullRHELRef + "tagsuffix", fullRHELRef, true}, {"library/busybox:latest", "busybox:notlatest", true}, {"busybox:latest", "library/busybox:notlatest", true}, {"docker.io/library/busybox:notlatest", "busybox:latest", true}, {"busybox:notlatest", "docker.io/library/busybox:latest", true}, {"busybox:latest", "busybox" + digestSuffix, true}, {"busybox" + digestSuffix, "busybox" + digestSuffixOther, true}, // Even this is accepted here. (This could more reasonably happen with two different digest algorithms.) // The same as above, but with defaulted tags (should not actually happen) {"busybox", "busybox:notlatest", true}, {fullRHELRef, untaggedRHELRef, true}, {"busybox", "busybox" + digestSuffix, true}, {"library/busybox", "busybox", true}, {"docker.io/library/busybox", "busybox", true}, // Mismatch {"busybox:latest", "notbusybox:latest", false}, {"hostname/library/busybox:latest", "busybox:notlatest", false}, {"busybox:latest", fullRHELRef, false}, {"busybox" + digestSuffix, "notbusybox" + digestSuffix, false}, // References with both tags and digests: We ignore both anyway. {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffix, true}, {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, true}, {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffix, true}, {"busybox:latest" + digestSuffix, "busybox" + digestSuffix, true}, {"busybox:latest" + digestSuffix, "busybox:latest", true}, // Invalid format {"UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", "busybox:latest", false}, {"", "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES", false}, // Even if they are exactly equal, invalid values are rejected. {"INVALID", "INVALID", false}, } func testImageAndSig(t *testing.T, prm PolicyReferenceMatch, imageRef, sigRef string, result bool) { // This assumes that all ways to obtain a reference.Named perform equivalent validation, // and therefore values refused by reference.ParseNormalizedNamed can not happen in practice. parsedImageRef, err := reference.ParseNormalizedNamed(imageRef) if err != nil { return } res := prm.matchesDockerReference(refImageMock{parsedImageRef}, sigRef) assert.Equal(t, result, res, fmt.Sprintf("%s vs. %s", imageRef, sigRef)) } func TestPRMMatchExactMatchesDockerReference(t *testing.T) { prm := NewPRMMatchExact() for _, test := range prmExactMatchTestTable { testImageAndSig(t, prm, test.refA, test.refB, test.result) testImageAndSig(t, prm, test.refB, test.refA, test.result) } // Even if they are signed with an empty string as a reference, unidentified images are rejected. res := prm.matchesDockerReference(refImageMock{nil}, "") assert.False(t, res, `unidentified vs.
""`) } func TestPRMMatchRepoDigestOrExactMatchesDockerReference(t *testing.T) { prm := NewPRMMatchRepoDigestOrExact() // prmMatchRepoDigestOrExact is a middle ground between prmMatchExact and prmMatchRepository: // It accepts anything prmMatchExact accepts,… for _, test := range prmExactMatchTestTable { if test.result == true { testImageAndSig(t, prm, test.refA, test.refB, test.result) testImageAndSig(t, prm, test.refB, test.refA, test.result) } } // … and it rejects everything prmMatchRepository rejects. for _, test := range prmRepositoryMatchTestTable { if test.result == false { testImageAndSig(t, prm, test.refA, test.refB, test.result) testImageAndSig(t, prm, test.refB, test.refA, test.result) } } // The other cases, possibly asymmetrical: for _, test := range []struct { imageRef, sigRef string result bool }{ // Tag mismatch {"busybox:latest", "busybox:notlatest", false}, {fullRHELRef + "tagsuffix", fullRHELRef, false}, {"library/busybox:latest", "busybox:notlatest", false}, {"busybox:latest", "library/busybox:notlatest", false}, {"docker.io/library/busybox:notlatest", "busybox:latest", false}, {"busybox:notlatest", "docker.io/library/busybox:latest", false}, // NameOnly references {"busybox", "busybox:latest", false}, {"busybox:latest", "busybox", false}, {"busybox", "busybox" + digestSuffix, false}, {"busybox" + digestSuffix, "busybox", false}, {fullRHELRef, untaggedRHELRef, false}, {"busybox", "busybox", false}, // Tag references only accept signatures with matching tags. {"busybox:latest", "busybox" + digestSuffix, false}, // Digest references accept any signature with matching repository. {"busybox" + digestSuffix, "busybox:latest", true}, {"busybox" + digestSuffix, "busybox" + digestSuffixOther, true}, // Even this is accepted here. (This could more reasonably happen with two different digest algorithms.) // References with both tags and digests: We match them exactly (requiring BOTH to match). {"busybox:latest" + digestSuffix, "busybox:latest", false}, {"busybox:latest" + digestSuffix, "busybox:notlatest", false}, {"busybox:latest", "busybox:latest" + digestSuffix, false}, {"busybox:latest" + digestSuffix, "busybox:latest" + digestSuffixOther, false}, {"busybox:latest" + digestSuffix, "busybox:notlatest" + digestSuffixOther, false}, } { testImageAndSig(t, prm, test.imageRef, test.sigRef, test.result) } } func TestPRMMatchRepositoryMatchesDockerReference(t *testing.T) { prm := NewPRMMatchRepository() for _, test := range prmRepositoryMatchTestTable { testImageAndSig(t, prm, test.refA, test.refB, test.result) testImageAndSig(t, prm, test.refB, test.refA, test.result) } // Even if they are signed with an empty string as a reference, unidentified images are rejected. res := prm.matchesDockerReference(refImageMock{nil}, "") assert.False(t, res, `unidentified vs.
""`) } func TestParseDockerReferences(t *testing.T) { const ( ok1 = "busybox" ok2 = fullRHELRef bad1 = "UPPERCASE_IS_INVALID_IN_DOCKER_REFERENCES" bad2 = "" ) // Success r1, r2, err := parseDockerReferences(ok1, ok2) require.NoError(t, err) assert.Equal(t, ok1, reference.FamiliarString(r1)) assert.Equal(t, ok2, reference.FamiliarString(r2)) // Failures for _, refs := range [][]string{ {bad1, ok2}, {ok1, bad2}, {bad1, bad2}, } { _, _, err := parseDockerReferences(refs[0], refs[1]) assert.Error(t, err) } } // forbiddenImageMock is a mock of types.UnparsedImage which ensures Reference is not called type forbiddenImageMock struct{} func (ref forbiddenImageMock) Reference() types.ImageReference { panic("unexpected call to a mock function") } func (ref forbiddenImageMock) Close() error { panic("unexpected call to a mock function") } func (ref forbiddenImageMock) Manifest(ctx context.Context) ([]byte, string, error) { panic("unexpected call to a mock function") } func (ref forbiddenImageMock) Signatures(context.Context) ([][]byte, error) { panic("unexpected call to a mock function") } func (ref forbiddenImageMock) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { panic("unexpected call to a mock function") } func testExactPRMAndSig(t *testing.T, prmFactory func(string) PolicyReferenceMatch, imageRef, sigRef string, result bool) { prm := prmFactory(imageRef) res := prm.matchesDockerReference(forbiddenImageMock{}, sigRef) assert.Equal(t, result, res, fmt.Sprintf("%s vs. %s", imageRef, sigRef)) } func prmExactReferenceFactory(ref string) PolicyReferenceMatch { // Do not use NewPRMExactReference, we want to also test the case with an invalid DockerReference, // even though NewPRMExactReference should never let it happen. return &prmExactReference{DockerReference: ref} } func TestPRMExactReferenceMatchesDockerReference(t *testing.T) { for _, test := range prmExactMatchTestTable { testExactPRMAndSig(t, prmExactReferenceFactory, test.refA, test.refB, test.result) testExactPRMAndSig(t, prmExactReferenceFactory, test.refB, test.refA, test.result) } } func prmExactRepositoryFactory(ref string) PolicyReferenceMatch { // Do not use NewPRMExactRepository, we want to also test the case with an invalid DockerReference, // even though NewPRMExactRepository should never let it happen. return &prmExactRepository{DockerRepository: ref} } func TestPRMExactRepositoryMatchesDockerReference(t *testing.T) { for _, test := range prmRepositoryMatchTestTable { testExactPRMAndSig(t, prmExactRepositoryFactory, test.refA, test.refB, test.result) testExactPRMAndSig(t, prmExactRepositoryFactory, test.refB, test.refA, test.result) } } image-4.0.1/signature/policy_types.go000066400000000000000000000155021354546467100176730ustar00rootroot00000000000000// Note: Consider the API unstable until the code supports at least three different image formats or transports. // This defines types used to represent a signature verification policy in memory. // Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements // built using the constructor functions provided in policy_config.go. package signature // NOTE: Keep this in sync with docs/containers-policy.json.5.md! // Policy defines requirements for considering a signature, or an image, valid. type Policy struct { // Default applies to any image which does not have a matching policy in Transports. 
// Note that this can happen even if a matching PolicyTransportScopes exists in Transports // if the image matches none of the scopes. Default PolicyRequirements `json:"default"` Transports map[string]PolicyTransportScopes `json:"transports"` } // PolicyTransportScopes defines policies for images for a specific transport, // for various scopes, the map keys. // Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.); // there is one scope precisely matching a single image, and namespace scopes as prefixes // of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]]) // The empty scope, if it exists, is considered a parent namespace of all other scopes. // Most specific scope wins, duplication is prohibited (hard failure). type PolicyTransportScopes map[string]PolicyRequirements // PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature). // Must not be empty, frequently will only contain a single element. type PolicyRequirements []PolicyRequirement // PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image. // The type is public, but its definition is private. // prCommon is the common type field in a JSON encoding of PolicyRequirement. type prCommon struct { Type prTypeIdentifier `json:"type"` } // prTypeIdentifier is a string designating a kind of a PolicyRequirement. type prTypeIdentifier string const ( prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything" prTypeReject prTypeIdentifier = "reject" prTypeSignedBy prTypeIdentifier = "signedBy" prTypeSignedBaseLayer prTypeIdentifier = "signedBaseLayer" ) // prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything: // every image is allowed to run. // Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit). // NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted). // FIXME? Better name? type prInsecureAcceptAnything struct { prCommon } // prReject is a PolicyRequirement with type = prTypeReject: every image is rejected. type prReject struct { prCommon } // prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity type prSignedBy struct { prCommon // KeyType specifies what kind of key reference KeyPath/KeyData is. // Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs” // FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only KeyType sbKeyType `json:"keyType"` // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified. KeyPath string `json:"keyPath,omitempty"` // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified. KeyData []byte `json:"keyData,omitempty"` // SignedIdentity specifies what image identity the signature must be claiming about the image. // Defaults to "match-exact" if not specified. SignedIdentity PolicyReferenceMatch `json:"signedIdentity"` }
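// Editorial note (an illustrative sketch assembled from the JSON field tags
// above, not copied from the docs; the key path is a made-up example): a
// prSignedBy requirement would appear in a policy file roughly as
//
//	{
//	    "type": "signedBy",
//	    "keyType": "GPGKeys",
//	    "keyPath": "/etc/pki/containers/example-pubkey.gpg",
//	    "signedIdentity": {"type": "matchRepoDigestOrExact"}
//	}
//
// with exactly one of "keyPath" and "keyData" present, per the field comments.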
// sbKeyType are the allowed values for prSignedBy.KeyType type sbKeyType string const ( // SBKeyTypeGPGKeys refers to keys contained in a GPG keyring SBKeyTypeGPGKeys sbKeyType = "GPGKeys" // SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys" // SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates // FIXME: PEM, DER? SBKeyTypeX509Certificates sbKeyType = "X509Certificates" // SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs // FIXME: PEM, DER? SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs" ) // prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image. type prSignedBaseLayer struct { prCommon // BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful. BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"` } // PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement. // The type is public, but its implementation is private. // prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch. type prmCommon struct { Type prmTypeIdentifier `json:"type"` } // prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch. type prmTypeIdentifier string const ( prmTypeMatchExact prmTypeIdentifier = "matchExact" prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact" prmTypeMatchRepository prmTypeIdentifier = "matchRepository" prmTypeExactReference prmTypeIdentifier = "exactReference" prmTypeExactRepository prmTypeIdentifier = "exactRepository" ) // prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly. type prmMatchExact struct { prmCommon } // prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly, // except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest type prmMatchRepoDigestOrExact struct { prmCommon } // prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, and may differ in the tag. type prmMatchRepository struct { prmCommon } // prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly. type prmExactReference struct { prmCommon DockerReference string `json:"dockerReference"` } // prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag. type prmExactRepository struct { prmCommon DockerRepository string `json:"dockerRepository"` } image-4.0.1/signature/signature.go000066400000000000000000000252321354546467100171520ustar00rootroot00000000000000// Note: Consider the API unstable until the code supports at least three different image formats or transports. // NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json! package signature import ( "encoding/json" "fmt" "time" "github.com/pkg/errors" "github.com/containers/image/v4/version" "github.com/opencontainers/go-digest" ) const ( signatureType = "atomic container signature" )
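// Editorial note (a hedged sketch derived from MarshalJSON below; the digest,
// reference, and creator values are made-up examples): a serialized signature
// payload has this overall shape:
//
//	{
//	    "critical": {
//	        "type": "atomic container signature",
//	        "image": {"docker-manifest-digest": "sha256:817..."},
//	        "identity": {"docker-reference": "docker.io/library/busybox:latest"}
//	    },
//	    "optional": {"creator": "atomic 4.0.1", "timestamp": 1484683104}
//	}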
// InvalidSignatureError is returned when parsing an invalid signature. type InvalidSignatureError struct { msg string } func (err InvalidSignatureError) Error() string { return err.msg } // Signature is a parsed content of a signature. // The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below. type Signature struct { DockerManifestDigest digest.Digest DockerReference string // FIXME: more precise type? } // untrustedSignature is a parsed content of a signature. type untrustedSignature struct { UntrustedDockerManifestDigest digest.Digest UntrustedDockerReference string // FIXME: more precise type? UntrustedCreatorID *string // This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision, // but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds). // So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually, // we would add another field, UntrustedTimestampNS int64. UntrustedTimestamp *int64 } // UntrustedSignatureInformation is information available in an untrusted signature. // This may be useful when debugging signature verification failures, // or when managing a set of signatures on a single image. // // WARNING: Do not use the contents of this for ANY security decisions, // and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable. // There is NO REASON to expect the values to be correct, or not intentionally misleading // (including things like “✅ Verified by $authority”) type UntrustedSignatureInformation struct { UntrustedDockerManifestDigest digest.Digest UntrustedDockerReference string // FIXME: more precise type? UntrustedCreatorID *string UntrustedTimestamp *time.Time UntrustedShortKeyIdentifier string } // newUntrustedSignature returns an untrustedSignature object with // the specified primary contents and appropriate metadata. func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { // Use intermediate variables for these values so that we can take their addresses. // Golang guarantees that they will have a new address on every execution. creatorID := "atomic " + version.Version timestamp := time.Now().Unix() return untrustedSignature{ UntrustedDockerManifestDigest: dockerManifestDigest, UntrustedDockerReference: dockerReference, UntrustedCreatorID: &creatorID, UntrustedTimestamp: &timestamp, } } // Compile-time check that untrustedSignature implements json.Marshaler var _ json.Marshaler = (*untrustedSignature)(nil) // MarshalJSON implements the json.Marshaler interface.
func (s untrustedSignature) MarshalJSON() ([]byte, error) { if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { return nil, errors.New("Unexpected empty signature content") } critical := map[string]interface{}{ "type": signatureType, "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, } optional := map[string]interface{}{} if s.UntrustedCreatorID != nil { optional["creator"] = *s.UntrustedCreatorID } if s.UntrustedTimestamp != nil { optional["timestamp"] = *s.UntrustedTimestamp } signature := map[string]interface{}{ "critical": critical, "optional": optional, } return json.Marshal(signature) } // Compile-time check that untrustedSignature implements json.Unmarshaler var _ json.Unmarshaler = (*untrustedSignature)(nil) // UnmarshalJSON implements the json.Unmarshaler interface func (s *untrustedSignature) UnmarshalJSON(data []byte) error { err := s.strictUnmarshalJSON(data) if err != nil { if _, ok := err.(jsonFormatError); ok { err = InvalidSignatureError{msg: err.Error()} } } return err } // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. // Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError conversion in a single place, the caller. func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { var critical, optional json.RawMessage if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ "critical": &critical, "optional": &optional, }); err != nil { return err } var creatorID string var timestamp float64 var gotCreatorID, gotTimestamp = false, false if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} { switch key { case "creator": gotCreatorID = true return &creatorID case "timestamp": gotTimestamp = true return &timestamp default: var ignore interface{} return &ignore } }); err != nil { return err } if gotCreatorID { s.UntrustedCreatorID = &creatorID } if gotTimestamp { intTimestamp := int64(timestamp) if float64(intTimestamp) != timestamp { return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"} } s.UntrustedTimestamp = &intTimestamp } var t string var image, identity json.RawMessage if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{ "type": &t, "image": &image, "identity": &identity, }); err != nil { return err } if t != signatureType { return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)} } var digestString string if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{ "docker-manifest-digest": &digestString, }); err != nil { return err } s.UntrustedDockerManifestDigest = digest.Digest(digestString) return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{ "docker-reference": &s.UntrustedDockerReference, }) }
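// Editorial note (a hedged sketch, not from the original source): the timestamp
// round-trip check in strictUnmarshalJSON above works because float64→int64
// conversion truncates any fractional part, so converting back exposes it:
//
//	timestamp := 1484683104.5          // what JSON decoding produced
//	intTimestamp := int64(timestamp)   // 1484683104
//	float64(intTimestamp) != timestamp // true → InvalidSignatureError
//
// Integral values (within float64's exact-integer range) survive the round
// trip unchanged and are accepted.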
// Sign formats the signature and returns a blob signed using mech and keyIdentity // (If it seems surprising that this is a method on untrustedSignature, note that there // isn’t a good reason to think that a key used by the user is trusted by any component // of the system just because it is a private key — actually the presence of a private key // on the system increases the likelihood of a successful attack on that private key // on that particular system.) func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) { json, err := json.Marshal(s) if err != nil { return nil, err } return mech.Sign(json, keyIdentity) } // signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable. // We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies // the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature // because the functions have the same or similar types, so there is a risk of exchanging the functions; // named members of this struct are more explicit. type signatureAcceptanceRules struct { validateKeyIdentity func(string) error validateSignedDockerReference func(string) error validateSignedDockerManifestDigest func(digest.Digest) error } // verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components // match expected values, both as specified by rules, and returns it func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) { signed, keyIdentity, err := mech.Verify(unverifiedSignature) if err != nil { return nil, err } if err := rules.validateKeyIdentity(keyIdentity); err != nil { return nil, err } var unmatchedSignature untrustedSignature if err := json.Unmarshal(signed, &unmatchedSignature); err != nil { return nil, InvalidSignatureError{msg: err.Error()} } if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil { return nil, err } if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil { return nil, err } // signatureAcceptanceRules have accepted this value. return &Signature{ DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest, DockerReference: unmatchedSignature.UntrustedDockerReference, }, nil } // GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature, // WITHOUT doing any cryptographic verification. // This may be useful when debugging signature verification failures, // or when managing a set of signatures on a single image. // // WARNING: Do not use the contents of this for ANY security decisions, // and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable. // There is NO REASON to expect the values to be correct, or not intentionally misleading // (including things like “✅ Verified by $authority”) func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) { // NOTE: This should eventually do format autodetection.
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{}) if err != nil { return nil, err } defer mech.Close() untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes) if err != nil { return nil, err } var untrustedDecodedContents untrustedSignature if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil { return nil, InvalidSignatureError{msg: err.Error()} } var timestamp *time.Time // = nil if untrustedDecodedContents.UntrustedTimestamp != nil { ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0) timestamp = &ts } return &UntrustedSignatureInformation{ UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest, UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference, UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID, UntrustedTimestamp: timestamp, UntrustedShortKeyIdentifier: shortKeyIdentifier, }, nil } image-4.0.1/signature/signature_test.go000066400000000000000000000360201354546467100202060ustar00rootroot00000000000000package signature import ( "encoding/json" "io/ioutil" "path/filepath" "testing" "time" "github.com/containers/image/v4/version" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/xeipuuv/gojsonschema" ) func TestInvalidSignatureError(t *testing.T) { // A stupid test just to keep code coverage s := "test" err := InvalidSignatureError{msg: s} assert.Equal(t, s, err.Error()) } func TestNewUntrustedSignature(t *testing.T) { timeBefore := time.Now() sig := newUntrustedSignature(TestImageManifestDigest, TestImageSignatureReference) assert.Equal(t, TestImageManifestDigest, sig.UntrustedDockerManifestDigest) assert.Equal(t, TestImageSignatureReference, sig.UntrustedDockerReference) require.NotNil(t, sig.UntrustedCreatorID) assert.Equal(t, "atomic "+version.Version, *sig.UntrustedCreatorID) require.NotNil(t, sig.UntrustedTimestamp) timeAfter := time.Now() assert.True(t, timeBefore.Unix() <= *sig.UntrustedTimestamp) assert.True(t, *sig.UntrustedTimestamp <= timeAfter.Unix()) } func TestMarshalJSON(t *testing.T) { // Empty string values s := newUntrustedSignature("", "_") _, err := s.MarshalJSON() assert.Error(t, err) s = newUntrustedSignature("_", "") _, err = s.MarshalJSON() assert.Error(t, err) // Success // Use intermediate variables for these values so that we can take their addresses. creatorID := "CREATOR" timestamp := int64(1484683104) for _, c := range []struct { input untrustedSignature expected string }{ { untrustedSignature{ UntrustedDockerManifestDigest: "digest!@#", UntrustedDockerReference: "reference#@!", UntrustedCreatorID: &creatorID, UntrustedTimestamp: ×tamp, }, "{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{\"creator\":\"CREATOR\",\"timestamp\":1484683104}}", }, { untrustedSignature{ UntrustedDockerManifestDigest: "digest!@#", UntrustedDockerReference: "reference#@!", }, "{\"critical\":{\"identity\":{\"docker-reference\":\"reference#@!\"},\"image\":{\"docker-manifest-digest\":\"digest!@#\"},\"type\":\"atomic container signature\"},\"optional\":{}}", }, } { marshaled, err := c.input.MarshalJSON() require.NoError(t, err) assert.Equal(t, []byte(c.expected), marshaled) // Also call MarshalJSON through the JSON package. 
marshaled, err = json.Marshal(c.input) assert.NoError(t, err) assert.Equal(t, []byte(c.expected), marshaled) } } // Return the result of modifying validJSON with fn func modifiedUntrustedSignatureJSON(t *testing.T, validJSON []byte, modifyFn func(mSI)) []byte { var tmp mSI err := json.Unmarshal(validJSON, &tmp) require.NoError(t, err) modifyFn(tmp) modifiedJSON, err := json.Marshal(tmp) require.NoError(t, err) return modifiedJSON } // Verify that input can be unmarshaled as an untrustedSignature, and that it passes JSON schema validation, and return the unmarshaled untrustedSignature. func succesfullyUnmarshalUntrustedSignature(t *testing.T, schemaLoader gojsonschema.JSONLoader, input []byte) untrustedSignature { inputString := string(input) var s untrustedSignature err := json.Unmarshal(input, &s) require.NoError(t, err, inputString) res, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(inputString)) assert.True(t, err == nil, inputString) assert.True(t, res.Valid(), inputString) return s } // Verify that input can't be unmarshaled as an untrusted signature, and that it fails JSON schema validation. func assertUnmarshalUntrustedSignatureFails(t *testing.T, schemaLoader gojsonschema.JSONLoader, input []byte) { inputString := string(input) var s untrustedSignature err := json.Unmarshal(input, &s) assert.Error(t, err, inputString) res, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(inputString)) assert.True(t, err != nil || !res.Valid(), inputString) } func TestUnmarshalJSON(t *testing.T) { // NOTE: The schema at schemaPath is NOT authoritative; docs/atomic-signature.json and the code are, rather! // The schemaPath references are not testing that the code follows the behavior declared by the schema, // they are testing that the schema follows the behavior of the code! schemaPath, err := filepath.Abs("../docs/atomic-signature-embedded-json.json") require.NoError(t, err) schemaLoader := gojsonschema.NewReferenceLoader("file://" + schemaPath) // Invalid input. Note that json.Unmarshal is guaranteed to validate input before calling our // UnmarshalJSON implementation; so test that first, then test our error handling for completeness. assertUnmarshalUntrustedSignatureFails(t, schemaLoader, []byte("&")) var s untrustedSignature err = s.UnmarshalJSON([]byte("&")) assert.Error(t, err) // Not an object assertUnmarshalUntrustedSignatureFails(t, schemaLoader, []byte("1")) // Start with a valid JSON.
validSig := newUntrustedSignature("digest!@#", "reference#@!") validJSON, err := validSig.MarshalJSON() require.NoError(t, err) // Success s = succesfullyUnmarshalUntrustedSignature(t, schemaLoader, validJSON) assert.Equal(t, validSig, s) // Various ways to corrupt the JSON breakFns := []func(mSI){ // A top-level field is missing func(v mSI) { delete(v, "critical") }, func(v mSI) { delete(v, "optional") }, // Extra top-level sub-object func(v mSI) { v["unexpected"] = 1 }, // "critical" not an object func(v mSI) { v["critical"] = 1 }, // "optional" not an object func(v mSI) { v["optional"] = 1 }, // A field of "critical" is missing func(v mSI) { delete(x(v, "critical"), "type") }, func(v mSI) { delete(x(v, "critical"), "image") }, func(v mSI) { delete(x(v, "critical"), "identity") }, // Extra field of "critical" func(v mSI) { x(v, "critical")["unexpected"] = 1 }, // Invalid "type" func(v mSI) { x(v, "critical")["type"] = 1 }, func(v mSI) { x(v, "critical")["type"] = "unexpected" }, // Invalid "image" object func(v mSI) { x(v, "critical")["image"] = 1 }, func(v mSI) { delete(x(v, "critical", "image"), "docker-manifest-digest") }, func(v mSI) { x(v, "critical", "image")["unexpected"] = 1 }, // Invalid "docker-manifest-digest" func(v mSI) { x(v, "critical", "image")["docker-manifest-digest"] = 1 }, // Invalid "identity" object func(v mSI) { x(v, "critical")["identity"] = 1 }, func(v mSI) { delete(x(v, "critical", "identity"), "docker-reference") }, func(v mSI) { x(v, "critical", "identity")["unexpected"] = 1 }, // Invalid "docker-reference" func(v mSI) { x(v, "critical", "identity")["docker-reference"] = 1 }, // Invalid "creator" func(v mSI) { x(v, "optional")["creator"] = 1 }, // Invalid "timestamp" func(v mSI) { x(v, "optional")["timestamp"] = "unexpected" }, func(v mSI) { x(v, "optional")["timestamp"] = 0.5 }, // Fractional input } for _, fn := range breakFns { testJSON := modifiedUntrustedSignatureJSON(t, validJSON, fn) assertUnmarshalUntrustedSignatureFails(t, schemaLoader, testJSON) } // Modifications to unrecognized fields in "optional" are allowed and ignored allowedModificationFns := []func(mSI){ // Add an optional field func(v mSI) { x(v, "optional")["unexpected"] = 1 }, } for _, fn := range allowedModificationFns { testJSON := modifiedUntrustedSignatureJSON(t, validJSON, fn) s := succesfullyUnmarshalUntrustedSignature(t, schemaLoader, testJSON) assert.Equal(t, validSig, s) } // Optional fields can be missing validSig = untrustedSignature{ UntrustedDockerManifestDigest: "digest!@#", UntrustedDockerReference: "reference#@!", UntrustedCreatorID: nil, UntrustedTimestamp: nil, } validJSON, err = validSig.MarshalJSON() require.NoError(t, err) s = succesfullyUnmarshalUntrustedSignature(t, schemaLoader, validJSON) assert.Equal(t, validSig, s) } func TestSign(t *testing.T) { mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) require.NoError(t, err) defer mech.Close() if err := mech.SupportsSigning(); err != nil { t.Skipf("Signing not supported: %v", err) } sig := newUntrustedSignature("digest!@#", "reference#@!") // Successful signing signature, err := sig.sign(mech, TestKeyFingerprint) require.NoError(t, err) verified, err := verifyAndExtractSignature(mech, signature, signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { if keyIdentity != TestKeyFingerprint { return errors.Errorf("Unexpected keyIdentity") } return nil }, validateSignedDockerReference: func(signedDockerReference string) error { if signedDockerReference != 
sig.UntrustedDockerReference { return errors.Errorf("Unexpected signedDockerReference") } return nil }, validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { if signedDockerManifestDigest != sig.UntrustedDockerManifestDigest { return errors.Errorf("Unexpected signedDockerManifestDigest") } return nil }, }) require.NoError(t, err) assert.Equal(t, sig.UntrustedDockerManifestDigest, verified.DockerManifestDigest) assert.Equal(t, sig.UntrustedDockerReference, verified.DockerReference) // Error creating blob to sign _, err = untrustedSignature{}.sign(mech, TestKeyFingerprint) assert.Error(t, err) // Error signing _, err = sig.sign(mech, "this fingerprint doesn't exist") assert.Error(t, err) } func TestVerifyAndExtractSignature(t *testing.T) { mech, err := newGPGSigningMechanismInDirectory(testGPGHomeDirectory) require.NoError(t, err) defer mech.Close() type triple struct { keyIdentity string signedDockerReference string signedDockerManifestDigest digest.Digest } var wanted, recorded triple // recordingRules is a plausible signatureAcceptanceRules implementation; equally // importantly, it records that we are passing the correct values to the rule callbacks. recordingRules := signatureAcceptanceRules{ validateKeyIdentity: func(keyIdentity string) error { recorded.keyIdentity = keyIdentity if keyIdentity != wanted.keyIdentity { return errors.Errorf("keyIdentity mismatch") } return nil }, validateSignedDockerReference: func(signedDockerReference string) error { recorded.signedDockerReference = signedDockerReference if signedDockerReference != wanted.signedDockerReference { return errors.Errorf("signedDockerReference mismatch") } return nil }, validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { recorded.signedDockerManifestDigest = signedDockerManifestDigest if signedDockerManifestDigest != wanted.signedDockerManifestDigest { return errors.Errorf("signedDockerManifestDigest mismatch") } return nil }, } signature, err := ioutil.ReadFile("./fixtures/image.signature") require.NoError(t, err) signatureData := triple{ keyIdentity: TestKeyFingerprint, signedDockerReference: TestImageSignatureReference, signedDockerManifestDigest: TestImageManifestDigest, } // Successful verification wanted = signatureData recorded = triple{} sig, err := verifyAndExtractSignature(mech, signature, recordingRules) require.NoError(t, err) assert.Equal(t, TestImageSignatureReference, sig.DockerReference) assert.Equal(t, TestImageManifestDigest, sig.DockerManifestDigest) assert.Equal(t, signatureData, recorded) // For extra paranoia, test that we return a nil signature object on error. // Completely invalid signature.
recorded = triple{} sig, err = verifyAndExtractSignature(mech, []byte{}, recordingRules) assert.Error(t, err) assert.Nil(t, sig) assert.Equal(t, triple{}, recorded) recorded = triple{} sig, err = verifyAndExtractSignature(mech, []byte("invalid signature"), recordingRules) assert.Error(t, err) assert.Nil(t, sig) assert.Equal(t, triple{}, recorded) // Valid signature of non-JSON: only keyIdentity is validated invalidBlobSignature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature") require.NoError(t, err) recorded = triple{} sig, err = verifyAndExtractSignature(mech, invalidBlobSignature, recordingRules) assert.Error(t, err) assert.Nil(t, sig) assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded) // Valid signature with a wrong key: only keyIdentity is validated wanted = signatureData wanted.keyIdentity = "unexpected fingerprint" recorded = triple{} sig, err = verifyAndExtractSignature(mech, signature, recordingRules) assert.Error(t, err) assert.Nil(t, sig) assert.Equal(t, triple{keyIdentity: signatureData.keyIdentity}, recorded) // Valid signature with a wrong manifest digest: only keyIdentity and signedDockerManifestDigest are validated wanted = signatureData wanted.signedDockerManifestDigest = "invalid digest" recorded = triple{} sig, err = verifyAndExtractSignature(mech, signature, recordingRules) assert.Error(t, err) assert.Nil(t, sig) assert.Equal(t, triple{ keyIdentity: signatureData.keyIdentity, signedDockerManifestDigest: signatureData.signedDockerManifestDigest, }, recorded) // Valid signature with a wrong image reference wanted = signatureData wanted.signedDockerReference = "unexpected docker reference" recorded = triple{} sig, err = verifyAndExtractSignature(mech, signature, recordingRules) assert.Error(t, err) assert.Nil(t, sig) assert.Equal(t, signatureData, recorded) } func TestGetUntrustedSignatureInformationWithoutVerifying(t *testing.T) { signature, err := ioutil.ReadFile("./fixtures/image.signature") require.NoError(t, err) // Successful parsing, all optional fields present info, err := GetUntrustedSignatureInformationWithoutVerifying(signature) require.NoError(t, err) assert.Equal(t, TestImageSignatureReference, info.UntrustedDockerReference) assert.Equal(t, TestImageManifestDigest, info.UntrustedDockerManifestDigest) assert.NotNil(t, info.UntrustedCreatorID) assert.Equal(t, "atomic ", *info.UntrustedCreatorID) assert.NotNil(t, info.UntrustedTimestamp) assert.Equal(t, time.Unix(1458239713, 0), *info.UntrustedTimestamp) assert.Equal(t, TestKeyShortID, info.UntrustedShortKeyIdentifier) // Successful parsing, no optional fields present signature, err = ioutil.ReadFile("./fixtures/no-optional-fields.signature") require.NoError(t, err) // Successful parsing info, err = GetUntrustedSignatureInformationWithoutVerifying(signature) require.NoError(t, err) assert.Equal(t, TestImageSignatureReference, info.UntrustedDockerReference) assert.Equal(t, TestImageManifestDigest, info.UntrustedDockerManifestDigest) assert.Nil(t, info.UntrustedCreatorID) assert.Nil(t, info.UntrustedTimestamp) assert.Equal(t, TestKeyShortID, info.UntrustedShortKeyIdentifier) // Completely invalid signature.
_, err = GetUntrustedSignatureInformationWithoutVerifying([]byte{}) assert.Error(t, err) _, err = GetUntrustedSignatureInformationWithoutVerifying([]byte("invalid signature")) assert.Error(t, err) // Valid signature of non-JSON invalidBlobSignature, err := ioutil.ReadFile("./fixtures/invalid-blob.signature") require.NoError(t, err) _, err = GetUntrustedSignatureInformationWithoutVerifying(invalidBlobSignature) assert.Error(t, err) } image-4.0.1/storage/000077500000000000000000000000001354546467100142615ustar00rootroot00000000000000image-4.0.1/storage/storage_image.go000066400000000000000000001151671354546467100174300ustar00rootroot00000000000000// +build !containers_image_storage_stub package storage import ( "bytes" "context" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "sync" "sync/atomic" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/internal/tmpdir" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/pkg/blobinfocache/none" "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( // ErrBlobDigestMismatch is returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. ErrBlobDigestMismatch = errors.New("blob digest mismatch") // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. ErrBlobSizeMismatch = errors.New("blob size mismatch") // ErrNoManifestLists is returned when GetManifest() is called // with a non-nil instanceDigest. ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. ErrNoSuchImage = storage.ErrNotAnImage ) type storageImageSource struct { imageRef storageReference image *storage.Image layerPosition map[digest.Digest]int // Where we are in reading a blob's layers cachedManifest []byte // A cached copy of the manifest, if already known, or nil getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageDestination struct { imageRef storageReference directory string // Temporary directory where we store blobs until Commit() time nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs manifest []byte // Manifest contents, temporary signatures []byte // Signature contents, temporary putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageCloser struct { types.ImageCloser size int64 }
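// Editorial note (a hedged sketch; the digest is a made-up example): the
// manifestBigDataKey helper below simply concatenates the prefix from
// containers/storage and the digest, e.g.
//
//	manifestBigDataKey("sha256:817...")
//	// == storage.ImageDigestManifestBigDataNamePrefix + "-" + "sha256:817..."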
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions. // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably; // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey func manifestBigDataKey(digest digest.Digest) string { return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String() } // newImageSource sets up an image for reading. func newImageSource(imageRef storageReference) (*storageImageSource, error) { // First, locate the image. img, err := imageRef.resolveImage() if err != nil { return nil, err } // Build the reader object. image := &storageImageSource{ imageRef: imageRef, image: img, layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } if img.Metadata != "" { if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { return nil, errors.Wrap(err, "error decoding metadata for source image") } } return image, nil } // Reference returns the image reference that we used to find this image. func (s *storageImageSource) Reference() types.ImageReference { return s.imageRef } // Close cleans up any resources we tied up while reading the image. func (s *storageImageSource) Close() error { return nil } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (s *storageImageSource) HasThreadSafeGetBlob() bool { return true } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { if info.Digest == image.GzippedEmptyLayerDigest { return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil } rc, n, _, err = s.getBlobAndLayerID(info) return rc, n, err } // getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given. func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { var layer storage.Layer var diffOptions *storage.DiffOptions // We need a valid digest value. err = info.Digest.Validate() if err != nil { return nil, -1, "", err } // Check if the blob corresponds to a diff that was used to initialize any layers. Our // callers should try to retrieve layers using their uncompressed digests, so no need to // check if they're using one of the compressed digests, which we can't reproduce anyway. layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) // If it's not a layer, then it must be a data item. if len(layers) == 0 { b, err := s.imageRef.transport.store.ImageBigData(s.image.ID, info.Digest.String()) if err != nil { return nil, -1, "", err } r := bytes.NewReader(b) logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) return ioutil.NopCloser(r), int64(r.Len()), "", nil } // Step through the list of matching layers. Tests may want to verify that if we have multiple layers // which claim to have the same contents, that we actually do have multiple layers, otherwise we could // just go ahead and use the first one every time.
s.getBlobMutex.Lock() i := s.layerPosition[info.Digest] s.layerPosition[info.Digest] = i + 1 s.getBlobMutex.Unlock() if len(layers) > 0 { layer = layers[i%len(layers)] } // Force the storage layer to not try to match any compression that was used when the layer was first // handed to it. noCompression := archive.Uncompressed diffOptions = &storage.DiffOptions{ Compression: &noCompression, } if layer.UncompressedSize < 0 { n = -1 } else { n = layer.UncompressedSize } logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) if err != nil { return nil, -1, "", err } return rc, n, layer.ID, err } // GetManifest() reads the image's manifest. func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { if instanceDigest != nil { return nil, "", ErrNoManifestLists } if len(s.cachedManifest) == 0 { // The manifest is stored as a big data item. // Prefer the manifest corresponding to the user-specified digest, if available. if s.imageRef.named != nil { if digested, ok := s.imageRef.named.(reference.Digested); ok { key := manifestBigDataKey(digested.Digest()) blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key) if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key return nil, "", err } if err == nil { s.cachedManifest = blob } } } // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest. // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest(). if len(s.cachedManifest) == 0 { cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey) if err != nil { return nil, "", err } s.cachedManifest = cachedBlob } } return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err } // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of // the image, after they've been decompressed. 
func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { manifestBlob, manifestType, err := s.GetManifest(ctx, nil) if err != nil { return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID) } man, err := manifest.FromBlob(manifestBlob, manifestType) if err != nil { return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID) } uncompressedLayerType := "" switch manifestType { case imgspecv1.MediaTypeImageManifest: uncompressedLayerType = imgspecv1.MediaTypeImageLayer case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: // This is actually a compressed type, but there's no uncompressed type defined uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType } physicalBlobInfos := []types.BlobInfo{} layerID := s.image.TopLayer for layerID != "" { layer, err := s.imageRef.transport.store.Layer(layerID) if err != nil { return nil, errors.Wrapf(err, "error reading layer %q in image %q", layerID, s.image.ID) } if layer.UncompressedDigest == "" { return nil, errors.Errorf("uncompressed digest for layer %q is unknown", layerID) } if layer.UncompressedSize < 0 { return nil, errors.Errorf("uncompressed size for layer %q is unknown", layerID) } blobInfo := types.BlobInfo{ Digest: layer.UncompressedDigest, Size: layer.UncompressedSize, MediaType: uncompressedLayerType, } physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) layerID = layer.Parent } res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) if err != nil { return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID) } return res, nil } // buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, // but using layer data which we can actually produce — physicalInfos for non-empty layers, // and image.GzippedEmptyLayer for empty ones. // (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) { nextPhysical := 0 res := make([]types.BlobInfo, len(manifestInfos)) for i, mi := range manifestInfos { if mi.EmptyLayer { res[i] = types.BlobInfo{ Digest: image.GzippedEmptyLayerDigest, Size: int64(len(image.GzippedEmptyLayer)), MediaType: mi.MediaType, } } else { if nextPhysical >= len(physicalInfos) { return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos)) } res[i] = physicalInfos[nextPhysical] nextPhysical++ } } if nextPhysical != len(physicalInfos) { return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos)) } return res, nil } // GetSignatures() parses the image's signatures blob into a slice of byte slices. 
func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
	if instanceDigest != nil {
		return nil, ErrNoManifestLists
	}
	var offset int
	sigslice := [][]byte{}
	signature := []byte{}
	if len(s.SignatureSizes) > 0 {
		signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, "signatures")
		if err != nil {
			return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.image.ID)
		}
		signature = signatureBlob
	}
	for _, length := range s.SignatureSizes {
		sigslice = append(sigslice, signature[offset:offset+length])
		offset += length
	}
	if offset != len(signature) {
		return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
	}
	return sigslice, nil
}

// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
// it's time to Commit() the image
func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
	if err != nil {
		return nil, errors.Wrapf(err, "error creating a temporary directory")
	}
	image := &storageImageDestination{
		imageRef:       imageRef,
		directory:      directory,
		blobDiffIDs:    make(map[digest.Digest]digest.Digest),
		fileSizes:      make(map[digest.Digest]int64),
		filenames:      make(map[digest.Digest]string),
		SignatureSizes: []int{},
	}
	return image, nil
}

// Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s *storageImageDestination) Reference() types.ImageReference {
	return s.imageRef
}

// Close cleans up the temporary directory.
func (s *storageImageDestination) Close() error {
	return os.RemoveAll(s.directory)
}

func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
	// We ultimately have to decompress layers to populate trees on disk
	// and need to explicitly ask for it here, so that the layers' MIME
	// types can be set accordingly.
	return types.PreserveOriginal
}

func (s *storageImageDestination) computeNextBlobCacheFile() string {
	return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (s *storageImageDestination) HasThreadSafePutBlob() bool {
	return true
}

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	// Stores a layer or data blob in our temporary directory, checking that any information
	// in the blobinfo matches the incoming data.
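	//
	// A minimal caller-side sketch (hypothetical names; the destination, cache, and
	// layerBytes are assumed to exist, mirroring the pattern used in the tests below):
	//
	//	info, err := dest.PutBlob(ctx, bytes.NewReader(layerBytes), types.BlobInfo{
	//		Digest: expectedDigest, // optional; verified against the stream when set
	//		Size:   int64(len(layerBytes)),
	//	}, cache, false)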
	errorBlobInfo := types.BlobInfo{
		Digest: "",
		Size:   -1,
	}
	// Set up to digest the blob and count its size while saving it to a file.
	hasher := digest.Canonical.Digester()
	if blobinfo.Digest.Validate() == nil {
		if a := blobinfo.Digest.Algorithm(); a.Available() {
			hasher = a.Digester()
		}
	}
	diffID := digest.Canonical.Digester()
	filename := s.computeNextBlobCacheFile()
	file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
	if err != nil {
		return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
	}
	defer file.Close()
	counter := ioutils.NewWriteCounter(hasher.Hash())
	reader := io.TeeReader(io.TeeReader(stream, counter), file)
	decompressed, err := archive.DecompressStream(reader)
	if err != nil {
		return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
	}
	// Copy the data to the file.
	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
	_, err = io.Copy(diffID.Hash(), decompressed)
	decompressed.Close()
	if err != nil {
		return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
	}
	// Ensure that any information that we were given about the blob is correct.
	if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
		return errorBlobInfo, ErrBlobDigestMismatch
	}
	if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
		return errorBlobInfo, ErrBlobSizeMismatch
	}
	// Record information about the blob.
	s.putBlobMutex.Lock()
	s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
	s.fileSizes[hasher.Digest()] = counter.Count
	s.filenames[hasher.Digest()] = filename
	s.putBlobMutex.Unlock()
	blobDigest := blobinfo.Digest
	if blobDigest.Validate() != nil {
		blobDigest = hasher.Digest()
	}
	blobSize := blobinfo.Size
	if blobSize < 0 {
		blobSize = counter.Count
	}
	// This is safe because we have just computed both values ourselves.
	cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
	return types.BlobInfo{
		Digest:    blobDigest,
		Size:      blobSize,
		MediaType: blobinfo.MediaType,
	}, nil
}

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	// lock the entire method as it executes fairly quickly
	s.putBlobMutex.Lock()
	defer s.putBlobMutex.Unlock()
	if blobinfo.Digest == "" {
		return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
	}
	if err := blobinfo.Digest.Validate(); err != nil {
		return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
	}
	// Check if we've already cached it in a file.
if size, ok := s.fileSizes[blobinfo.Digest]; ok { return true, types.BlobInfo{ Digest: blobinfo.Digest, Size: size, MediaType: blobinfo.MediaType, }, nil } // Check if we have a wasn't-compressed layer in storage that's based on that blob. layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) } if len(layers) > 0 { // Save this for completeness. s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, types.BlobInfo{ Digest: blobinfo.Digest, Size: layers[0].UncompressedSize, MediaType: blobinfo.MediaType, }, nil } // Check if we have a was-compressed layer in storage that's based on that blob. layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) } if len(layers) > 0 { // Record the uncompressed value so that we can use it to calculate layer IDs. s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, types.BlobInfo{ Digest: blobinfo.Digest, Size: layers[0].CompressedSize, MediaType: blobinfo.MediaType, }, nil } // Does the blob correspond to a known DiffID which we already have available? // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the // uncompressed layer, and that can happen only if canSubstitute, or if the incoming manifest already specifies the size. if canSubstitute || blobinfo.Size != -1 { if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest { layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest) } if len(layers) > 0 { if blobinfo.Size != -1 { s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest return true, blobinfo, nil } if !canSubstitute { return false, types.BlobInfo{}, fmt.Errorf("Internal error: canSubstitute was expected to be true for blobInfo %v", blobinfo) } s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest return true, types.BlobInfo{ Digest: uncompressedDigest, Size: layers[0].UncompressedSize, MediaType: blobinfo.MediaType, }, nil } } } // Nope, we don't have it. return false, types.BlobInfo{}, nil } // computeID computes a recommended image ID based on information we have so far. If // the manifest is not of a type that we recognize, we return an empty value, indicating // that since we don't have a recommendation, a random ID should be used if one needs // to be allocated. func (s *storageImageDestination) computeID(m manifest.Manifest) string { // Build the diffID list. We need the decompressed sums that we've been calculating to // fill in the DiffIDs. It's expected (but not enforced by us) that the number of // diffIDs corresponds to the number of non-EmptyLayer entries in the history. var diffIDs []digest.Digest switch m := m.(type) { case *manifest.Schema1: // Build a list of the diffIDs we've generated for the non-throwaway FS layers, // in reverse of the order in which they were originally listed. 
		for i, compat := range m.ExtractedV1Compatibility {
			if compat.ThrowAway {
				continue
			}
			blobSum := m.FSLayers[i].BlobSum
			diffID, ok := s.blobDiffIDs[blobSum]
			if !ok {
				logrus.Infof("error looking up diffID for layer %q", blobSum.String())
				return ""
			}
			diffIDs = append([]digest.Digest{diffID}, diffIDs...)
		}
	case *manifest.Schema2, *manifest.OCI1:
		// We know the ID calculation for these formats doesn't actually use the diffIDs,
		// so we don't need to populate the diffID list.
	default:
		return ""
	}
	id, err := m.ImageID(diffIDs)
	if err != nil {
		return ""
	}
	return id
}

// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
// information out of it for Inspect().
func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
	if info.Digest == "" {
		return nil, errors.Errorf(`no digest supplied when reading blob`)
	}
	if err := info.Digest.Validate(); err != nil {
		return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
	}
	// Assume it's a file, since we're only calling this from a place that expects to read files.
	if filename, ok := s.filenames[info.Digest]; ok {
		contents, err2 := ioutil.ReadFile(filename)
		if err2 != nil {
			return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)
		}
		return contents, nil
	}
	// If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
	return nil, errors.New("blob not found")
}

func (s *storageImageDestination) Commit(ctx context.Context) error {
	// Find the list of layer blobs.
	if len(s.manifest) == 0 {
		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
	}
	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
	if err != nil {
		return errors.Wrapf(err, "error parsing manifest")
	}
	layerBlobs := man.LayerInfos()
	// Extract or find the layers.
	lastLayer := ""
	for _, blob := range layerBlobs {
		if blob.EmptyLayer {
			continue
		}
		// Check if there's already a layer with the ID that we'd give to the result of applying
		// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
		diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
		if !haveDiffID {
			// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
			// or to even check if we had it.
			// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
			// that relies on using a blob digest that has never been seen by the store had better call
			// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
			// so far we are going to accommodate that (if we should be doing that at all).
			logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
			has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
			if err != nil {
				return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
			}
			if !has {
				return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
			}
			diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
			if !haveDiffID {
				return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
			}
		}
		id := diffID.Hex()
		if lastLayer != "" {
			id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
		}
		if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
			// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID continue } // Check if we previously cached a file with that blob's contents. If we didn't, // then we need to read the desired contents from a layer. filename, ok := s.filenames[blob.Digest] if !ok { // Try to find the layer with contents matching that blobsum. layer := "" layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID) if err2 == nil && len(layers) > 0 { layer = layers[0].ID } else { layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) if err2 == nil && len(layers) > 0 { layer = layers[0].ID } } if layer == "" { return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) } // Read the layer's contents. noCompression := archive.Uncompressed diffOptions := &storage.DiffOptions{ Compression: &noCompression, } diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions) if err2 != nil { return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) } // Copy the layer diff to a file. Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants // the same lock, so the diff can't just be directly streamed from one // to the other. filename = s.computeNextBlobCacheFile() file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) if err != nil { diff.Close() return errors.Wrapf(err, "error creating temporary file %q", filename) } // Copy the data to the file. // TODO: This can take quite some time, and should ideally be cancellable using // ctx.Done(). _, err = io.Copy(file, diff) diff.Close() file.Close() if err != nil { return errors.Wrapf(err, "error storing blob to file %q", filename) } // Make sure that we can find this file later, should we need the layer's // contents again. s.filenames[blob.Digest] = filename } // Read the cached blob and use it as a diff. file, err := os.Open(filename) if err != nil { return errors.Wrapf(err, "error opening file %q", filename) } defer file.Close() // Build the new layer using the diff, regardless of where it came from. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file) if err != nil && errors.Cause(err) != storage.ErrDuplicateID { return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) } lastLayer = layer.ID } // If one of those blobs was a configuration blob, then we can try to dig out the date when the image // was originally created, in case we're just copying it. If not, no harm done. options := &storage.ImageOptions{} if inspect, err := man.Inspect(s.getConfigBlob); err == nil && inspect.Created != nil { logrus.Debugf("setting image creation date to %s", inspect.Created) options.CreationDate = *inspect.Created } // Create the image record, pointing to the most-recently added layer. 
intendedID := s.imageRef.id if intendedID == "" { intendedID = s.computeID(man) } oldNames := []string{} img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) return errors.Wrapf(err, "error creating image %q", intendedID) } img, err = s.imageRef.transport.store.Image(intendedID) if err != nil { return errors.Wrapf(err, "error reading image %q", intendedID) } if img.TopLayer != lastLayer { logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) } logrus.Debugf("reusing image ID %q", img.ID) oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so // we just need to screen out the ones that are actually layers to get the list of non-layers. dataBlobs := make(map[digest.Digest]struct{}) for blob := range s.filenames { dataBlobs[blob] = struct{}{} } for _, layerBlob := range layerBlobs { delete(dataBlobs, layerBlob.Digest) } for blob := range dataBlobs { v, err := ioutil.ReadFile(s.filenames[blob]) if err != nil { return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) } if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) } } // Set the reference's name on the image. if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { names := []string{} if name != nil { names = append(names, name.String()) } if len(oldNames) > 0 { names = append(names, oldNames...) } if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) } logrus.Debugf("set names of image %q to %v", img.ID, names) } // Save the manifest. Allow looking it up by digest by using the key convention defined by the Store. // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. 
manifestDigest, err := manifest.Digest(s.manifest) if err != nil { return errors.Wrapf(err, "error computing manifest digest") } if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) return err } if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) return err } // Save the signatures, if we have any. if len(s.signatures) > 0 { if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) return err } } // Save our metadata. metadata, err := json.Marshal(s) if err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) return err } if len(metadata) != 0 { if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) return err } logrus.Debugf("saved image metadata %q", string(metadata)) } return nil } var manifestMIMETypes = []string{ imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, } func (s *storageImageDestination) SupportedManifestMIMETypes() []string { return manifestMIMETypes } // PutManifest writes the manifest to the destination. func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error { if s.imageRef.named != nil { if digested, ok := s.imageRef.named.(reference.Digested); ok { matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest()) if err != nil { return err } if !matches { return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest()) } } } s.manifest = make([]byte, len(manifestBlob)) copy(s.manifest, manifestBlob) return nil } // SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was // previously supplied to PutSignatures(). func (s *storageImageDestination) SupportsSignatures(ctx context.Context) error { return nil } // AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false } // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. 
func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool { return true // Yes, we want the unmodified manifest } // PutSignatures records the image's signatures for committing as a single data blob. func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error { sizes := []int{} sigblob := []byte{} for _, sig := range signatures { sizes = append(sizes, len(sig)) newblob := make([]byte, len(sigblob)+len(sig)) copy(newblob, sigblob) copy(newblob[len(sigblob):], sig) sigblob = newblob } s.signatures = sigblob s.SignatureSizes = sizes return nil } // getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the // signatures, and the uncompressed sizes of all of the image's layers. func (s *storageImageSource) getSize() (int64, error) { var sum int64 // Size up the data blobs. dataNames, err := s.imageRef.transport.store.ListImageBigData(s.image.ID) if err != nil { return -1, errors.Wrapf(err, "error reading image %q", s.image.ID) } for _, dataName := range dataNames { bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.image.ID, dataName) if err != nil { return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.image.ID) } sum += bigSize } // Add the signature sizes. for _, sigSize := range s.SignatureSizes { sum += int64(sigSize) } // Walk the layer list. layerID := s.image.TopLayer for layerID != "" { layer, err := s.imageRef.transport.store.Layer(layerID) if err != nil { return -1, err } if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) } sum += layer.UncompressedSize if layer.Parent == "" { break } layerID = layer.Parent } return sum, nil } // Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the // signatures, and the uncompressed sizes of all of the image's layers. func (s *storageImageSource) Size() (int64, error) { return s.getSize() } // Size() returns the previously-computed size of the image, with no error. 
func (s *storageImageCloser) Size() (int64, error) { return s.size, nil } // newImage creates an image that also knows its size func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) { src, err := newImageSource(s) if err != nil { return nil, err } img, err := image.FromSource(ctx, sys, src) if err != nil { return nil, err } size, err := src.getSize() if err != nil { return nil, err } return &storageImageCloser{ImageCloser: img, size: size}, nil } image-4.0.1/storage/storage_image_test.go000066400000000000000000000044571354546467100204670ustar00rootroot00000000000000package storage import ( "testing" "github.com/containers/image/v4/manifest" "github.com/containers/image/v4/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestBuildLayerInfosForCopy(t *testing.T) { manifestInfos := []manifest.LayerInfo{ {BlobInfo: types.BlobInfo{Digest: "sha256:6a5a5368e0c2d3e5909184fa28ddfd56072e7ff3ee9a945876f7eee5896ef5bb", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Size: -1}, EmptyLayer: true}, {BlobInfo: types.BlobInfo{Digest: "sha256:1bbf5d58d24c47512e234a5623474acf65ae00d4d1414272a893204f44cc680c", Size: -1}, EmptyLayer: false}, {BlobInfo: types.BlobInfo{Digest: "sha256:5555555555555555555555555555555555555555555555555555555555555555", Size: -1}, EmptyLayer: true}, } physicalInfos := []types.BlobInfo{ {Digest: "sha256:1111111111111111111111111111111111111111111111111111111111111111", Size: 111, MediaType: manifest.DockerV2Schema2LayerMediaType}, {Digest: "sha256:2222222222222222222222222222222222222222222222222222222222222222", Size: 222, MediaType: manifest.DockerV2Schema2LayerMediaType}, } // Success res, err := buildLayerInfosForCopy(manifestInfos, physicalInfos) require.NoError(t, err) assert.Equal(t, []types.BlobInfo{ {Digest: "sha256:1111111111111111111111111111111111111111111111111111111111111111", Size: 111, MediaType: manifest.DockerV2Schema2LayerMediaType}, {Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32}, {Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32}, {Digest: "sha256:2222222222222222222222222222222222222222222222222222222222222222", Size: 222, MediaType: manifest.DockerV2Schema2LayerMediaType}, {Digest: "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", Size: 32}, }, res) // PhysicalInfos too short _, err = buildLayerInfosForCopy(manifestInfos, physicalInfos[:len(physicalInfos)-1]) assert.Error(t, err) // PhysicalInfos too long _, err = buildLayerInfosForCopy(manifestInfos, append(physicalInfos, physicalInfos[0])) assert.Error(t, err) } image-4.0.1/storage/storage_reference.go000066400000000000000000000174341354546467100203030ustar00rootroot00000000000000// +build !containers_image_storage_stub package storage import ( "context" "strings" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte // value hex-encoded into a 64-character string, and a reference to a Store // where an image is, or would be, kept. // Either "named" or "id" must be set. 
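// For example (illustrative paths), a fully-specified reference renders via
// StringWithinTransport() below as:
//   [vfs@/var/lib/containers/storage+/run/containers/storage]docker.io/library/busybox:latest@<64-hex-id>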
type storageReference struct { transport storageTransport named reference.Named // may include a tag and/or a digest id string } func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) { if named == nil && id == "" { return nil, ErrInvalidReference } // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by // further calls to the original transport's SetStore() method. return &storageReference{ transport: transport, named: named, id: id, }, nil } // imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref func imageMatchesRepo(image *storage.Image, ref reference.Named) bool { repo := ref.Name() for _, name := range image.Names { if named, err := reference.ParseNormalizedNamed(name); err == nil { if named.Name() == repo { return true } } } return false } // Resolve the reference's name to an image ID in the store, if there's already // one present with the same name or ID, and return the image. func (s *storageReference) resolveImage() (*storage.Image, error) { var loadedImage *storage.Image if s.id == "" && s.named != nil { // Look for an image that has the expanded reference name as an explicit Name value. image, err := s.transport.store.Image(s.named.String()) if image != nil && err == nil { loadedImage = image s.id = image.ID } } if s.id == "" && s.named != nil { if digested, ok := s.named.(reference.Digested); ok { // Look for an image with the specified digest that has the same name, // though possibly with a different tag or digest, as a Name value, so // that the canonical reference can be implicitly resolved to the image. images, err := s.transport.store.ImagesByDigest(digested.Digest()) if err == nil && len(images) > 0 { for _, image := range images { if imageMatchesRepo(image, s.named) { loadedImage = image s.id = image.ID break } } } } } if s.id == "" { logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) } if loadedImage == nil { img, err := s.transport.store.Image(s.id) if err != nil { return nil, errors.Wrapf(err, "error reading image %q", s.id) } loadedImage = img } if s.named != nil { if !imageMatchesRepo(loadedImage, s.named) { logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) return nil, ErrNoSuchImage } } // Default to having the image digest that we hand back match the most recently // added manifest... if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok { loadedImage.Digest = digest } // ... unless the named reference says otherwise, and it matches one of the digests // in the image. For those cases, set the Digest field to that value, for the // sake of older consumers that don't know there's a whole list in there now. if s.named != nil { if digested, ok := s.named.(reference.Digested); ok { for _, digest := range loadedImage.Digests { if digest == digested.Digest() { loadedImage.Digest = digest break } } } } return loadedImage, nil } // Return a Transport object that defaults to using the same store that we used // to build this reference object. 
func (s storageReference) Transport() types.ImageTransport { return &storageTransport{ store: s.transport.store, defaultUIDMap: s.transport.defaultUIDMap, defaultGIDMap: s.transport.defaultGIDMap, } } // Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { return s.named } // Return a name with a tag, prefixed with the graph root and driver name, to // disambiguate between images which may be present in multiple stores and // share only their names. func (s storageReference) StringWithinTransport() string { optionsList := "" options := s.transport.store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" if s.named != nil { res = res + s.named.String() } if s.id != "" { res = res + "@" + s.id } return res } func (s storageReference) PolicyConfigurationIdentity() string { res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" if s.named != nil { res = res + s.named.String() } if s.id != "" { res = res + "@" + s.id } return res } // Also accept policy that's tied to the combination of the graph root and // driver name, to apply to all images stored in the Store, and to just the // graph root, in case we're using multiple drivers in the same directory for // some reason. func (s storageReference) PolicyConfigurationNamespaces() []string { storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.named != nil { if s.id != "" { // The reference without the ID is also a valid namespace. namespaces = append(namespaces, storeSpec+s.named.String()) } tagged, isTagged := s.named.(reference.Tagged) _, isDigested := s.named.(reference.Digested) if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace. namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag()) } components := strings.Split(s.named.Name(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] } } namespaces = append(namespaces, storeSpec) namespaces = append(namespaces, driverlessStoreSpec) return namespaces } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
func (s storageReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { return newImage(ctx, sys, s) } func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { img, err := s.resolveImage() if err != nil { return err } layers, err := s.transport.store.DeleteImage(img.ID, true) if err == nil { logrus.Debugf("deleted image %q", img.ID) for _, layer := range layers { logrus.Debugf("deleted layer %q", layer) } } return err } func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { return newImageSource(s) } func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return newImageDestination(s) } image-4.0.1/storage/storage_reference_test.go000066400000000000000000000120451354546467100213330ustar00rootroot00000000000000// +build !containers_image_storage_stub package storage import ( "fmt" "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestNewReference(t *testing.T) { newStore(t) st, ok := Transport.(*storageTransport) require.True(t, ok) // Success is tested throughout; test only the failure _, err := newReference(*st, nil, "") assert.Error(t, err) } func TestStorageReferenceTransport(t *testing.T) { newStore(t) ref, err := Transport.ParseReference("busybox") require.NoError(t, err) transport := ref.Transport() st, ok := transport.(*storageTransport) require.True(t, ok) assert.Equal(t, *(Transport.(*storageTransport)), *st) } // A common list of reference formats to test for the various ImageReference methods. var validReferenceTestCases = []struct { input, dockerRef, canonical string namespaces []string }{ { "busybox", "docker.io/library/busybox:latest", "docker.io/library/busybox:latest", []string{"docker.io/library/busybox", "docker.io/library", "docker.io"}, }, { "example.com/myns/ns2/busybox:notlatest", "example.com/myns/ns2/busybox:notlatest", "example.com/myns/ns2/busybox:notlatest", []string{"example.com/myns/ns2/busybox", "example.com/myns/ns2", "example.com/myns", "example.com"}, }, { "@" + sha256digestHex, "", "@" + sha256digestHex, []string{}, }, { "busybox@" + sha256digestHex, "docker.io/library/busybox:latest", "docker.io/library/busybox:latest@" + sha256digestHex, []string{"docker.io/library/busybox:latest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, }, { "busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, []string{"docker.io/library/busybox", "docker.io/library", "docker.io"}, }, { "busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest@" + sha256digestHex, []string{"docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, }, { "busybox:notlatest@sha256:" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex, []string{"docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, }, { "busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, "docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex, []string{"docker.io/library/busybox@" + sha256Digest2, "docker.io/library/busybox", "docker.io/library", "docker.io"}, }, { 
"busybox:notlatest@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox:notlatest@" + sha256Digest2, "docker.io/library/busybox:notlatest@" + sha256Digest2 + "@" + sha256digestHex, []string{"docker.io/library/busybox:notlatest@" + sha256Digest2, "docker.io/library/busybox:notlatest", "docker.io/library/busybox", "docker.io/library", "docker.io"}, }, } func TestStorageReferenceDockerReference(t *testing.T) { newStore(t) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(c.input) require.NoError(t, err, c.input) if c.dockerRef != "" { dr := ref.DockerReference() require.NotNil(t, dr, c.input) assert.Equal(t, c.dockerRef, dr.String(), c.input) } else { dr := ref.DockerReference() assert.Nil(t, dr, c.input) } } } func TestStorageReferenceStringWithinTransport(t *testing.T) { store := newStore(t) optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := fmt.Sprintf("[%s@%s+%s%s]", store.GraphDriverName(), store.GraphRoot(), store.RunRoot(), optionsList) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(c.input) require.NoError(t, err, c.input) assert.Equal(t, storeSpec+c.canonical, ref.StringWithinTransport(), c.input) } } func TestStorageReferencePolicyConfigurationIdentity(t *testing.T) { store := newStore(t) storeSpec := fmt.Sprintf("[%s@%s]", store.GraphDriverName(), store.GraphRoot()) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(c.input) require.NoError(t, err, c.input) assert.Equal(t, storeSpec+c.canonical, ref.PolicyConfigurationIdentity(), c.input) } } func TestStorageReferencePolicyConfigurationNamespaces(t *testing.T) { store := newStore(t) storeSpec := fmt.Sprintf("[%s@%s]", store.GraphDriverName(), store.GraphRoot()) for _, c := range validReferenceTestCases { ref, err := Transport.ParseReference(c.input) require.NoError(t, err, c.input) expectedNS := []string{} for _, ns := range c.namespaces { expectedNS = append(expectedNS, storeSpec+ns) } expectedNS = append(expectedNS, storeSpec) expectedNS = append(expectedNS, fmt.Sprintf("[%s]", store.GraphRoot())) assert.Equal(t, expectedNS, ref.PolicyConfigurationNamespaces(), c.input) } } // NewImage, NewImageSource, NewImageDestination, DeleteImage tested in storage_test.go image-4.0.1/storage/storage_test.go000066400000000000000000001037171354546467100173240ustar00rootroot00000000000000// +build !containers_image_storage_stub package storage import ( "archive/tar" "bytes" "context" "crypto/rand" "crypto/sha256" "flag" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "testing" "time" "github.com/containers/image/v4/pkg/blobinfocache/memory" "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/reexec" ddigest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) var ( _imgd types.ImageDestination = &storageImageDestination{} _imgs types.ImageSource = &storageImageSource{} _ref types.ImageReference = &storageReference{} _transport types.ImageTransport = &storageTransport{} topwd = "" ) const ( layerSize = 12345 ) func TestMain(m *testing.M) { if reexec.Init() { return } wd, err := ioutil.TempDir("", "test.") if err != nil { os.Exit(1) } topwd = wd debug := false flag.BoolVar(&debug, "debug", false, "print debug statements") 
flag.Parse() if debug { logrus.SetLevel(logrus.DebugLevel) } code := m.Run() os.RemoveAll(wd) os.Exit(code) } func newStoreWithGraphDriverOptions(t *testing.T, options []string) storage.Store { wd, err := ioutil.TempDir(topwd, "test.") if err != nil { t.Fatal(err) } err = os.MkdirAll(wd, 0700) if err != nil { t.Fatal(err) } run := filepath.Join(wd, "run") root := filepath.Join(wd, "root") Transport.SetDefaultUIDMap([]idtools.IDMap{{ ContainerID: 0, HostID: os.Getuid(), Size: 1, }}) Transport.SetDefaultGIDMap([]idtools.IDMap{{ ContainerID: 0, HostID: os.Getgid(), Size: 1, }}) store, err := storage.GetStore(storage.StoreOptions{ RunRoot: run, GraphRoot: root, GraphDriverName: "vfs", GraphDriverOptions: options, UIDMap: Transport.DefaultUIDMap(), GIDMap: Transport.DefaultGIDMap(), }) if err != nil { t.Fatal(err) } Transport.SetStore(store) return store } func newStore(t *testing.T) storage.Store { return newStoreWithGraphDriverOptions(t, []string{}) } func TestParse(t *testing.T) { store := newStore(t) ref, err := Transport.ParseReference("test") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } ref, err = Transport.ParseStoreReference(store, "test") if err != nil { t.Fatalf("ParseStoreReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseStoreReference(%q) returned nil reference", "test") } strRef := ref.StringWithinTransport() ref, err = Transport.ParseReference(strRef) if err != nil { t.Fatalf("ParseReference(%q) returned error: %v", strRef, err) } if ref == nil { t.Fatalf("ParseReference(%q) returned nil reference", strRef) } transport := storageTransport{ store: store, defaultUIDMap: Transport.(*storageTransport).defaultUIDMap, defaultGIDMap: Transport.(*storageTransport).defaultGIDMap, } _references := []storageReference{ { named: ref.(*storageReference).named, id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", transport: transport, }, { named: ref.(*storageReference).named, transport: transport, }, { id: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", transport: transport, }, { named: ref.DockerReference(), transport: transport, }, } for _, reference := range _references { s := reference.StringWithinTransport() ref, err := Transport.ParseStoreReference(store, s) if err != nil { t.Fatalf("ParseReference(%q) returned error: %v", strRef, err) } if ref.id != reference.id { t.Fatalf("ParseReference(%q) failed to extract ID", s) } if reference.named == nil { if ref.named != nil { t.Fatalf("ParseReference(%q) set non-nil named", s) } } else { if ref.named.String() != reference.named.String() { t.Fatalf("ParseReference(%q) failed to extract reference (%q!=%q)", s, ref.named.String(), reference.named.String()) } } } } func TestParseWithGraphDriverOptions(t *testing.T) { optionLists := [][]string{ {}, {"vfs.ignore_chown_errors=true"}, {"vfs.ignore_chown_errors=false"}, } for _, optionList := range optionLists { store := newStoreWithGraphDriverOptions(t, optionList) ref, err := Transport.ParseStoreReference(store, "test") if err != nil { t.Fatalf("ParseStoreReference(%q, graph driver options %v) returned error %v", "test", optionList, err) } if ref == nil { t.Fatalf("ParseStoreReference returned nil reference") } spec := ref.StringWithinTransport() ref2, err := Transport.ParseReference(spec) if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") 
} sref, ok := ref2.(*storageReference) if !ok { t.Fatalf("ParseReference returned a reference from transport %s, not one of ours", ref2.Transport().Name()) } parsedOptions := sref.transport.store.GraphOptions() if len(parsedOptions) != len(optionList) { t.Fatalf("Lost options between %v and %v", optionList, parsedOptions) } for i := range optionList { if parsedOptions[i] != optionList[i] { t.Fatalf("Mismatched option %d: %v and %v", i, optionList[i], parsedOptions[i]) } } } } func systemContext() *types.SystemContext { return &types.SystemContext{} } func makeLayer(t *testing.T, compression archive.Compression) (ddigest.Digest, int64, int64, []byte) { var cwriter io.WriteCloser var uncompressed *ioutils.WriteCounter var twriter *tar.Writer preader, pwriter := io.Pipe() tbuffer := bytes.Buffer{} if compression != archive.Uncompressed { compressor, err := archive.CompressStream(pwriter, compression) if err != nil { t.Fatalf("Error compressing layer: %v", err) } cwriter = compressor uncompressed = ioutils.NewWriteCounter(cwriter) } else { uncompressed = ioutils.NewWriteCounter(pwriter) } twriter = tar.NewWriter(uncompressed) buf := make([]byte, layerSize) n, err := rand.Read(buf) if err != nil { t.Fatalf("Error reading tar data: %v", err) } if n != len(buf) { t.Fatalf("Short read reading tar data: %d < %d", n, len(buf)) } for i := 1024; i < 2048; i++ { buf[i] = 0 } go func() { defer pwriter.Close() if cwriter != nil { defer cwriter.Close() } defer twriter.Close() err := twriter.WriteHeader(&tar.Header{ Name: "/random-single-file", Mode: 0600, Size: int64(len(buf)), ModTime: time.Now(), AccessTime: time.Now(), ChangeTime: time.Now(), Typeflag: tar.TypeReg, }) if err != nil { t.Fatalf("Error writing tar header: %v", err) } n, err := twriter.Write(buf) if err != nil { t.Fatalf("Error writing tar header: %v", err) } if n != len(buf) { t.Fatalf("Short write writing tar header: %d < %d", n, len(buf)) } }() _, err = io.Copy(&tbuffer, preader) if err != nil { t.Fatalf("Error reading layer tar: %v", err) } sum := ddigest.SHA256.FromBytes(tbuffer.Bytes()) return sum, uncompressed.Count, int64(tbuffer.Len()), tbuffer.Bytes() } func TestWriteRead(t *testing.T) { if os.Geteuid() != 0 { t.Skip("TestWriteRead requires root privileges") } config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` sum := ddigest.SHA256.FromBytes([]byte(config)) configInfo := types.BlobInfo{ Digest: sum, Size: int64(len(config)), } manifests := []string{ //`{ // "schemaVersion": 2, // "mediaType": "application/vnd.oci.image.manifest.v1+json", // "config": { // "mediaType": "application/vnd.oci.image.serialization.config.v1+json", // "size": %cs, // "digest": "%ch" // }, // "layers": [ // { // "mediaType": "application/vnd.oci.image.serialization.rootfs.tar.gzip", // "digest": "%lh", // "size": %ls // } // ] //}`, `{ "schemaVersion": 1, "name": "test", "tag": "latest", "architecture": "amd64", "fsLayers": [ { "blobSum": "%lh" } ], "history": [ { "v1Compatibility": 
"{\"id\":\"%li\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":%ls}" } ] }`, `{ "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": %cs, "digest": "%ch" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%lh", "size": %ls } ] }`, } signatures := [][]byte{ []byte("Signature A"), []byte("Signature B"), } newStore(t) ref, err := Transport.ParseReference("test") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } cache := memory.New() for _, manifestFmt := range manifests { dest, err := ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) } if dest.Reference().StringWithinTransport() != ref.StringWithinTransport() { t.Fatalf("NewImageDestination(%q) changed the reference to %q", ref.StringWithinTransport(), dest.Reference().StringWithinTransport()) } t.Logf("supported manifest MIME types: %v", dest.SupportedManifestMIMETypes()) if err := dest.SupportsSignatures(context.Background()); err != nil { t.Fatalf("Destination image doesn't support signatures: %v", err) } t.Logf("compress layers: %v", dest.DesiredLayerCompression()) compression := archive.Uncompressed if dest.DesiredLayerCompression() == types.Compress { compression = archive.Gzip } digest, decompressedSize, size, blob := makeLayer(t, compression) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination: %v", err) } t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", digest, size, decompressedSize) if _, err := dest.PutBlob(context.Background(), bytes.NewBufferString(config), configInfo, cache, false); err != nil { t.Fatalf("Error saving config to destination: %v", err) } manifest := strings.Replace(manifestFmt, "%lh", digest.String(), -1) manifest = strings.Replace(manifest, "%ch", configInfo.Digest.String(), -1) manifest = strings.Replace(manifest, "%ls", fmt.Sprintf("%d", size), -1) manifest = strings.Replace(manifest, 
"%cs", fmt.Sprintf("%d", configInfo.Size), -1) li := digest.Hex() manifest = strings.Replace(manifest, "%li", li, -1) manifest = strings.Replace(manifest, "%ci", sum.Hex(), -1) t.Logf("this manifest is %q", manifest) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error saving manifest to destination: %v", err) } if err := dest.PutSignatures(context.Background(), signatures); err != nil { t.Fatalf("Error saving signatures to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to destination: %v", err) } dest.Close() img, err := ref.NewImage(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) } imageConfigInfo := img.ConfigInfo() if imageConfigInfo.Digest != "" { blob, err := img.ConfigBlob(context.Background()) if err != nil { t.Fatalf("image %q claimed there was a config blob, but couldn't produce it: %v", ref.StringWithinTransport(), err) } sum := ddigest.SHA256.FromBytes(blob) if sum != configInfo.Digest { t.Fatalf("image config blob digest for %q doesn't match", ref.StringWithinTransport()) } if int64(len(blob)) != configInfo.Size { t.Fatalf("image config size for %q changed from %d to %d", ref.StringWithinTransport(), configInfo.Size, len(blob)) } } layerInfos := img.LayerInfos() if layerInfos == nil { t.Fatalf("image for %q returned empty layer list", ref.StringWithinTransport()) } imageInfo, err := img.Inspect(context.Background()) if err != nil { t.Fatalf("Inspect(%q) returned error %v", ref.StringWithinTransport(), err) } if imageInfo.Created.IsZero() { t.Fatalf("Image %q claims to have been created at time 0", ref.StringWithinTransport()) } src, err := ref.NewImageSource(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) } if src == nil { t.Fatalf("NewImageSource(%q) returned no source", ref.StringWithinTransport()) } // Note that we would strip a digest here, but not a tag. if src.Reference().StringWithinTransport() != ref.StringWithinTransport() { // As long as it's only the addition of an ID suffix, that's okay. 
if !strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@") { t.Fatalf("NewImageSource(%q) changed the reference to %q", ref.StringWithinTransport(), src.Reference().StringWithinTransport()) } } _, manifestType, err := src.GetManifest(context.Background(), nil) if err != nil { t.Fatalf("GetManifest(%q) returned error %v", ref.StringWithinTransport(), err) } t.Logf("this manifest's type appears to be %q", manifestType) sum = ddigest.SHA256.FromBytes([]byte(manifest)) _, _, err = src.GetManifest(context.Background(), &sum) if err == nil { t.Fatalf("GetManifest(%q) with an instanceDigest is supposed to fail", ref.StringWithinTransport()) } sigs, err := src.GetSignatures(context.Background(), nil) if err != nil { t.Fatalf("GetSignatures(%q) returned error %v", ref.StringWithinTransport(), err) } if len(sigs) < len(signatures) { t.Fatalf("Lost %d signatures", len(signatures)-len(sigs)) } if len(sigs) > len(signatures) { t.Fatalf("Gained %d signatures", len(sigs)-len(signatures)) } for i := range sigs { if bytes.Compare(sigs[i], signatures[i]) != 0 { t.Fatalf("Signature %d was corrupted", i) } } _, err = src.GetSignatures(context.Background(), &sum) if err == nil { t.Fatalf("GetSignatures(%q) with instanceDigest is supposed to fail", ref.StringWithinTransport()) } for _, layerInfo := range layerInfos { buf := bytes.Buffer{} layer, size, err := src.GetBlob(context.Background(), layerInfo, cache) if err != nil { t.Fatalf("Error reading layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) } t.Logf("Decompressing blob %q, blob size = %d, layerInfo.Size = %d bytes", layerInfo.Digest, size, layerInfo.Size) hasher := sha256.New() compressed := ioutils.NewWriteCounter(hasher) countedLayer := io.TeeReader(layer, compressed) decompressed, err := archive.DecompressStream(countedLayer) if err != nil { t.Fatalf("Error decompressing layer %q from %q", layerInfo.Digest, ref.StringWithinTransport()) } n, err := io.Copy(&buf, decompressed) if layerInfo.Size >= 0 && compressed.Count != layerInfo.Size { t.Fatalf("Blob size is different than expected: %d != %d, read %d", compressed.Count, layerInfo.Size, n) } if size >= 0 && compressed.Count != size { t.Fatalf("Blob size mismatch: %d != %d, read %d", compressed.Count, size, n) } sum := hasher.Sum(nil) if ddigest.NewDigestFromBytes(ddigest.SHA256, sum) != layerInfo.Digest { t.Fatalf("Layer blob digest for %q doesn't match", ref.StringWithinTransport()) } } src.Close() img.Close() err = ref.DeleteImage(context.Background(), systemContext()) if err != nil { t.Fatalf("DeleteImage(%q) returned error %v", ref.StringWithinTransport(), err) } } } func TestDuplicateName(t *testing.T) { if os.Geteuid() != 0 { t.Skip("TestDuplicateName requires root privileges") } newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } dest, err := ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) } digest, _, size, blob := makeLayer(t, archive.Uncompressed) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving 
randomly-generated layer to destination, first pass: %v", err) } manifest := fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, digest, size) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to destination, first pass: %v", err) } dest.Close() dest, err = ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) } digest, _, size, blob = makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: int64(size), Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) } manifest = fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, digest, size) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to destination, second pass: %v", err) } dest.Close() } func TestDuplicateID(t *testing.T) { if os.Geteuid() != 0 { t.Skip("TestDuplicateID requires root privileges") } newStore(t) cache := memory.New() ref, err := Transport.ParseReference("@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } dest, err := ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) } digest, _, size, blob := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) } manifest := fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, digest, size) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to destination, first pass: %v", err) } dest.Close() dest, err = ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q, second pass) returned no 
destination", ref.StringWithinTransport()) } digest, _, size, blob = makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: int64(size), Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) } manifest = fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, digest, size) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); errors.Cause(err) != storage.ErrDuplicateID { if err != nil { t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) } t.Fatal("Incorrectly succeeded committing changes to destination, second pass: no error") } dest.Close() } func TestDuplicateNameID(t *testing.T) { if os.Geteuid() != 0 { t.Skip("TestDuplicateNameID requires root privileges") } newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } dest, err := ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q, first pass) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q, first pass) returned no destination", ref.StringWithinTransport()) } digest, _, size, blob := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size, Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, first pass: %v", err) } manifest := fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, digest, size) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to destination, first pass: %v", err) } dest.Close() dest, err = ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q, second pass) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q, second pass) returned no destination", ref.StringWithinTransport()) } digest, _, size, blob = makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: int64(size), Digest: digest, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer to destination, second pass: %v", err) } manifest = fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, digest, size) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: 
%v", err) } if err := dest.Commit(context.Background()); errors.Cause(err) != storage.ErrDuplicateID { if err != nil { t.Fatalf("Wrong error committing changes to destination, second pass: %v", err) } t.Fatal("Incorrectly succeeded committing changes to destination, second pass: no error") } dest.Close() } func TestNamespaces(t *testing.T) { newStore(t) ref, err := Transport.ParseReference("test@aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } namespaces := ref.PolicyConfigurationNamespaces() for _, namespace := range namespaces { t.Logf("namespace: %q", namespace) err = Transport.ValidatePolicyConfigurationScope(namespace) if ref == nil { t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err) } } namespace := ref.StringWithinTransport() t.Logf("ref: %q", namespace) err = Transport.ValidatePolicyConfigurationScope(namespace) if err != nil { t.Fatalf("ValidatePolicyConfigurationScope(%q) returned error: %v", namespace, err) } for _, namespace := range []string{ "@beefee", ":miracle", ":miracle@beefee", "@beefee:miracle", } { t.Logf("invalid ref: %q", namespace) err = Transport.ValidatePolicyConfigurationScope(namespace) if err == nil { t.Fatalf("ValidatePolicyConfigurationScope(%q) should have failed", namespace) } } } func TestSize(t *testing.T) { if os.Geteuid() != 0 { t.Skip("TestSize requires root privileges") } config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` sum := ddigest.SHA256.FromBytes([]byte(config)) configInfo := types.BlobInfo{ Digest: sum, Size: int64(len(config)), } newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } dest, err := ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) } if _, err := dest.PutBlob(context.Background(), bytes.NewBufferString(config), configInfo, cache, false); err != nil { t.Fatalf("Error saving config to destination: %v", err) } digest1, usize1, size1, blob := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size1, Digest: digest1, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 1 to destination: %v", err) } digest2, usize2, size2, blob := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob), types.BlobInfo{ Size: size2, Digest: digest2, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 2 to destination: %v", err) } manifest := fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": %d, "digest": "%s" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2) if err := dest.PutManifest(context.Background(), 
[]byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to destination: %v", err) } dest.Close() img, err := ref.NewImage(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) } usize, err := img.Size() if usize == -1 || err != nil { t.Fatalf("Error calculating image size: %v", err) } if int(usize) != len(config)+int(usize1)+int(usize2)+len(manifest) { t.Fatalf("Unexpected image size: %d != %d + %d + %d + %d", usize, len(config), usize1, usize2, len(manifest)) } img.Close() } func TestDuplicateBlob(t *testing.T) { if os.Geteuid() != 0 { t.Skip("TestDuplicateBlob requires root privileges") } config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}` sum := ddigest.SHA256.FromBytes([]byte(config)) configInfo := types.BlobInfo{ Digest: sum, Size: int64(len(config)), } newStore(t) cache := memory.New() ref, err := Transport.ParseReference("test") if err != nil { t.Fatalf("ParseReference(%q) returned error %v", "test", err) } if ref == nil { t.Fatalf("ParseReference returned nil reference") } dest, err := ref.NewImageDestination(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err) } if dest == nil { t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport()) } digest1, _, size1, blob1 := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob1), types.BlobInfo{ Size: size1, Digest: digest1, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 1 to destination (first copy): %v", err) } digest2, _, size2, blob2 := makeLayer(t, archive.Gzip) if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob2), types.BlobInfo{ Size: size2, Digest: digest2, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 2 to destination (first copy): %v", err) } if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob1), types.BlobInfo{ Size: size1, Digest: digest1, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 1 to destination (second copy): %v", err) } if _, err := dest.PutBlob(context.Background(), bytes.NewBuffer(blob2), types.BlobInfo{ Size: size2, Digest: digest2, }, cache, false); err != nil { t.Fatalf("Error saving randomly-generated layer 2 to destination (second copy): %v", err) } manifest := fmt.Sprintf(` { "schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json", "config": { "mediaType": "application/vnd.docker.container.image.v1+json", "size": %d, "digest": "%s" }, "layers": [ { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d }, { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "%s", "size": %d } ] } `, configInfo.Size, configInfo.Digest, digest1, size1, digest2, size2, digest1, size1, digest2, size2) if err := dest.PutManifest(context.Background(), []byte(manifest)); err != nil { t.Fatalf("Error storing manifest to destination: %v", err) } if err := dest.Commit(context.Background()); err != nil { t.Fatalf("Error committing changes to 
destination: %v", err) } dest.Close() img, err := ref.NewImage(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err) } src, err := ref.NewImageSource(context.Background(), systemContext()) if err != nil { t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err) } source, ok := src.(*storageImageSource) if !ok { t.Fatalf("ImageSource is not a storage image") } layers := []string{} layersInfo, err := img.LayerInfosForCopy(context.Background()) if err != nil { t.Fatalf("LayerInfosForCopy() returned error %v", err) } for _, layerInfo := range layersInfo { rc, _, layerID, err := source.getBlobAndLayerID(layerInfo) if err != nil { t.Fatalf("getBlobAndLayerID(%q) returned error %v", layerInfo.Digest, err) } io.Copy(ioutil.Discard, rc) rc.Close() layers = append(layers, layerID) } if len(layers) != 4 { t.Fatalf("Incorrect number of layers: %d", len(layers)) } for i, layerID := range layers { for j, otherID := range layers { if i != j && layerID == otherID { t.Fatalf("Layer IDs are not unique: %v", layers) } } } src.Close() img.Close() } image-4.0.1/storage/storage_transport.go000066400000000000000000000312061354546467100203720ustar00rootroot00000000000000// +build !containers_image_storage_stub package storage import ( "fmt" "path/filepath" "strings" "github.com/pkg/errors" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) const ( minimumTruncatedIDLength = 3 ) func init() { transports.Register(Transport) } var ( // Transport is an ImageTransport that uses either a default // storage.Store or one that's it's explicitly told to use. Transport StoreTransport = &storageTransport{} // ErrInvalidReference is returned when ParseReference() is passed an // empty reference. ErrInvalidReference = errors.New("invalid reference") // ErrPathNotAbsolute is returned when a graph root is not an absolute // path name. ErrPathNotAbsolute = errors.New("path name is not absolute") ) // StoreTransport is an ImageTransport that uses a storage.Store to parse // references, either its own default or one that it's told to use. type StoreTransport interface { types.ImageTransport // SetStore sets the default store for this transport. SetStore(storage.Store) // GetImage retrieves the image from the transport's store that's named // by the reference. GetImage(types.ImageReference) (*storage.Image, error) // GetStoreImage retrieves the image from a specified store that's named // by the reference. GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) // ParseStoreReference parses a reference, overriding any store // specification that it may contain. ParseStoreReference(store storage.Store, reference string) (*storageReference, error) // SetDefaultUIDMap sets the default UID map to use when opening stores. SetDefaultUIDMap(idmap []idtools.IDMap) // SetDefaultGIDMap sets the default GID map to use when opening stores. SetDefaultGIDMap(idmap []idtools.IDMap) // DefaultUIDMap returns the default UID map used when opening stores. DefaultUIDMap() []idtools.IDMap // DefaultGIDMap returns the default GID map used when opening stores. 
DefaultGIDMap() []idtools.IDMap } type storageTransport struct { store storage.Store defaultUIDMap []idtools.IDMap defaultGIDMap []idtools.IDMap } func (s *storageTransport) Name() string { // Still haven't really settled on a name. return "containers-storage" } // SetStore sets the Store object which the Transport will use for parsing // references when information about a Store is not directly specified as part // of the reference. If one is not set, the library will attempt to initialize // one with default settings when a reference needs to be parsed. Calling // SetStore does not affect previously parsed references. func (s *storageTransport) SetStore(store storage.Store) { s.store = store } // SetDefaultUIDMap sets the default UID map to use when opening stores. func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { s.defaultUIDMap = idmap } // SetDefaultGIDMap sets the default GID map to use when opening stores. func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { s.defaultGIDMap = idmap } // DefaultUIDMap returns the default UID map used when opening stores. func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { return s.defaultUIDMap } // DefaultGIDMap returns the default GID map used when opening stores. func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { return s.defaultGIDMap } // ParseStoreReference takes a name or an ID, tries to figure out which it is // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { if ref == "" { return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref) } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) } ref = ref[closeIndex+1:] } // The reference may end with an image ID. Image IDs and digests use the same "@" separator; // here we only peel away an image ID, and leave digests alone. split := strings.LastIndex(ref, "@") id := "" if split != -1 { possibleID := ref[split+1:] if possibleID == "" { return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref) } // If it looks like a digest, leave it alone for now. if _, err := digest.Parse(possibleID); err != nil { // Otherwise… if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil { id = possibleID // … it is a full ID } else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) { // … it is a truncated version of the ID of an image that's present in local storage, // so we might as well use the expanded value. id = img.ID } else { return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID) } // We have recognized an image ID; peel it off. ref = ref[:split] } } // If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's // at least of what we guess is a reasonable minimum length, because we don't want a really short value // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. 
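// For illustration: with minimumTruncatedIDLength = 3 and a store containing an image whose ID
// begins with "0123", the bare reference "0123" is expanded to the full image ID by the check
// below, while a one- or two-character reference such as "a" is always parsed as an image name.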
if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") { if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { // It's a truncated version of the ID of an image that's present in local storage; // we need to expand it. id = img.ID ref = "" } } var named reference.Named // Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been // completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest.. if ref != "" { var err error named, err = reference.ParseNormalizedNamed(ref) if err != nil { return nil, errors.Wrapf(err, "error parsing named reference %q", ref) } named = reference.TagNameOnly(named) } result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id) if err != nil { return nil, err } logrus.Debugf("parsed reference into %q", result.StringWithinTransport()) return result, nil } func (s *storageTransport) GetStore() (storage.Store, error) { // Return the transport's previously-set store. If we don't have one // of those, initialize one now. if s.store == nil { options, err := storage.DefaultStoreOptionsAutoDetectUID() if err != nil { return nil, err } options.UIDMap = s.defaultUIDMap options.GIDMap = s.defaultGIDMap store, err := storage.GetStore(options) if err != nil { return nil, err } s.store = store } return s.store, nil } // ParseReference takes a name and a tag or digest and/or ID // ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. // If _id_ is the ID of an image that's present in local storage, it can be truncated, and // even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. If there is, then it // needs to match a store that was previously initialized using // storage.GetStore(), or be enough to let the storage library fill out // the rest using knowledge that it has from elsewhere. if reference[0] == '[' { closeIndex := strings.IndexRune(reference, ']') if closeIndex < 1 { return nil, ErrInvalidReference } storeSpec := reference[1:closeIndex] reference = reference[closeIndex+1:] // Peel off a "driver@" from the start. driverInfo := "" driverSplit := strings.SplitN(storeSpec, "@", 2) if len(driverSplit) != 2 { if storeSpec == "" { return nil, ErrInvalidReference } } else { driverInfo = driverSplit[0] if driverInfo == "" { return nil, ErrInvalidReference } storeSpec = driverSplit[1] if storeSpec == "" { return nil, ErrInvalidReference } } // Peel off a ":options" from the end. var options []string optionsSplit := strings.SplitN(storeSpec, ":", 2) if len(optionsSplit) == 2 { options = strings.Split(optionsSplit[1], ",") storeSpec = optionsSplit[0] } // Peel off a "+runroot" from the new end. runRootInfo := "" runRootSplit := strings.SplitN(storeSpec, "+", 2) if len(runRootSplit) == 2 { runRootInfo = runRootSplit[1] storeSpec = runRootSplit[0] } // The rest is our graph root. 
rootInfo := storeSpec // Check that any paths are absolute paths. if rootInfo != "" && !filepath.IsAbs(rootInfo) { return nil, ErrPathNotAbsolute } if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { return nil, ErrPathNotAbsolute } store2, err := storage.GetStore(storage.StoreOptions{ GraphDriverName: driverInfo, GraphRoot: rootInfo, RunRoot: runRootInfo, GraphDriverOptions: options, UIDMap: s.defaultUIDMap, GIDMap: s.defaultGIDMap, }) if err != nil { return nil, err } store = store2 } else { // We didn't have a store spec, so use the default. store2, err := s.GetStore() if err != nil { return nil, err } store = store2 } return s.ParseStoreReference(store, reference) } func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() if dref != nil { if img, err := store.Image(dref.String()); err == nil { return img, nil } } if sref, ok := ref.(*storageReference); ok { tmpRef := *sref if img, err := tmpRef.resolveImage(); err == nil { return img, nil } } return nil, storage.ErrImageUnknown } func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { store, err := s.GetStore() if err != nil { return nil, err } return s.GetStoreImage(store, ref) } func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { // Check that there's a store location prefix. Values we're passed are // expected to come from PolicyConfigurationIdentity or // PolicyConfigurationNamespaces, so if there's no store location, // something's wrong. if scope[0] != '[' { return ErrInvalidReference } // Parse the store location prefix. closeIndex := strings.IndexRune(scope, ']') if closeIndex < 1 { return ErrInvalidReference } storeSpec := scope[1:closeIndex] scope = scope[closeIndex+1:] storeInfo := strings.SplitN(storeSpec, "@", 2) if len(storeInfo) == 1 && storeInfo[0] != "" { // One component: the graph root. if !filepath.IsAbs(storeInfo[0]) { return ErrPathNotAbsolute } } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { // Two components: the driver type and the graph root. if !filepath.IsAbs(storeInfo[1]) { return ErrPathNotAbsolute } } else { // Anything else: scope specified in a form we don't // recognize. return ErrInvalidReference } // That might be all of it, and that's okay. if scope == "" { return nil } fields := strings.SplitN(scope, "@", 3) switch len(fields) { case 1: // name only case 2: // name:tag@ID or name[:tag]@digest if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil { if _, digestErr := digest.Parse(fields[1]); digestErr != nil { return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error()) } } case 3: // name[:tag]@digest@ID if _, err := digest.Parse(fields[1]); err != nil { return err } if _, err := digest.Parse("sha256:" + fields[2]); err != nil { return err } default: // Coverage: This should never happen return errors.New("Internal error: unexpected number of fields from strings.SplitN") } // As for field[0], if it is non-empty at all: // FIXME? We could be verifying the various character set and length restrictions // from docker/distribution/reference.regexp.go, but other than that there // are few semantically invalid strings.
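// For reference, scopes this function accepts include (mirroring the cases exercised by the tests
// below): "[/graphroot]", "[driver@/graphroot]", "[driver@/graphroot]busybox",
// "[driver@/graphroot]busybox@<64-hex image ID>", and
// "[driver@/graphroot]busybox:tag@sha256:<64-hex digest>@<64-hex image ID>".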
return nil } image-4.0.1/storage/storage_transport_test.go000066400000000000000000000250561354546467100214370ustar00rootroot00000000000000// +build !containers_image_storage_stub package storage import ( "fmt" "testing" "github.com/containers/image/v4/docker/reference" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) const ( sha256digestHex = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" sha256Digest2 = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" ) func TestTransportName(t *testing.T) { assert.Equal(t, "containers-storage", Transport.Name()) } func TestTransportParseStoreReference(t *testing.T) { const digest3 = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" store := newStore(t) Transport.SetStore(nil) for _, c := range []struct{ input, expectedRef, expectedID string }{ {"", "", ""}, // Empty input // Handling of the store prefix // FIXME? Should we be silently discarding input like this? {"[unterminated", "", ""}, // Unterminated store specifier {"[garbage]busybox", "docker.io/library/busybox:latest", ""}, // Store specifier is overridden by the store we pass to ParseStoreReference {"UPPERCASEISINVALID", "", ""}, // Invalid single-component name {"sha256:" + sha256digestHex, "docker.io/library/sha256:" + sha256digestHex, ""}, // Valid single-component name; the hex part is not an ID unless it has a "@" prefix, so it looks like a tag // FIXME: This test is now incorrect, this should not fail _if the image ID matches_ {sha256digestHex, "", ""}, // Invalid single-component ID; not an ID without a "@" prefix, so it's parsed as a name, but names aren't allowed to look like IDs {"@" + sha256digestHex, "", sha256digestHex}, // Valid single-component ID {"@sha256:" + sha256digestHex, "", ""}, // Invalid un-named @digest // "aaaa", either a valid image ID prefix, or a short form of docker.io/library/aaaa, untested {"sha256:ab", "docker.io/library/sha256:ab", ""}, // Valid single-component name, explicit tag {"busybox", "docker.io/library/busybox:latest", ""}, // Valid single-component name, implicit tag {"busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, explicit tag {"docker.io/library/busybox:notlatest", "docker.io/library/busybox:notlatest", ""}, // Valid single-component name, everything explicit {"UPPERCASEISINVALID@" + sha256digestHex, "", ""}, // Invalid name in name@digestOrID {"busybox@ab", "", ""}, // Invalid ID in name@digestOrID {"busybox@", "", ""}, // Empty ID in name@digestOrID {"busybox@sha256:ab", "", ""}, // Invalid digest in name@digestOrID {"busybox@sha256:" + sha256digestHex, "docker.io/library/busybox@sha256:" + sha256digestHex, ""}, // Valid name@digest, no tag {"busybox@" + sha256digestHex, "docker.io/library/busybox:latest", sha256digestHex}, // Valid name@ID, implicit tag // "busybox@aaaa", a valid image ID prefix, untested {"busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid name@ID, explicit tag {"docker.io/library/busybox:notlatest@" + sha256digestHex, "docker.io/library/busybox:notlatest", sha256digestHex}, // Valid name@ID, everything explicit {"docker.io/library/busybox:notlatest@" + sha256Digest2, "docker.io/library/busybox:notlatest@" + sha256Digest2, ""}, // Valid name:tag@digest, everything explicit {"busybox@sha256:" + sha256digestHex + "@ab", "", ""}, // Invalid ID in name@digest@ID {"busybox@ab@" + sha256digestHex, "", ""}, // Invalid digest in 
name@digest@ID {"busybox@@" + sha256digestHex, "", ""}, // Invalid digest in name@digest@ID {"busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, sha256digestHex}, // name@digest@ID {"docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex, "docker.io/library/busybox@" + sha256Digest2, sha256digestHex}, // name@digest@ID, everything explicit {"docker.io/library/busybox:notlatest@sha256:" + sha256digestHex + "@" + sha256digestHex, "docker.io/library/busybox:notlatest@sha256:" + sha256digestHex, sha256digestHex}, // name:tag@digest@ID, everything explicit // "busybox@sha256:"+sha256digestHex+"@aaaa", a valid image ID prefix, untested {"busybox:notlatest@" + sha256Digest2 + "@" + digest3 + "@" + sha256digestHex, "", ""}, // name@digest@ID, with name containing another digest } { storageRef, err := Transport.ParseStoreReference(store, c.input) if c.expectedRef == "" && c.expectedID == "" { assert.Error(t, err, c.input) } else { require.NoError(t, err, c.input) assert.Equal(t, store, storageRef.transport.store, c.input) if c.expectedRef == "" { assert.Nil(t, storageRef.named, c.input) } else { dockerRef, err := reference.ParseNormalizedNamed(c.expectedRef) require.NoError(t, err) require.NotNil(t, storageRef.named, c.input) assert.Equal(t, dockerRef.String(), storageRef.named.String()) } assert.Equal(t, c.expectedID, storageRef.id, c.input) } } } func TestTransportParseReference(t *testing.T) { store := newStore(t) driver := store.GraphDriverName() root := store.GraphRoot() for _, c := range []struct{ prefix, expectedDriver, expectedRoot, expectedRunRoot string }{ {"", driver, root, ""}, // Implicit store location prefix {"[unterminated", "", "", ""}, // Unterminated store specifier {"[]", "", "", ""}, // Empty store specifier {"[relative/path]", "", "", ""}, // Non-absolute graph root path {"[" + driver + "@relative/path]", "", "", ""}, // Non-absolute graph root path {"[@" + root + "suffix2]", "", "", ""}, // Empty graph driver {"[" + driver + "@]", "", "", ""}, // Empty root path {"[thisisunknown@" + root + "suffix2]", "", "", ""}, // Unknown graph driver {"[" + root + "suffix1]", "", "", ""}, // A valid root path, but no run dir {"[" + driver + "@" + root + "suffix3+relative/path]", "", "", ""}, // Non-absolute run dir {"[" + driver + "@" + root + "suffix3+" + root + "suffix4]", driver, root + "suffix3", root + "suffix4"}, // A valid root@graph+run set {"[" + driver + "@" + root + "suffix3+" + root + "suffix4:options,options,options]", driver, root + "suffix3", root + "suffix4"}, // A valid root@graph+run+options set } { t.Logf("parsing %q", c.prefix+"busybox") ref, err := Transport.ParseReference(c.prefix + "busybox") if c.expectedDriver == "" { assert.Error(t, err, c.prefix) } else { require.NoError(t, err, c.prefix) storageRef, ok := ref.(*storageReference) require.True(t, ok, c.prefix) assert.Equal(t, c.expectedDriver, storageRef.transport.store.GraphDriverName(), c.prefix) assert.Equal(t, c.expectedRoot, storageRef.transport.store.GraphRoot(), c.prefix) if c.expectedRunRoot != "" { assert.Equal(t, c.expectedRunRoot, storageRef.transport.store.RunRoot(), c.prefix) } } } } func TestTransportValidatePolicyConfigurationScope(t *testing.T) { store := newStore(t) driver := store.GraphDriverName() root := store.GraphRoot() storeSpec := fmt.Sprintf("[%s@%s]", driver, root) // As computed in PolicyConfigurationNamespaces // Valid inputs for _, scope := range []string{ "[" + root + "suffix1]", // driverlessStoreSpec in 
PolicyConfigurationNamespaces "[" + driver + "@" + root + "suffix3]", // storeSpec in PolicyConfigurationNamespaces storeSpec + "@" + sha256digestHex, // ID only storeSpec + "docker.io", // Host name only storeSpec + "docker.io/library", // A repository namespace storeSpec + "docker.io/library/busybox", // A repository name storeSpec + "docker.io/library/busybox:notlatest", // name:tag storeSpec + "docker.io/library/busybox:notlatest@" + sha256digestHex, // name@ID storeSpec + "docker.io/library/busybox@" + sha256Digest2, // name@digest storeSpec + "docker.io/library/busybox@" + sha256Digest2 + "@" + sha256digestHex, // name@digest@ID storeSpec + "docker.io/library/busybox:notlatest@" + sha256Digest2, // name:tag@digest storeSpec + "docker.io/library/busybox:notlatest@" + sha256Digest2 + "@" + sha256digestHex, // name:tag@digest@ID } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.NoError(t, err, scope) } // Invalid inputs for _, scope := range []string{ "busybox", // Unprefixed reference "[unterminated", // Unterminated store specifier "[]", // Empty store specifier "[relative/path]", // Non-absolute graph root path "[" + driver + "@relative/path]", // Non-absolute graph root path // "[thisisunknown@" + root + "suffix2]", // Unknown graph driver FIXME: validate against storage.ListGraphDrivers() once that's available storeSpec + "@", // An incomplete two-component name storeSpec + "docker.io/library/busybox@sha256:ab", // Invalid digest in name@digest storeSpec + "docker.io/library/busybox@ab", // Invalid ID in name@ID storeSpec + "docker.io/library/busybox@", // Empty ID/digest in name@ID storeSpec + "docker.io/library/busybox@@" + sha256digestHex, // Empty digest in name@digest@ID storeSpec + "docker.io/library/busybox@ab@" + sha256digestHex, // Invalid digest in name@digest@ID storeSpec + "docker.io/library/busybox@sha256:ab@" + sha256digestHex, // Invalid digest in name@digest@ID storeSpec + "docker.io/library/busybox@" + sha256Digest2 + "@", // Empty ID in name@digest@ID storeSpec + "docker.io/library/busybox@" + sha256Digest2 + "@ab", // Invalid ID in name@digest@ID } { err := Transport.ValidatePolicyConfigurationScope(scope) assert.Error(t, err, scope) } } image-4.0.1/tarball/000077500000000000000000000000001354546467100142365ustar00rootroot00000000000000image-4.0.1/tarball/doc.go000066400000000000000000000026541354546467100153410ustar00rootroot00000000000000// Package tarball provides a way to generate images using one or more layer // tarballs and an optional template configuration. 
// // An example: // package main // // import ( // "context" // // cp "github.com/containers/image/v4/copy" // "github.com/containers/image/v4/signature" // "github.com/containers/image/v4/tarball" // "github.com/containers/image/v4/transports/alltransports" // // imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" // ) // // func imageFromTarball() { // src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") // // - or - // // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") // if err != nil { // panic(err) // } // updater, ok := src.(tarball.ConfigUpdater) // if !ok { // panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") // } // config := imgspecv1.Image{ // Config: imgspecv1.ImageConfig{ // Cmd: []string{"/bin/bash"}, // }, // } // annotations := make(map[string]string) // annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" // err = updater.ConfigUpdate(config, annotations) // if err != nil { // panic(err) // } // dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") // if err != nil { // panic(err) // } // policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}} // policyContext, err := signature.NewPolicyContext(policy) // if err != nil { // panic(err) // } // defer policyContext.Destroy() // _, err = cp.Image(context.Background(), policyContext, dest, src, nil) // if err != nil { // panic(err) // } // } package tarball image-4.0.1/tarball/tarball_reference.go000066400000000000000000000056271354546467100202360ustar00rootroot00000000000000package tarball import ( "context" "fmt" "os" "strings" "github.com/containers/image/v4/docker/reference" "github.com/containers/image/v4/image" "github.com/containers/image/v4/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ConfigUpdater is an interface that ImageReferences for "tarball" images also // implement. It can be used to set values for a configuration, and to set // image annotations which will be present in the images returned by the // reference's NewImage() or NewImageSource() methods. type ConfigUpdater interface { ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error } type tarballReference struct { transport types.ImageTransport config imgspecv1.Image annotations map[string]string filenames []string stdin []byte } // ConfigUpdate updates the image's default configuration and adds annotations // which will be visible in source images created using this reference. func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { r.config = config if r.annotations == nil { r.annotations = make(map[string]string) } for k, v := range annotations { r.annotations[k] = v } return nil } func (r *tarballReference) Transport() types.ImageTransport { return r.transport } func (r *tarballReference) StringWithinTransport() string { return strings.Join(r.filenames, ":") } func (r *tarballReference) DockerReference() reference.Named { return nil } func (r *tarballReference) PolicyConfigurationIdentity() string { return "" } func (r *tarballReference) PolicyConfigurationNamespaces() []string { return nil } // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
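// A minimal usage sketch (the tarball path is hypothetical):
// ref, err := tarball.Transport.ParseReference("/tmp/rootfs.tar.gz")
// if err != nil { /* handle the error */ }
// img, err := ref.NewImage(context.Background(), nil)
// if err != nil { /* handle the error */ }
// defer img.Close()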
func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { src, err := r.NewImageSource(ctx, sys) if err != nil { return nil, err } img, err := image.FromSource(ctx, sys, src) if err != nil { src.Close() return nil, err } return img, nil } func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { for _, filename := range r.filenames { if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { return fmt.Errorf("error removing %q: %v", filename, err) } } return nil } func (r *tarballReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) } image-4.0.1/tarball/tarball_src.go000066400000000000000000000215621354546467100170630ustar00rootroot00000000000000package tarball import ( "bytes" "context" "encoding/json" "fmt" "io" "io/ioutil" "os" "runtime" "strings" "time" "github.com/containers/image/v4/types" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) type tarballImageSource struct { reference tarballReference filenames []string diffIDs []digest.Digest diffSizes []int64 blobIDs []digest.Digest blobSizes []int64 blobTypes []string config []byte configID digest.Digest configSize int64 manifest []byte } func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { // Gather up the digests, sizes, and date information for all of the files. filenames := []string{} diffIDs := []digest.Digest{} diffSizes := []int64{} blobIDs := []digest.Digest{} blobSizes := []int64{} blobTimes := []time.Time{} blobTypes := []string{} for _, filename := range r.filenames { var file *os.File var err error var blobSize int64 var blobTime time.Time var reader io.Reader if filename == "-" { blobSize = int64(len(r.stdin)) blobTime = time.Now() reader = bytes.NewReader(r.stdin) } else { file, err = os.Open(filename) if err != nil { return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) } defer file.Close() reader = file fileinfo, err := file.Stat() if err != nil { return nil, fmt.Errorf("error reading size of %q: %v", filename, err) } blobSize = fileinfo.Size() blobTime = fileinfo.ModTime() } // Default to assuming the layer is compressed. layerType := imgspecv1.MediaTypeImageLayerGzip // Set up to digest the file as it is. blobIDdigester := digest.Canonical.Digester() reader = io.TeeReader(reader, blobIDdigester.Hash()) // Set up to digest the file after we maybe decompress it. diffIDdigester := digest.Canonical.Digester() uncompressed, err := pgzip.NewReader(reader) if err == nil { // It is compressed, so the diffID is the digest of the uncompressed version reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) } else { // It is not compressed, so the diffID and the blobID are going to be the same diffIDdigester = blobIDdigester layerType = imgspecv1.MediaTypeImageLayer uncompressed = nil } // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). n, err := io.Copy(ioutil.Discard, reader) if err != nil { return nil, fmt.Errorf("error reading %q: %v", filename, err) } if uncompressed != nil { uncompressed.Close() } // Grab our uncompressed and possibly-compressed digests and sizes. 
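// (To recap the branches above: for a gzip-compressed tarball the diffID digests the decompressed
// stream while the blobID digests the file as stored; for an uncompressed tarball the two
// digesters are one and the same, so diffID == blobID and both report the same size.)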
filenames = append(filenames, filename) diffIDs = append(diffIDs, diffIDdigester.Digest()) diffSizes = append(diffSizes, n) blobIDs = append(blobIDs, blobIDdigester.Digest()) blobSizes = append(blobSizes, blobSize) blobTimes = append(blobTimes, blobTime) blobTypes = append(blobTypes, layerType) } // Build the rootfs and history for the configuration blob. rootfs := imgspecv1.RootFS{ Type: "layers", DiffIDs: diffIDs, } created := time.Time{} history := []imgspecv1.History{} // Pick up the layer comment from the configuration's history list, if one is set. comment := "imported from tarball" if len(r.config.History) > 0 && r.config.History[0].Comment != "" { comment = r.config.History[0].Comment } for i := range diffIDs { createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) history = append(history, imgspecv1.History{ Created: &blobTimes[i], CreatedBy: createdBy, Comment: comment, }) // Use the mtime of the most recently modified file as the image's creation time. if created.Before(blobTimes[i]) { created = blobTimes[i] } } // Pick up other defaults from the config in the reference. config := r.config if config.Created == nil { config.Created = &created } if config.Architecture == "" { config.Architecture = runtime.GOARCH } if config.OS == "" { config.OS = runtime.GOOS } config.RootFS = rootfs config.History = history // Encode and digest the image configuration blob. configBytes, err := json.Marshal(&config) if err != nil { return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) } configID := digest.Canonical.FromBytes(configBytes) configSize := int64(len(configBytes)) // Populate a manifest with the configuration blob and the file as the single layer. layerDescriptors := []imgspecv1.Descriptor{} for i := range blobIDs { layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ Digest: blobIDs[i], Size: blobSizes[i], MediaType: blobTypes[i], }) } annotations := make(map[string]string) for k, v := range r.annotations { annotations[k] = v } manifest := imgspecv1.Manifest{ Versioned: imgspecs.Versioned{ SchemaVersion: 2, }, Config: imgspecv1.Descriptor{ Digest: configID, Size: configSize, MediaType: imgspecv1.MediaTypeImageConfig, }, Layers: layerDescriptors, Annotations: annotations, } // Encode the manifest. manifestBytes, err := json.Marshal(&manifest) if err != nil { return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) } // Return the image. src := &tarballImageSource{ reference: *r, filenames: filenames, diffIDs: diffIDs, diffSizes: diffSizes, blobIDs: blobIDs, blobSizes: blobSizes, blobTypes: blobTypes, config: configBytes, configID: configID, configSize: configSize, manifest: manifestBytes, } return src, nil } func (is *tarballImageSource) Close() error { return nil } // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. func (is *tarballImageSource) HasThreadSafeGetBlob() bool { return false } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
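// A sketch of a typical call, assuming the caller has a digest d from the manifest; none.NoCache
// (from github.com/containers/image/v4/pkg/blobinfocache/none) can serve as the cache argument
// when no caching is wanted:
// rc, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: d}, none.NoCache)
// if err == nil { defer rc.Close() }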
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We should only be asked about things in the manifest. Maybe the configuration blob. if blobinfo.Digest == is.configID { return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil } // Maybe one of the layer blobs. for i := range is.blobIDs { if blobinfo.Digest == is.blobIDs[i] { // We want to read that layer: open the file or memory block and hand it back. if is.filenames[i] == "-" { return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil } reader, err := os.Open(is.filenames[i]) if err != nil { return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) } return reader, is.blobSizes[i], nil } } return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). func (is *tarballImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { if instanceDigest != nil { return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) } return is.manifest, imgspecv1.MediaTypeImageManifest, nil } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if instanceDigest != nil { return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) } return nil, nil } func (is *tarballImageSource) Reference() types.ImageReference { return &is.reference } // LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. func (*tarballImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { return nil, nil } image-4.0.1/tarball/tarball_transport.go000066400000000000000000000026611354546467100203270ustar00rootroot00000000000000package tarball import ( "errors" "fmt" "io/ioutil" "os" "strings" "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" ) const ( transportName = "tarball" separator = ":" ) var ( // Transport implements the types.ImageTransport interface for "tarball:" images, // which are makeshift images constructed using one or more possibly-compressed tar // archives. 
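// For example (hypothetical file names), a reference naming two layer archives:
// ref, err := Transport.ParseReference("base.tar.gz:app.tar.gz")
// Each colon-separated file must exist at parse time; the special name "-" reads a tarball from
// standard input, as implemented in ParseReference below.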
Transport = &tarballTransport{} ) type tarballTransport struct { } func (t *tarballTransport) Name() string { return transportName } func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { var stdin []byte var err error filenames := strings.Split(reference, separator) for _, filename := range filenames { if filename == "-" { stdin, err = ioutil.ReadAll(os.Stdin) if err != nil { return nil, fmt.Errorf("error buffering stdin: %v", err) } continue } f, err := os.Open(filename) if err != nil { return nil, fmt.Errorf("error opening %q: %v", filename, err) } f.Close() } ref := &tarballReference{ transport: t, filenames: filenames, stdin: stdin, } return ref, nil } func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { // See the explanation in daemonReference.PolicyConfigurationIdentity. return errors.New(`tarball: does not support any scopes except the default "" one`) } func init() { transports.Register(Transport) } image-4.0.1/transports/000077500000000000000000000000001354546467100150345ustar00rootroot00000000000000image-4.0.1/transports/alltransports/000077500000000000000000000000001354546467100177445ustar00rootroot00000000000000image-4.0.1/transports/alltransports/alltransports.go000066400000000000000000000032271354546467100232070ustar00rootroot00000000000000package alltransports import ( "strings" // register all known transports // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating // a transport. _ "github.com/containers/image/v4/directory" _ "github.com/containers/image/v4/docker" _ "github.com/containers/image/v4/docker/archive" _ "github.com/containers/image/v4/oci/archive" _ "github.com/containers/image/v4/oci/layout" _ "github.com/containers/image/v4/openshift" _ "github.com/containers/image/v4/tarball" // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go "github.com/containers/image/v4/transports" "github.com/containers/image/v4/types" "github.com/pkg/errors" ) // ParseImageName converts a URL-like image name to a types.ImageReference. func ParseImageName(imgName string) (types.ImageReference, error) { // Keep this in sync with TransportFromImageName! parts := strings.SplitN(imgName, ":", 2) if len(parts) != 2 { return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) } transport := transports.Get(parts[0]) if transport == nil { return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) } return transport.ParseReference(parts[1]) } // TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when // the transport is unknown or when the input is invalid. func TransportFromImageName(imageName string) types.ImageTransport { // Keep this in sync with ParseImageName! parts := strings.SplitN(imageName, ":", 2) if len(parts) == 2 { return transports.Get(parts[0]) } return nil } image-4.0.1/transports/alltransports/alltransports_test.go000066400000000000000000000046401354546467100242460ustar00rootroot00000000000000package alltransports import ( "testing" "github.com/containers/image/v4/directory" "github.com/containers/image/v4/transports" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestParseImageName(t *testing.T) { // This primarily tests error handling, TestImageNameHandling is a table-driven // test for the expected values. 
for _, name := range []string{ "", // Empty "busybox", // No transport name ":busybox", // Empty transport name "docker:", // Empty transport reference } { _, err := ParseImageName(name) assert.Error(t, err, name) } } // A table-driven test summarizing the various transports' behavior. func TestImageNameHandling(t *testing.T) { // Always registered transports for _, c := range []struct{ transport, input, roundtrip string }{ {"dir", "/etc", "/etc"}, {"docker", "//busybox", "//busybox:latest"}, {"docker", "//busybox:notlatest", "//busybox:notlatest"}, // This also tests handling of multiple ":" characters {"docker-archive", "/var/lib/oci/busybox.tar:busybox:latest", "/var/lib/oci/busybox.tar:docker.io/library/busybox:latest"}, {"docker-archive", "busybox.tar:busybox:latest", "busybox.tar:docker.io/library/busybox:latest"}, {"oci", "/etc:someimage", "/etc:someimage"}, {"oci", "/etc:someimage:mytag", "/etc:someimage:mytag"}, {"oci-archive", "/etc:someimage", "/etc:someimage"}, {"oci-archive", "/etc:someimage:mytag", "/etc:someimage:mytag"}, // "atomic" not tested here because it depends on per-user configuration for the default cluster. // "containers-storage" not tested here because it needs to initialize various directories on the fs. } { fullInput := c.transport + ":" + c.input ref, err := ParseImageName(fullInput) require.NoError(t, err, fullInput) s := transports.ImageName(ref) assert.Equal(t, c.transport+":"+c.roundtrip, s, fullInput) } // Possibly stubbed-out transports: Only verify that something is registered. for _, c := range []string{"docker-daemon", "ostree"} { transport := transports.Get(c) assert.NotNil(t, transport, c) } } func TestTransportFromImageName(t *testing.T) { dirTransport := TransportFromImageName("dir:/tmp/test") assert.Equal(t, dirTransport.Name(), directory.Transport.Name()) unknownTransport := TransportFromImageName("unknown:ref:test") assert.Equal(t, unknownTransport, nil) invalidName := TransportFromImageName("unknown") assert.Equal(t, invalidName, nil) } image-4.0.1/transports/alltransports/docker_daemon.go000066400000000000000000000002551354546467100230670ustar00rootroot00000000000000// +build !containers_image_docker_daemon_stub package alltransports import ( // Register the docker-daemon transport _ "github.com/containers/image/v4/docker/daemon" ) image-4.0.1/transports/alltransports/docker_daemon_stub.go000066400000000000000000000003151354546467100241210ustar00rootroot00000000000000// +build containers_image_docker_daemon_stub package alltransports import "github.com/containers/image/v4/transports" func init() { transports.Register(transports.NewStubTransport("docker-daemon")) } image-4.0.1/transports/alltransports/ostree.go000066400000000000000000000002301354546467100215670ustar00rootroot00000000000000// +build containers_image_ostree,linux package alltransports import ( // Register the ostree transport _ "github.com/containers/image/v4/ostree" ) image-4.0.1/transports/alltransports/ostree_stub.go000066400000000000000000000003021354546467100226240ustar00rootroot00000000000000// +build !containers_image_ostree !linux package alltransports import "github.com/containers/image/v4/transports" func init() { transports.Register(transports.NewStubTransport("ostree")) } image-4.0.1/transports/alltransports/storage.go000066400000000000000000000002331354546467100217350ustar00rootroot00000000000000// +build !containers_image_storage_stub package alltransports import ( // Register the storage transport _ "github.com/containers/image/v4/storage" ) 
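// This registration is compiled out when the "containers_image_storage_stub" build tag is set,
// for example with:
// go build -tags containers_image_storage_stub ./...
// In that case storage_stub.go (below) registers a stub "containers-storage" transport that
// rejects all references instead.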
image-4.0.1/transports/alltransports/storage_stub.go000066400000000000000000000003141354546467100227720ustar00rootroot00000000000000// +build containers_image_storage_stub package alltransports import "github.com/containers/image/v4/transports" func init() { transports.Register(transports.NewStubTransport("containers-storage")) } image-4.0.1/transports/stub.go000066400000000000000000000033451354546467100163450ustar00rootroot00000000000000package transports import ( "fmt" "github.com/containers/image/v4/types" ) // stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. type stubTransport string // NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. func NewStubTransport(name string) types.ImageTransport { return stubTransport(name) } // Name returns the name of the transport, which must be unique among other transports. func (s stubTransport) Name() string { return string(s) } // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) } // ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. // The scope passed to this function will not be ""; that value is always allowed. func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { // Allowing any reference in here allows tools with some transports stubbed-out to still // use signature verification policies which refer to these stubbed-out transports. // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON . return nil } image-4.0.1/transports/stub_test.go000066400000000000000000000006331354546467100174010ustar00rootroot00000000000000package transports import ( "testing" "github.com/stretchr/testify/assert" ) func TestStubTransport(t *testing.T) { const name = "whatever" s := NewStubTransport(name) assert.Equal(t, name, s.Name()) _, err := s.ParseReference("this is rejected regardless of content") assert.Error(t, err) err = s.ValidatePolicyConfigurationScope("this is accepted regardless of content") assert.NoError(t, err) } image-4.0.1/transports/transports.go000066400000000000000000000042501354546467100176030ustar00rootroot00000000000000package transports import ( "fmt" "sort" "sync" "github.com/containers/image/v4/types" ) // knownTransports is a registry of known ImageTransport instances.
type knownTransports struct { transports map[string]types.ImageTransport mu sync.Mutex } func (kt *knownTransports) Get(k string) types.ImageTransport { kt.mu.Lock() t := kt.transports[k] kt.mu.Unlock() return t } func (kt *knownTransports) Remove(k string) { kt.mu.Lock() delete(kt.transports, k) kt.mu.Unlock() } func (kt *knownTransports) Add(t types.ImageTransport) { kt.mu.Lock() defer kt.mu.Unlock() name := t.Name() if t := kt.transports[name]; t != nil { panic(fmt.Sprintf("Duplicate image transport name %s", name)) } kt.transports[name] = t } var kt *knownTransports func init() { kt = &knownTransports{ transports: make(map[string]types.ImageTransport), } } // Get returns the transport specified by name or nil when unavailable. func Get(name string) types.ImageTransport { return kt.Get(name) } // Delete deletes a transport from the registered transports. func Delete(name string) { kt.Remove(name) } // Register registers a transport. func Register(t types.ImageTransport) { kt.Add(t) } // ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that // ParseImageName(ImageName(reference)) returns an equivalent reference. // // This is the generally recommended way to refer to images in the UI. // // NOTE: The returned string is not promised to be equal to the original input to ParseImageName; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. func ImageName(ref types.ImageReference) string { return ref.Transport().Name() + ":" + ref.StringWithinTransport() } // ListNames returns a list of non-deprecated transport names. // Deprecated transports can be used, but are not presented to users. func ListNames() []string { kt.mu.Lock() defer kt.mu.Unlock() deprecated := map[string]bool{ "atomic": true, } var names []string for _, transport := range kt.transports { if !deprecated[transport.Name()] { names = append(names, transport.Name()) } } sort.Strings(names) return names } image-4.0.1/types/000077500000000000000000000000001354546467100137615ustar00rootroot00000000000000image-4.0.1/types/types.go000066400000000000000000001021321354546467100154530ustar00rootroot00000000000000package types import ( "context" "io" "time" "github.com/containers/image/v4/docker/reference" compression "github.com/containers/image/v4/pkg/compression/types" digest "github.com/opencontainers/go-digest" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageTransport is a top-level namespace for ways to store/load an image. // It should generally correspond to ImageSource/ImageDestination implementations. // // Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. // For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS // (or, even, IPv4 or IPv6). // // OTOH all images using the same transport should (apart from versions of the image format) be interoperable. // For example, several different ImageTransport implementations may be based on local filesystem paths, // but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) // // See also transports.KnownTransports. type ImageTransport interface { // Name returns the name of the transport, which must be unique among other transports.
Name() string // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. ParseReference(reference string) (ImageReference, error) // ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. // The scope passed to this function will not be ""; that value is always allowed. ValidatePolicyConfigurationScope(scope string) error } // ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. // // The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening // within an ImageTransport.ParseReference() or equivalent API creating the reference object. // That's also why the various identification/formatting methods of this type do not support returning errors. // // WARNING: While this design freezes the content of the reference within this process, it cannot freeze the outside // world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. type ImageReference interface { Transport() ImageTransport // StringWithinTransport returns a string representation of the reference, which MUST be such that // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. // NOTE: The returned string is not promised to be equal to the original input to ParseReference; // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; // instead, see transports.ImageName(). StringWithinTransport() string // DockerReference returns a Docker reference associated with this reference // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, // not e.g. after redirect or alias processing), or nil if unknown/not applicable. DockerReference() reference.Named // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; // the value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical // (i.e. various references with exactly the same semantics should return the same configuration identity). // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but // not required/guaranteed that it will be a valid input to Transport().ParseReference(). // Returns "" if configuration identities for these references are not supported. PolicyConfigurationIdentity() string // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed // in order, terminating on first match, and an implicit "" is always checked at the end. // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), // and each following element to be a prefix of the element preceding it.
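// For example (an illustrative sketch following the docker transport's conventions, not a guarantee of this interface): // for a PolicyConfigurationIdentity() of "docker.io/library/busybox:latest", this could return // {"docker.io/library/busybox", "docker.io/library", "docker.io"}.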
PolicyConfigurationNamespaces() []string // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error) // DeleteImage deletes the named image from the registry, if supported. DeleteImage(ctx context.Context, sys *SystemContext) error } // LayerCompression indicates whether layers must be compressed, decompressed or preserved type LayerCompression int const ( // PreserveOriginal indicates the layer must be preserved, i.e. // no compression or decompression. PreserveOriginal LayerCompression = iota // Decompress indicates the layer must be decompressed Decompress // Compress indicates the layer must be compressed Compress ) // BlobInfo collects known information about a blob (layer/config). // In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. type BlobInfo struct { Digest digest.Digest // "" if unknown. Size int64 // -1 if unknown URLs []string Annotations map[string]string MediaType string // CompressionOperation is used in Image.UpdateLayerInfos to instruct // whether the original layer should be preserved or (de)compressed. The // field defaults to preserve the original layer. CompressionOperation LayerCompression // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be // set when `CompressionOperation == Compress`. CompressionAlgorithm *compression.Algorithm } // BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. // BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest). // The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. // // NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different // tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, // at least by not failing hard when encountering unknown data. type BICTransportScope struct { Opaque string } // BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope. // Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob // can look it up using BlobInfoCache.CandidateLocations. // // NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different // tools which use different versions of the transport.
Allow for reasonable backward/forward compatibility, // at least by not failing hard when encountering unknown data. type BICLocationReference struct { Opaque string } // BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations. type BICReplacementCandidate struct { Digest digest.Digest Location BICLocationReference } // BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies. // // It records two kinds of data: // - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: // One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. // This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression), // or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload). // // It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known // to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value). // // This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently // compress/decompress blobs for their own purposes. // // - Known blob locations, managed by individual transports: // The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob), // recording transport-specific information that allows the transport to reuse the blob in the future; // then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused. // // Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs // can be directly reused within a registry, or mounted across registries within a registry server.) // // None of the methods return an error indication: errors when reading from, or writing to, the cache should not be fatal; // users of the cache should just fall back to copying the blobs the usual way. type BlobInfoCache interface { // UncompressedDigest returns an uncompressed digest corresponding to anyDigest. // May return anyDigest if it is known to be uncompressed. // Returns "" if nothing is known about the digest (it may be compressed or uncompressed). UncompressedDigest(anyDigest digest.Digest) digest.Digest // RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest. // It’s allowed for anyDigest == uncompressed. // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data.
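// (As an illustration, not a contract of this interface: a registry-backed transport could record the repository // in which a digest was seen, so that a later push can attempt a cross-repository blob mount instead of re-uploading.)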
RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference) // CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused // within the specified (transport, scope) (if they still exist, which is not guaranteed). // // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate } // ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). // This is primarily useful for copying images around; for examining their properties, Image (below) // is usually more useful. // Each ImageSource should eventually be closed by calling Close(). // // WARNING: Various methods which return an object identified by digest generally do not // validate that the returned data actually matches that digest; this is the caller’s responsibility. type ImageSource interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference // Close removes resources associated with an initialized ImageSource, if any. Close() error // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. HasThreadSafeGetBlob() bool // GetSignatures returns the image's signatures. It may use a remote (= slow) service. // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list // (e.g. if the source never returns manifest lists). GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant.
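// (E.g. a manifest may legitimately list the same layer blob more than once, and each occurrence affects the composed filesystem.)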
LayerInfosForCopy(ctx context.Context) ([]BlobInfo, error) } // ImageDestination is a service, possibly remote (= slow), to store components of a single image. // // There is a specific required order for some of the calls: // TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) // PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) // Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. // // Each ImageDestination should eventually be closed by calling Close(). type ImageDestination interface { // Reference returns the reference used to set up this destination. Note that this should directly correspond to the user's intent, // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. Reference() ImageReference // Close removes resources associated with an initialized ImageDestination, if any. Close() error // SupportedManifestMIMETypes tells which manifest MIME types the destination supports. // If an empty slice or nil is returned, then any MIME type can be tried for upload. SupportedManifestMIMETypes() []string // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. SupportsSignatures(ctx context.Context) error // DesiredLayerCompression indicates the kind of compression to apply to layers DesiredLayerCompression() LayerCompression // AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. AcceptsForeignLayerURLs() bool // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. MustMatchRuntimeOS() bool // IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), // and would prefer to receive an unmodified manifest instead of one modified for the destination. // Does not make a difference if Reference().DockerReference() is nil. IgnoresEmbeddedDockerReference() bool // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. // inputInfo.MediaType describes the blob format, if known. // May update cache. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error) // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. HasThreadSafePutBlob() bool // TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination // (e.g.
if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). // info.Digest must not be empty. // If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input. // If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size. // If the transport cannot reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. // May use and/or update cache. TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error) // PutManifest writes manifest to the destination. // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. // If the destination is in principle available, but refuses this specific manifest type (e.g. it does not recognize the schema) // while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. PutManifest(ctx context.Context, manifest []byte) error PutSignatures(ctx context.Context, signatures [][]byte) error // Commit marks the process of storing the image as successful and asks for the image to be persisted. // WARNING: This does not have any transactional semantics: // - Uploaded data MAY be visible to others before Commit() is called // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) Commit(ctx context.Context) error } // ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available, // refuses specifically this manifest type, but may accept a different manifest type. type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. Err error } func (e ManifestTypeRejectedError) Error() string { return e.Err.Error() } // UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs. // Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, // allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. // This also makes the UnparsedImage→Image conversion an explicitly visible step. // // An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. // // The UnparsedImage must not be used after the underlying ImageSource is Close()d. type UnparsedImage interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. Manifest(ctx context.Context) ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures(ctx context.Context) ([][]byte, error) } // Image is the primary API for inspecting properties of images.
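// (An Image is typically obtained via ImageReference.NewImage, or from an UnparsedImage via image.FromUnparsedImage.)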
// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. // // The Image must not be used after the underlying ImageSource is Close()d. type Image interface { // Note that Reference may return nil in the return value of UpdatedImage! UnparsedImage // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() BlobInfo // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. ConfigBlob(context.Context) ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about // layers in the resulting configuration isn't guaranteed to be returned due to how // old image manifests work (docker v2s1 especially). OCIConfig(context.Context) (*v1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []BlobInfo // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfosForCopy(context.Context) ([]BlobInfo, error) // EmbeddedDockerReferenceConflicts returns whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref. // It returns false if the manifest does not embed a Docker reference. // (This embedding unfortunately happens for Docker schema1; please do not add support for this in any new formats.) EmbeddedDockerReferenceConflicts(ref reference.Named) bool // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. Inspect(context.Context) (*ImageInspectInfo, error) // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive // (most importantly it forces us to download the full layers even if they are already present at the destination). UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool // UpdatedImage returns a types.Image modified according to options. // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. // This does not change the state of the original Image object. UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) // Size returns an approximation of the amount of disk space which is consumed by the image in its current // location. If the size is not known, -1 will be returned. Size() (int64, error) } // ImageCloser is an Image with a Close() method which must be called by the user.
// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, // to ensure that the ImageSource is closed. type ImageCloser interface { Image // Close removes resources associated with an initialized ImageCloser. Close() error } // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage type ManifestUpdateOptions struct { LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. EmbeddedDockerReference reference.Named ManifestMIMEType string // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. InformationOnly ManifestUpdateInformation } // ManifestUpdateInformation is a component of ManifestUpdateOptions, named here // only to make writing struct literals possible. type ManifestUpdateInformation struct { Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. } // ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. // The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported // for other manifest types. type ImageInspectInfo struct { Tag string Created *time.Time DockerVersion string Labels map[string]string Architecture string Os string Layers []string Env []string } // DockerAuthConfig contains authorization information for connecting to a registry. // The values of Username and Password can be empty when accessing the registry anonymously. type DockerAuthConfig struct { Username string Password string } // OptionalBool is a boolean with an additional undefined value, which is meant // to be used in the context of user input to distinguish between a // user-specified value and a default value. type OptionalBool byte const ( // OptionalBoolUndefined indicates that the OptionalBool hasn't been written. OptionalBoolUndefined OptionalBool = iota // OptionalBoolTrue represents the boolean true. OptionalBoolTrue // OptionalBoolFalse represents the boolean false. OptionalBoolFalse ) // NewOptionalBool converts the input bool into either OptionalBoolTrue or // OptionalBoolFalse. The function is meant to avoid boilerplate code for users. func NewOptionalBool(b bool) OptionalBool { o := OptionalBoolFalse if b { o = OptionalBoolTrue } return o } // SystemContext allows parameterizing access to implicitly-accessed resources, // like configuration files in /etc and users' login state in their home directory. // Various components can share the same field only if their semantics is exactly // the same; if in doubt, add a new field. // It is always OK to pass nil instead of a SystemContext. type SystemContext struct { // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/). // Not used for any of the more specific path overrides available in this struct. // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
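// For example (an illustration, assuming the usual /etc/containers layout rather than documenting one): with // RootForImplicitAbsolutePaths set to "/mnt/sysroot", a default like /etc/containers/policy.json would be looked up // as /mnt/sysroot/etc/containers/policy.json.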
// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths, // and there is no need to worry about the environment.) // NOTE: This does NOT affect paths starting by $HOME. RootForImplicitAbsolutePaths string // === Global configuration overrides === // If not "", overrides the system's default path for signature.Policy configuration. SignaturePolicyPath string // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) RegistriesDirPath string // Path to the system-wide registries configuration file SystemRegistriesConfPath string // If not "", overrides the default path for the authentication file AuthFilePath string // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. ArchitectureChoice string // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. OSChoice string // If not "", overrides the system's default directory containing a blob info cache. BlobInfoCacheDir string // Additional tags when creating or copying a docker-archive. DockerArchiveAdditionalTags []reference.NamedTagged // === OCI.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), // a client certificate (ending with ".cert") and a client certificate key // (ending with ".key") used when downloading OCI image layers. OCICertPath string // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. OCIInsecureSkipTLSVerify bool // If not "", use a shared directory for storing blobs rather than within OCI layouts OCISharedBlobDirPath string // Allow writing uncompressed image layers for OCI images (i.e. do not force compression) OCIAcceptUncompressedLayers bool // === docker.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), // a client certificate (ending with ".cert") and a client certificate key // (ending with ".key") used when talking to a Docker Registry. DockerCertPath string // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. // Ignored if DockerCertPath is non-empty. DockerPerHostCertDirPath string // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. DockerInsecureSkipTLSVerify OptionalBool // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials DockerAuthConfig *DockerAuthConfig // if not "", a User-Agent header is added to each request when contacting a registry. DockerRegistryUserAgent string // if true, a V1 ping attempt isn't done to give users a better error. Default is false. // Note that this field is used mainly to integrate containers/image into projectatomic/docker // in order not to break docker's existing integration tests. DockerDisableV1Ping bool // Directory to use for OSTree temporary files OSTreeTmpDirPath string // === docker/daemon.Transport overrides === // A directory containing a CA certificate (ending with ".crt"), // a client certificate (ending with ".cert") and a client certificate key // (ending with ".key") used when talking to a Docker daemon. DockerDaemonCertPath string // The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
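// (Illustrative values, following the Docker client's address conventions: "unix:///var/run/docker.sock" or "tcp://127.0.0.1:2375".)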
DockerDaemonHost string // Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well. DockerDaemonInsecureSkipTLSVerify bool // === dir.Transport overrides === // DirForceCompress compresses the image layers if set to true DirForceCompress bool // CompressionFormat is the format to use for the compression of the blobs CompressionFormat *compression.Algorithm // CompressionLevel specifies what compression level is used CompressionLevel *int } // ProgressProperties is used to pass information from the copy code to a monitor which // can use the real-time information to produce output or react to changes. type ProgressProperties struct { Artifact BlobInfo Offset uint64 } image-4.0.1/version/000077500000000000000000000000001354546467100143025ustar00rootroot00000000000000image-4.0.1/version/version.go000066400000000000000000000010261354546467100163150ustar00rootroot00000000000000package version import "fmt" const ( // VersionMajor is for API-incompatible changes VersionMajor = 4 // VersionMinor is for functionality added in a backwards-compatible manner VersionMinor = 0 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 1 // VersionDev indicates a development branch. For releases it will be the empty string. VersionDev = "" ) // Version is the specification version that the package types support. var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
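// A minimal usage sketch (not part of the upstream file), assuming this package is imported as // "github.com/containers/image/v4/version": // fmt.Println("containers/image", version.Version) // prints e.g. "containers/image 4.0.1"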