docker-containerd-tags-docker-1.13.1/000077500000000000000000000000001304421264600173735ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/.gitignore000066400000000000000000000001451304421264600213630ustar00rootroot00000000000000*.exe /containerd/containerd /containerd-shim/containerd-shim /bin/ /ctr/ctr /hack/benchmark /output docker-containerd-tags-docker-1.13.1/CONTRIBUTING.md000066400000000000000000000040421304421264600216240ustar00rootroot00000000000000# Contributing ## Sign your work The sign-off is a simple line at the end of the explanation for the patch. Your signature certifies that you wrote the patch or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. 
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` Then you just add a line to every git commit message: Signed-off-by: Joe Smith Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`. docker-containerd-tags-docker-1.13.1/Dockerfile000066400000000000000000000045711304421264600213740ustar00rootroot00000000000000FROM debian:jessie # allow replacing httpredir mirror ARG APT_MIRROR=httpredir.debian.org RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list RUN apt-get update && apt-get install -y \ build-essential \ ca-certificates \ curl \ git \ make \ jq \ pkg-config \ apparmor \ libapparmor-dev \ --no-install-recommends \ && rm -rf /var/lib/apt/lists/* # Install Go ENV GO_VERSION 1.7.1 RUN curl -sSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar -v -C /usr/local -xz ENV PATH /go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/go/src/github.com/docker/containerd/vendor ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 # Grab Go's cover tool for dead-simple code coverage testing # Grab Go's vet tool for examining go code to find suspicious constructs # and help prevent errors that the compiler might not catch RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ && go install -v golang.org/x/tools/cmd/cover \ && go install -v golang.org/x/tools/cmd/vet # Grab Go's lint tool ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ && (cd 
/go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ && go install -v github.com/golang/lint/golint WORKDIR /go/src/github.com/docker/containerd # install seccomp: the version shipped in trusty is too old ENV SECCOMP_VERSION 2.3.1 RUN set -x \ && export SECCOMP_PATH="$(mktemp -d)" \ && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ && ( \ cd "$SECCOMP_PATH" \ && ./configure --prefix=/usr/local \ && make \ && make install \ && ldconfig \ ) \ && rm -rf "$SECCOMP_PATH" # Install runc ENV RUNC_COMMIT 51371867a01c467f08af739783b8beafc154c4d7 RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone git://github.com/docker/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ && cd "$GOPATH/src/github.com/opencontainers/runc" \ && git checkout -q "$RUNC_COMMIT" \ && make BUILDTAGS="seccomp apparmor selinux" && make install COPY . /go/src/github.com/docker/containerd WORKDIR /go/src/github.com/docker/containerd RUN make all install docker-containerd-tags-docker-1.13.1/Jenkinsfile000066400000000000000000000005141304421264600215570ustar00rootroot00000000000000wrappedNode(label: "linux && x86_64") { deleteDir() checkout scm stage "build image" def img = docker.build("dockerbuildbot/containerd:${gitCommit()}") try { stage "run tests" sh "docker run --privileged --rm --name '${env.BUILD_TAG}' ${img.id} make test" } finally { sh "docker rmi -f ${img.id} ||:" } } docker-containerd-tags-docker-1.13.1/LICENSE.code000066400000000000000000000250151304421264600213140ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2016 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-containerd-tags-docker-1.13.1/LICENSE.docs000066400000000000000000000470441304421264600213400ustar00rootroot00000000000000Attribution-ShareAlike 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. 
Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More_considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution-ShareAlike 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). 
To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. f. 
Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike. h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. 
reproduce and Share the Licensed Material, in whole or in part; and b. produce, reproduce, and Share Adapted Material. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. Additional offer from the Licensor -- Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply. c. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. 
Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. 
indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. b. ShareAlike. In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 1. The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License. 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and c. 
You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. 
However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. 
Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org. docker-containerd-tags-docker-1.13.1/MAINTAINERS000066400000000000000000000021431304421264600210700ustar00rootroot00000000000000# Containerd maintainers file # # This file describes who runs the docker/containerd project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. # [Org] [Org."Core maintainers"] people = [ "crosbymichael", "tonistiigi", "mlaventure", ] [people] # A reference list of all people associated with the project. 
# All other sections should refer to people by their canonical key # in the people section. # ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.crosbymichael] Name = "Michael Crosby" Email = "crosbymichael@gmail.com" GitHub = "crosbymichael" [people.tonistiigi] Name = "Tõnis Tiigi" Email = "tonis@docker.com" GitHub = "tonistiigi" [people.mlaventure] Name = "Kenfe-Mickaël Laventure" Email = "mickael.laventure@docker.com" GitHub = "mlaventure" docker-containerd-tags-docker-1.13.1/Makefile000066400000000000000000000057401304421264600210410ustar00rootroot00000000000000BUILDTAGS= PROJECT=github.com/docker/containerd GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true) GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2> /dev/null) LDFLAGS := -X github.com/docker/containerd.GitCommit=${GIT_COMMIT} ${LDFLAGS} TEST_TIMEOUT ?= 5m TEST_SUITE_TIMEOUT ?= 10m RUNTIME ?= runc # if this session isn't interactive, then we don't want to allocate a # TTY, which would fail, but if it is interactive, we do want to attach # so that the user can send e.g. ^C through. 
INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) ifeq ($(INTERACTIVE), 1) DOCKER_FLAGS += -t endif TESTBENCH_ARTIFACTS_DIR := output/test-artifacts TESTBENCH_BUNDLE_DIR := $(TESTBENCH_ARTIFACTS_DIR)/archives DOCKER_IMAGE := containerd-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_RUN := docker run --privileged --rm -i $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" export GOPATH:=$(CURDIR)/vendor:$(GOPATH) all: client daemon shim static: client-static daemon-static shim-static bin: mkdir -p bin/ clean: rm -rf bin && rm -rf output client: bin cd ctr && go build -ldflags "${LDFLAGS}" -o ../bin/ctr client-static: cd ctr && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/ctr daemon: bin cd containerd && go build -ldflags "${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd daemon-static: cd containerd && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd shim: bin cd containerd-shim && go build -tags "$(BUILDTAGS)" -ldflags "-w ${LDFLAGS}" -o ../bin/containerd-shim shim-static: cd containerd-shim && go build -ldflags "-w -extldflags -static ${LDFLAGS}" -tags "$(BUILDTAGS)" -o ../bin/containerd-shim $(TESTBENCH_BUNDLE_DIR)/busybox.tar: mkdir -p $(TESTBENCH_BUNDLE_DIR) curl -sSL 'https://github.com/jpetazzo/docker-busybox/raw/buildroot-2014.11/rootfs.tar' -o $(TESTBENCH_BUNDLE_DIR)/busybox.tar bundles-rootfs: $(TESTBENCH_BUNDLE_DIR)/busybox.tar dbuild: $(TESTBENCH_BUNDLE_DIR)/busybox.tar @docker build --rm --force-rm -t "$(DOCKER_IMAGE)" . dtest: dbuild $(DOCKER_RUN) make test dbench: dbuild $(DOCKER_RUN) make bench install: cp bin/* /usr/local/bin/ protoc: protoc -I ./api/grpc/types ./api/grpc/types/api.proto --go_out=plugins=grpc:api/grpc/types fmt: @gofmt -s -l . | grep -v vendor | grep -v .pb. | tee /dev/stderr lint: @hack/validate-lint shell: dbuild $(DOCKER_RUN) bash test: validate install bundles-rootfs go test -bench=. -v $(shell go list ./... 
| grep -v /vendor | grep -v /integration-test ) -runtime=$(RUNTIME) ifneq ($(wildcard /.dockerenv), ) cd integration-test ; \ go test -check.v -check.timeout=$(TEST_TIMEOUT) $(TESTFLAGS) timeout=$(TEST_SUITE_TIMEOUT) github.com/docker/containerd/integration-test endif bench: shim validate install bundles-rootfs go test -bench=. -v $(shell go list ./... | grep -v /vendor | grep -v /integration-test) -runtime=$(RUNTIME) validate: fmt lint uninstall: $(foreach file,containerd containerd-shim ctr,rm /usr/local/bin/$(file);) docker-containerd-tags-docker-1.13.1/NOTICE000066400000000000000000000010071304421264600202750ustar00rootroot00000000000000Docker Copyright 2012-2015 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). The following is courtesy of our legal counsel: Use and transfer of Docker may be subject to certain restrictions by the United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. For more information, please see https://www.bis.doc.gov See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. docker-containerd-tags-docker-1.13.1/README.md000066400000000000000000000060051304421264600206530ustar00rootroot00000000000000# containerd containerd is a daemon to control runC, built for performance and density. containerd leverages runC's advanced features such as seccomp and user namespace support as well as checkpoint and restore for cloning and live migration of containers. ## Getting started The easiest way to start using containerd is to download binaries from the [releases page](https://github.com/docker/containerd/releases). The included `ctr` command-line tool allows you interact with the containerd daemon: ``` $ sudo ctr containers start redis /containers/redis $ sudo ctr containers list ID PATH STATUS PROCESSES redis /containers/redis running 14063 ``` `/containers/redis` is the path to an OCI bundle. 
[See the docs for more information.](docs/bundle.md) ## Docs * [Client CLI reference (`ctr`)](docs/cli.md) * [Daemon CLI reference (`containerd`)](docs/daemon.md) * [Creating OCI bundles](docs/bundle.md) * [containerd changes to the bundle](docs/bundle-changes.md) * [Attaching to STDIO or TTY](docs/attach.md) * [Telemetry and metrics](docs/telemetry.md) All documentation is contained in the `/docs` directory in this repository. ## Building You will need to make sure that you have Go installed on your system and the containerd repository is cloned in your `$GOPATH`. You will also need to make sure that you have all the dependencies cloned as well. Currently, contributing to containerd is not for the first time devs as many dependencies are not vendored and work is being completed at a high rate. After that just run `make` and the binaries for the daemon and client will be localed in the `bin/` directory. ## Performance Starting 1000 containers concurrently runs at 126-140 containers per second. Overall start times: ``` [containerd] 2015/12/04 15:00:54 count: 1000 [containerd] 2015/12/04 14:59:54 min: 23ms [containerd] 2015/12/04 14:59:54 max: 355ms [containerd] 2015/12/04 14:59:54 mean: 78ms [containerd] 2015/12/04 14:59:54 stddev: 34ms [containerd] 2015/12/04 14:59:54 median: 73ms [containerd] 2015/12/04 14:59:54 75%: 91ms [containerd] 2015/12/04 14:59:54 95%: 123ms [containerd] 2015/12/04 14:59:54 99%: 287ms [containerd] 2015/12/04 14:59:54 99.9%: 355ms ``` ## Roadmap The current roadmap and milestones for alpha and beta completion are in the github issues on this repository. Please refer to these issues for what is being worked on and completed for the various stages of development. ## Copyright and license Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. 
The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. docker-containerd-tags-docker-1.13.1/api/000077500000000000000000000000001304421264600201445ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/api/grpc/000077500000000000000000000000001304421264600210775ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/api/grpc/server/000077500000000000000000000000001304421264600224055ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/api/grpc/server/server.go000066400000000000000000000317371304421264600242550ustar00rootroot00000000000000package server import ( "bufio" "errors" "fmt" "os" "strconv" "strings" "syscall" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" "github.com/docker/containerd" "github.com/docker/containerd/api/grpc/types" "github.com/docker/containerd/runtime" "github.com/docker/containerd/supervisor" "github.com/golang/protobuf/ptypes" "golang.org/x/net/context" ) type apiServer struct { sv *supervisor.Supervisor } // NewServer returns grpc server instance func NewServer(sv *supervisor.Supervisor) types.APIServer { return &apiServer{ sv: sv, } } func (s *apiServer) GetServerVersion(ctx context.Context, c *types.GetServerVersionRequest) (*types.GetServerVersionResponse, error) { return &types.GetServerVersionResponse{ Major: containerd.VersionMajor, Minor: containerd.VersionMinor, Patch: containerd.VersionPatch, Revision: containerd.GitCommit, }, nil } func (s *apiServer) CreateContainer(ctx context.Context, c *types.CreateContainerRequest) (*types.CreateContainerResponse, error) { if c.BundlePath == "" { return nil, errors.New("empty bundle path") } e := &supervisor.StartTask{} e.ID = c.Id e.BundlePath = c.BundlePath e.Stdin = c.Stdin 
e.Stdout = c.Stdout e.Stderr = c.Stderr e.Labels = c.Labels e.NoPivotRoot = c.NoPivotRoot e.Runtime = c.Runtime e.RuntimeArgs = c.RuntimeArgs e.StartResponse = make(chan supervisor.StartResponse, 1) e.Ctx = ctx if c.Checkpoint != "" { e.CheckpointDir = c.CheckpointDir e.Checkpoint = &runtime.Checkpoint{ Name: c.Checkpoint, } } s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } r := <-e.StartResponse apiC, err := createAPIContainer(r.Container, false) if err != nil { return nil, err } return &types.CreateContainerResponse{ Container: apiC, }, nil } func (s *apiServer) CreateCheckpoint(ctx context.Context, r *types.CreateCheckpointRequest) (*types.CreateCheckpointResponse, error) { e := &supervisor.CreateCheckpointTask{} e.ID = r.Id e.CheckpointDir = r.CheckpointDir e.Checkpoint = &runtime.Checkpoint{ Name: r.Checkpoint.Name, Exit: r.Checkpoint.Exit, TCP: r.Checkpoint.Tcp, UnixSockets: r.Checkpoint.UnixSockets, Shell: r.Checkpoint.Shell, EmptyNS: r.Checkpoint.EmptyNS, } s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } return &types.CreateCheckpointResponse{}, nil } func (s *apiServer) DeleteCheckpoint(ctx context.Context, r *types.DeleteCheckpointRequest) (*types.DeleteCheckpointResponse, error) { if r.Name == "" { return nil, errors.New("checkpoint name cannot be empty") } e := &supervisor.DeleteCheckpointTask{} e.ID = r.Id e.CheckpointDir = r.CheckpointDir e.Checkpoint = &runtime.Checkpoint{ Name: r.Name, } s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } return &types.DeleteCheckpointResponse{}, nil } func (s *apiServer) ListCheckpoint(ctx context.Context, r *types.ListCheckpointRequest) (*types.ListCheckpointResponse, error) { e := &supervisor.GetContainersTask{} s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } var container runtime.Container for _, c := range e.Containers { if c.ID() == r.Id { container = c break } } if container == nil { return nil, 
grpc.Errorf(codes.NotFound, "no such containers") } var out []*types.Checkpoint checkpoints, err := container.Checkpoints(r.CheckpointDir) if err != nil { return nil, err } for _, c := range checkpoints { out = append(out, &types.Checkpoint{ Name: c.Name, Tcp: c.TCP, Shell: c.Shell, UnixSockets: c.UnixSockets, // TODO: figure out timestamp //Timestamp: c.Timestamp, }) } return &types.ListCheckpointResponse{Checkpoints: out}, nil } func (s *apiServer) Signal(ctx context.Context, r *types.SignalRequest) (*types.SignalResponse, error) { e := &supervisor.SignalTask{} e.ID = r.Id e.PID = r.Pid e.Signal = syscall.Signal(int(r.Signal)) s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } return &types.SignalResponse{}, nil } func (s *apiServer) State(ctx context.Context, r *types.StateRequest) (*types.StateResponse, error) { getState := func(c runtime.Container) (interface{}, error) { return createAPIContainer(c, true) } e := &supervisor.GetContainersTask{} e.ID = r.Id e.GetState = getState s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } m := s.sv.Machine() state := &types.StateResponse{ Machine: &types.Machine{ Cpus: uint32(m.Cpus), Memory: uint64(m.Memory), }, } for idx := range e.Containers { state.Containers = append(state.Containers, e.States[idx].(*types.Container)) } return state, nil } func createAPIContainer(c runtime.Container, getPids bool) (*types.Container, error) { processes, err := c.Processes() if err != nil { return nil, grpc.Errorf(codes.Internal, "get processes for container: "+err.Error()) } var procs []*types.Process for _, p := range processes { oldProc := p.Spec() stdio := p.Stdio() proc := &types.Process{ Pid: p.ID(), SystemPid: uint32(p.SystemPid()), Terminal: oldProc.Terminal, Args: oldProc.Args, Env: oldProc.Env, Cwd: oldProc.Cwd, Stdin: stdio.Stdin, Stdout: stdio.Stdout, Stderr: stdio.Stderr, } proc.User = &types.User{ Uid: oldProc.User.UID, Gid: oldProc.User.GID, AdditionalGids: 
oldProc.User.AdditionalGids, } proc.Capabilities = oldProc.Capabilities proc.ApparmorProfile = oldProc.ApparmorProfile proc.SelinuxLabel = oldProc.SelinuxLabel proc.NoNewPrivileges = oldProc.NoNewPrivileges for _, rl := range oldProc.Rlimits { proc.Rlimits = append(proc.Rlimits, &types.Rlimit{ Type: rl.Type, Soft: rl.Soft, Hard: rl.Hard, }) } procs = append(procs, proc) } var pids []int state := c.State() if getPids && (state == runtime.Running || state == runtime.Paused) { if pids, err = c.Pids(); err != nil { return nil, grpc.Errorf(codes.Internal, "get all pids for container: "+err.Error()) } } return &types.Container{ Id: c.ID(), BundlePath: c.Path(), Processes: procs, Labels: c.Labels(), Status: string(state), Pids: toUint32(pids), Runtime: c.Runtime(), }, nil } func toUint32(its []int) []uint32 { o := []uint32{} for _, i := range its { o = append(o, uint32(i)) } return o } func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) { e := &supervisor.UpdateTask{} e.ID = r.Id e.State = runtime.State(r.Status) if r.Resources != nil { rs := r.Resources e.Resources = &runtime.Resource{} if rs.CpuShares != 0 { e.Resources.CPUShares = int64(rs.CpuShares) } if rs.BlkioWeight != 0 { e.Resources.BlkioWeight = uint16(rs.BlkioWeight) } if rs.CpuPeriod != 0 { e.Resources.CPUPeriod = int64(rs.CpuPeriod) } if rs.CpuQuota != 0 { e.Resources.CPUQuota = int64(rs.CpuQuota) } if rs.CpusetCpus != "" { e.Resources.CpusetCpus = rs.CpusetCpus } if rs.CpusetMems != "" { e.Resources.CpusetMems = rs.CpusetMems } if rs.KernelMemoryLimit != 0 { e.Resources.KernelMemory = int64(rs.KernelMemoryLimit) } if rs.KernelTCPMemoryLimit != 0 { e.Resources.KernelTCPMemory = int64(rs.KernelTCPMemoryLimit) } if rs.MemoryLimit != 0 { e.Resources.Memory = int64(rs.MemoryLimit) } if rs.MemoryReservation != 0 { e.Resources.MemoryReservation = int64(rs.MemoryReservation) } if rs.MemorySwap != 0 { e.Resources.MemorySwap = 
int64(rs.MemorySwap) } } s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } return &types.UpdateContainerResponse{}, nil } func (s *apiServer) UpdateProcess(ctx context.Context, r *types.UpdateProcessRequest) (*types.UpdateProcessResponse, error) { e := &supervisor.UpdateProcessTask{} e.ID = r.Id e.PID = r.Pid e.Height = int(r.Height) e.Width = int(r.Width) e.CloseStdin = r.CloseStdin s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } return &types.UpdateProcessResponse{}, nil } func (s *apiServer) Events(r *types.EventsRequest, stream types.API_EventsServer) error { t := time.Time{} if r.Timestamp != nil { from, err := ptypes.Timestamp(r.Timestamp) if err != nil { return err } t = from } if r.StoredOnly && t.IsZero() { return fmt.Errorf("invalid parameter: StoredOnly cannot be specified without setting a valid Timestamp") } events := s.sv.Events(t, r.StoredOnly, r.Id) defer s.sv.Unsubscribe(events) for e := range events { tsp, err := ptypes.TimestampProto(e.Timestamp) if err != nil { return err } if r.Id == "" || e.ID == r.Id { if err := stream.Send(&types.Event{ Id: e.ID, Type: e.Type, Timestamp: tsp, Pid: e.PID, Status: uint32(e.Status), }); err != nil { return err } } } return nil } func convertToPb(st *runtime.Stat) *types.StatsResponse { tsp, _ := ptypes.TimestampProto(st.Timestamp) pbSt := &types.StatsResponse{ Timestamp: tsp, CgroupStats: &types.CgroupStats{}, } systemUsage, _ := getSystemCPUUsage() pbSt.CgroupStats.CpuStats = &types.CpuStats{ CpuUsage: &types.CpuUsage{ TotalUsage: st.CPU.Usage.Total, PercpuUsage: st.CPU.Usage.Percpu, UsageInKernelmode: st.CPU.Usage.Kernel, UsageInUsermode: st.CPU.Usage.User, }, ThrottlingData: &types.ThrottlingData{ Periods: st.CPU.Throttling.Periods, ThrottledPeriods: st.CPU.Throttling.ThrottledPeriods, ThrottledTime: st.CPU.Throttling.ThrottledTime, }, SystemUsage: systemUsage, } pbSt.CgroupStats.MemoryStats = &types.MemoryStats{ Cache: st.Memory.Cache, Usage: 
&types.MemoryData{ Usage: st.Memory.Usage.Usage, MaxUsage: st.Memory.Usage.Max, Failcnt: st.Memory.Usage.Failcnt, Limit: st.Memory.Usage.Limit, }, SwapUsage: &types.MemoryData{ Usage: st.Memory.Swap.Usage, MaxUsage: st.Memory.Swap.Max, Failcnt: st.Memory.Swap.Failcnt, Limit: st.Memory.Swap.Limit, }, KernelUsage: &types.MemoryData{ Usage: st.Memory.Kernel.Usage, MaxUsage: st.Memory.Kernel.Max, Failcnt: st.Memory.Kernel.Failcnt, Limit: st.Memory.Kernel.Limit, }, Stats: st.Memory.Raw, } pbSt.CgroupStats.BlkioStats = &types.BlkioStats{ IoServiceBytesRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceBytesRecursive), IoServicedRecursive: convertBlkioEntryToPb(st.Blkio.IoServicedRecursive), IoQueuedRecursive: convertBlkioEntryToPb(st.Blkio.IoQueuedRecursive), IoServiceTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoServiceTimeRecursive), IoWaitTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoWaitTimeRecursive), IoMergedRecursive: convertBlkioEntryToPb(st.Blkio.IoMergedRecursive), IoTimeRecursive: convertBlkioEntryToPb(st.Blkio.IoTimeRecursive), SectorsRecursive: convertBlkioEntryToPb(st.Blkio.SectorsRecursive), } pbSt.CgroupStats.HugetlbStats = make(map[string]*types.HugetlbStats) for k, st := range st.Hugetlb { pbSt.CgroupStats.HugetlbStats[k] = &types.HugetlbStats{ Usage: st.Usage, MaxUsage: st.Max, Failcnt: st.Failcnt, } } pbSt.CgroupStats.PidsStats = &types.PidsStats{ Current: st.Pids.Current, Limit: st.Pids.Limit, } return pbSt } func convertBlkioEntryToPb(b []runtime.BlkioEntry) []*types.BlkioStatsEntry { var pbEs []*types.BlkioStatsEntry for _, e := range b { pbEs = append(pbEs, &types.BlkioStatsEntry{ Major: e.Major, Minor: e.Minor, Op: e.Op, Value: e.Value, }) } return pbEs } const nanoSecondsPerSecond = 1e9 // getSystemCPUUsage returns the host system's cpu usage in // nanoseconds. An error is returned if the format of the underlying // file does not match. // // Uses /proc/stat defined by POSIX. 
Looks for the cpu // statistics line and then sums up the first seven fields // provided. See `man 5 proc` for details on specific field // information. func getSystemCPUUsage() (uint64, error) { var line string f, err := os.Open("/proc/stat") if err != nil { return 0, err } bufReader := bufio.NewReaderSize(nil, 128) defer func() { bufReader.Reset(nil) f.Close() }() bufReader.Reset(f) err = nil for err == nil { line, err = bufReader.ReadString('\n') if err != nil { break } parts := strings.Fields(line) switch parts[0] { case "cpu": if len(parts) < 8 { return 0, fmt.Errorf("bad format of cpu stats") } var totalClockTicks uint64 for _, i := range parts[1:8] { v, err := strconv.ParseUint(i, 10, 64) if err != nil { return 0, fmt.Errorf("error parsing cpu stats") } totalClockTicks += v } return (totalClockTicks * nanoSecondsPerSecond) / clockTicksPerSecond, nil } } return 0, fmt.Errorf("bad stats format") } func (s *apiServer) Stats(ctx context.Context, r *types.StatsRequest) (*types.StatsResponse, error) { e := &supervisor.StatsTask{} e.ID = r.Id e.Stat = make(chan *runtime.Stat, 1) s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } stats := <-e.Stat t := convertToPb(stats) return t, nil } docker-containerd-tags-docker-1.13.1/api/grpc/server/server_linux.go000066400000000000000000000030651304421264600254650ustar00rootroot00000000000000package server import ( "fmt" "github.com/docker/containerd/api/grpc/types" "github.com/docker/containerd/specs" "github.com/docker/containerd/supervisor" "github.com/opencontainers/runc/libcontainer/system" ocs "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/net/context" ) var clockTicksPerSecond = uint64(system.GetClockTicks()) func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) { process := &specs.ProcessSpec{ Terminal: r.Terminal, Args: r.Args, Env: r.Env, Cwd: r.Cwd, } process.User = ocs.User{ UID: r.User.Uid, GID: r.User.Gid, 
AdditionalGids: r.User.AdditionalGids, } process.Capabilities = r.Capabilities process.ApparmorProfile = r.ApparmorProfile process.SelinuxLabel = r.SelinuxLabel process.NoNewPrivileges = r.NoNewPrivileges for _, rl := range r.Rlimits { process.Rlimits = append(process.Rlimits, ocs.Rlimit{ Type: rl.Type, Soft: rl.Soft, Hard: rl.Hard, }) } if r.Id == "" { return nil, fmt.Errorf("container id cannot be empty") } if r.Pid == "" { return nil, fmt.Errorf("process id cannot be empty") } e := &supervisor.AddProcessTask{} e.ID = r.Id e.PID = r.Pid e.ProcessSpec = process e.Stdin = r.Stdin e.Stdout = r.Stdout e.Stderr = r.Stderr e.StartResponse = make(chan supervisor.StartResponse, 1) e.Ctx = ctx s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } sr := <-e.StartResponse return &types.AddProcessResponse{SystemPid: uint32(sr.ExecPid)}, nil } docker-containerd-tags-docker-1.13.1/api/grpc/server/server_solaris.go000066400000000000000000000016761304421264600260100ustar00rootroot00000000000000package server import ( "fmt" "github.com/docker/containerd/api/grpc/types" "github.com/docker/containerd/specs" "github.com/docker/containerd/supervisor" "golang.org/x/net/context" ) var clockTicksPerSecond uint64 func (s *apiServer) AddProcess(ctx context.Context, r *types.AddProcessRequest) (*types.AddProcessResponse, error) { process := &specs.ProcessSpec{ Terminal: r.Terminal, Args: r.Args, Env: r.Env, Cwd: r.Cwd, } if r.Id == "" { return nil, fmt.Errorf("container id cannot be empty") } if r.Pid == "" { return nil, fmt.Errorf("process id cannot be empty") } e := &supervisor.AddProcessTask{} e.ID = r.Id e.PID = r.Pid e.ProcessSpec = process e.Stdin = r.Stdin e.Stdout = r.Stdout e.Stderr = r.Stderr e.StartResponse = make(chan supervisor.StartResponse, 1) s.sv.SendTask(e) if err := <-e.ErrorCh(); err != nil { return nil, err } <-e.StartResponse return &types.AddProcessResponse{}, nil } 
docker-containerd-tags-docker-1.13.1/api/grpc/types/000077500000000000000000000000001304421264600222435ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/api/grpc/types/api.pb.go000066400000000000000000002231011304421264600237420ustar00rootroot00000000000000// Code generated by protoc-gen-go. // source: api.proto // DO NOT EDIT! /* Package types is a generated protocol buffer package. It is generated from these files: api.proto It has these top-level messages: GetServerVersionRequest GetServerVersionResponse UpdateProcessRequest UpdateProcessResponse CreateContainerRequest CreateContainerResponse SignalRequest SignalResponse AddProcessRequest Rlimit User AddProcessResponse CreateCheckpointRequest CreateCheckpointResponse DeleteCheckpointRequest DeleteCheckpointResponse ListCheckpointRequest Checkpoint ListCheckpointResponse StateRequest ContainerState Process Container Machine StateResponse UpdateContainerRequest UpdateResource BlockIODevice WeightDevice ThrottleDevice UpdateContainerResponse EventsRequest Event NetworkStats CpuUsage ThrottlingData CpuStats PidsStats MemoryData MemoryStats BlkioStatsEntry BlkioStats HugetlbStats CgroupStats StatsResponse StatsRequest */ package types import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
// UpdateProcessRequest identifies a process (container Id + process
// Pid) and carries the mutation to apply: close its stdin and/or set a
// new console size (Width/Height — TTY resize, inferred from the field
// names; confirm against the client before relying on units).
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type UpdateProcessRequest struct {
	Id         string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`                  // container id
	Pid        string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`                // process id inside the container
	CloseStdin bool   `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` // close the process's stdin when true
	Width      uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"`           // new console width
	Height     uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"`         // new console height
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *UpdateProcessRequest) Reset()                    { *m = UpdateProcessRequest{} }
func (m *UpdateProcessRequest) String() string            { return proto.CompactTextString(m) }
func (*UpdateProcessRequest) ProtoMessage()               {}
func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// CreateContainerRequest carries everything needed to start a new
// container: its id, bundle location, stdio paths, labels and the
// runtime to invoke. Checkpoint/CheckpointDir optionally name a saved
// checkpoint to restore from.
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type CreateContainerRequest struct {
	Id            string   `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`                        // container id
	BundlePath    string   `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`        // path to the container bundle on disk
	Checkpoint    string   `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`        // optional checkpoint name to restore from
	Stdin         string   `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`                  // stdio endpoints handed to the process
	Stdout        string   `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
	Stderr        string   `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
	Labels        []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`                // free-form labels attached to the container
	NoPivotRoot   bool     `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"`     // NOTE(review): presumably skips pivot_root — confirm with runtime
	Runtime       string   `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"`              // runtime to execute
	RuntimeArgs   []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"`     // extra arguments passed to the runtime
	CheckpointDir string   `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` // directory containing checkpoints
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *CreateContainerRequest) Reset()                    { *m = CreateContainerRequest{} }
func (m *CreateContainerRequest) String() string            { return proto.CompactTextString(m) }
func (*CreateContainerRequest) ProtoMessage()               {}
func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
json:"id,omitempty"` Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` } func (m *SignalRequest) Reset() { *m = SignalRequest{} } func (m *SignalRequest) String() string { return proto.CompactTextString(m) } func (*SignalRequest) ProtoMessage() {} func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } type SignalResponse struct { } func (m *SignalResponse) Reset() { *m = SignalResponse{} } func (m *SignalResponse) String() string { return proto.CompactTextString(m) } func (*SignalResponse) ProtoMessage() {} func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } type AddProcessRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` } func (m *AddProcessRequest) Reset() { *m = 
// Rlimit mirrors a POSIX resource limit as used in the OCI process
// spec: a limit Type name plus its Soft and Hard values (see the
// AddProcess handler, which copies these fields 1:1 into ocs.Rlimit).
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type Rlimit struct {
	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`  // limit name — NOTE(review): format (e.g. "RLIMIT_NOFILE") inferred, confirm with callers
	Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` // soft (current) limit
	Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` // hard (ceiling) limit
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *Rlimit) Reset()                    { *m = Rlimit{} }
func (m *Rlimit) String() string            { return proto.CompactTextString(m) }
func (*Rlimit) ProtoMessage()               {}
func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
`protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` } func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } func (*CreateCheckpointRequest) ProtoMessage() {} func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { if m != nil { return m.Checkpoint } return nil } type CreateCheckpointResponse struct { } func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } func (*CreateCheckpointResponse) ProtoMessage() {} func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } type DeleteCheckpointRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` } func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } func (*DeleteCheckpointRequest) ProtoMessage() {} func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } type DeleteCheckpointResponse struct { } func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } func (*DeleteCheckpointResponse) ProtoMessage() {} func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } type ListCheckpointRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" 
// Checkpoint describes a saved container checkpoint and the options it
// was taken with. NOTE(review): the boolean flags look like CRIU
// checkpoint options (established TCP connections, external unix
// sockets, shell job) — confirm exact semantics against the runtime
// integration before relying on them.
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type Checkpoint struct {
	Name        string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // checkpoint name
	Exit        bool     `protobuf:"varint,2,opt,name=exit" json:"exit,omitempty"`
	Tcp         bool     `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"`
	UnixSockets bool     `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"`
	Shell       bool     `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"`
	EmptyNS     []string `protobuf:"bytes,6,rep,name=emptyNS" json:"emptyNS,omitempty"` // namespaces left out of the image
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *Checkpoint) Reset()                    { *m = Checkpoint{} }
func (m *Checkpoint) String() string            { return proto.CompactTextString(m) }
func (*Checkpoint) ProtoMessage()               {}
func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` } func (m *ContainerState) Reset() { *m = ContainerState{} } func (m *ContainerState) String() string { return proto.CompactTextString(m) } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } type Process struct { Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` } func (m *Process) Reset() { *m = Process{} } func (m *Process) String() string { return proto.CompactTextString(m) } func (*Process) ProtoMessage() {} func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } func (m *Process) GetUser() *User { if m != nil { return m.User } return nil } func (m *Process) GetRlimits() []*Rlimit { if m != nil { return m.Rlimits } return nil } type Container struct { 
// Machine is information about machine on which containerd is run:
// its CPU count and memory size (units are not recorded in this
// message — confirm with the producer before converting).
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type Machine struct {
	Cpus   uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"`     // number of CPUs
	Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"` // host memory size
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *Machine) Reset()                    { *m = Machine{} }
func (m *Machine) String() string            { return proto.CompactTextString(m) }
func (*Machine) ProtoMessage()               {}
func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
*StateResponse) GetMachine() *Machine { if m != nil { return m.Machine } return nil } type UpdateContainerRequest struct { Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` } func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } func (*UpdateContainerRequest) ProtoMessage() {} func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } func (m *UpdateContainerRequest) GetResources() *UpdateResource { if m != nil { return m.Resources } return nil } type UpdateResource struct { BlkioWeight uint64 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` CpuShares uint64 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` CpuPeriod uint64 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` CpuQuota uint64 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` CpusetMems string `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` MemoryLimit uint64 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` MemorySwap uint64 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` MemoryReservation uint64 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` KernelMemoryLimit uint64 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` KernelTCPMemoryLimit uint64 `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"` BlkioLeafWeight uint64 `protobuf:"varint,12,opt,name=blkioLeafWeight" 
json:"blkioLeafWeight,omitempty"` BlkioWeightDevice []*WeightDevice `protobuf:"bytes,13,rep,name=blkioWeightDevice" json:"blkioWeightDevice,omitempty"` BlkioThrottleReadBpsDevice []*ThrottleDevice `protobuf:"bytes,14,rep,name=blkioThrottleReadBpsDevice" json:"blkioThrottleReadBpsDevice,omitempty"` BlkioThrottleWriteBpsDevice []*ThrottleDevice `protobuf:"bytes,15,rep,name=blkioThrottleWriteBpsDevice" json:"blkioThrottleWriteBpsDevice,omitempty"` BlkioThrottleReadIopsDevice []*ThrottleDevice `protobuf:"bytes,16,rep,name=blkioThrottleReadIopsDevice" json:"blkioThrottleReadIopsDevice,omitempty"` BlkioThrottleWriteIopsDevice []*ThrottleDevice `protobuf:"bytes,17,rep,name=blkioThrottleWriteIopsDevice" json:"blkioThrottleWriteIopsDevice,omitempty"` } func (m *UpdateResource) Reset() { *m = UpdateResource{} } func (m *UpdateResource) String() string { return proto.CompactTextString(m) } func (*UpdateResource) ProtoMessage() {} func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } func (m *UpdateResource) GetBlkioWeightDevice() []*WeightDevice { if m != nil { return m.BlkioWeightDevice } return nil } func (m *UpdateResource) GetBlkioThrottleReadBpsDevice() []*ThrottleDevice { if m != nil { return m.BlkioThrottleReadBpsDevice } return nil } func (m *UpdateResource) GetBlkioThrottleWriteBpsDevice() []*ThrottleDevice { if m != nil { return m.BlkioThrottleWriteBpsDevice } return nil } func (m *UpdateResource) GetBlkioThrottleReadIopsDevice() []*ThrottleDevice { if m != nil { return m.BlkioThrottleReadIopsDevice } return nil } func (m *UpdateResource) GetBlkioThrottleWriteIopsDevice() []*ThrottleDevice { if m != nil { return m.BlkioThrottleWriteIopsDevice } return nil } type BlockIODevice struct { Major int64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` Minor int64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` } func (m *BlockIODevice) Reset() { *m = BlockIODevice{} } func (m *BlockIODevice) String() string { 
// WeightDevice attaches a blkio Weight (and optional LeafWeight) to a
// specific block device identified by BlkIODevice (major/minor pair).
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type WeightDevice struct {
	BlkIODevice *BlockIODevice `protobuf:"bytes,1,opt,name=blkIODevice" json:"blkIODevice,omitempty"` // target device (major/minor)
	Weight      uint32         `protobuf:"varint,2,opt,name=weight" json:"weight,omitempty"`
	LeafWeight  uint32         `protobuf:"varint,3,opt,name=leafWeight" json:"leafWeight,omitempty"`
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *WeightDevice) Reset()                    { *m = WeightDevice{} }
func (m *WeightDevice) String() string            { return proto.CompactTextString(m) }
func (*WeightDevice) ProtoMessage()               {}
func (*WeightDevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }

// GetBlkIODevice returns the target device, or nil when the message
// itself is nil (nil-safe accessor emitted by the generator).
func (m *WeightDevice) GetBlkIODevice() *BlockIODevice {
	if m != nil {
		return m.BlkIODevice
	}
	return nil
}
// Event records something that happened to a container or one of its
// processes: an event Type, the container Id, a numeric Status, the
// process Pid and the time it occurred.
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type Event struct {
	Type   string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`      // event kind
	Id     string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`          // container id
	Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` // numeric status attached to the event
	Pid    string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"`        // process id within the container
	// Tag 5 is deprecated (old uint64 timestamp); tag 6 is its
	// replacement using the well-known Timestamp type.
	Timestamp *google_protobuf.Timestamp `protobuf:"bytes,6,opt,name=timestamp" json:"timestamp,omitempty"`
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *Event) Reset()                    { *m = Event{} }
func (m *Event) String() string            { return proto.CompactTextString(m) }
func (*Event) ProtoMessage()               {}
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }

// GetTimestamp is the nil-safe accessor for Timestamp.
func (m *Event) GetTimestamp() *google_protobuf.Timestamp {
	if m != nil {
		return m.Timestamp
	}
	return nil
}
// CpuUsage holds CPU time counters for a container: a running total,
// a per-CPU breakdown, and kernel/user mode splits. The field names
// suggest cgroup cpuacct counters — NOTE(review): units (ns vs
// USER_HZ ticks) are not recorded here; confirm with the producer
// before converting.
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type CpuUsage struct {
	TotalUsage        uint64   `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"`
	PercpuUsage       []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"` // one entry per CPU
	UsageInKernelmode uint64   `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"`
	UsageInUsermode   uint64   `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"`
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *CpuUsage) Reset()                    { *m = CpuUsage{} }
func (m *CpuUsage) String() string            { return proto.CompactTextString(m) }
func (*CpuUsage) ProtoMessage()               {}
func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
// MemoryData is one memory counter group: current Usage, historical
// peak (MaxUsage), a failure count (Failcnt — presumably the cgroup
// limit-hit counter; confirm with the producer) and the configured
// Limit. MemoryStats embeds it for main, swap and kernel memory.
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type MemoryData struct {
	Usage    uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"`
	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"`
	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"`
	Limit    uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"`
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *MemoryData) Reset()                    { *m = MemoryData{} }
func (m *MemoryData) String() string            { return proto.CompactTextString(m) }
func (*MemoryData) ProtoMessage()               {}
func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
// BlkioStatsEntry is one row of block-I/O accounting: a device
// identified by Major/Minor numbers, an operation name (Op — the value
// set comes from the kernel's blkio reporting; confirm before matching
// on it) and the associated counter Value.
// Generated by protoc-gen-go from api.proto — do not edit by hand.
type BlkioStatsEntry struct {
	Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
	Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
	Op    string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"`
	Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"`
}

// Standard proto.Message boilerplate emitted by the generator.
func (m *BlkioStatsEntry) Reset()                    { *m = BlkioStatsEntry{} }
func (m *BlkioStatsEntry) String() string            { return proto.CompactTextString(m) }
func (*BlkioStatsEntry) ProtoMessage()               {}
func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` } func (m *BlkioStats) Reset() { *m = BlkioStats{} } func (m *BlkioStats) String() string { return proto.CompactTextString(m) } func (*BlkioStats) ProtoMessage() {} func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { if m != nil { return m.IoServiceBytesRecursive } return nil } func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { if m != nil { return m.IoServicedRecursive } return nil } func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { if m != nil { return m.IoQueuedRecursive } return nil } func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry { if m != nil { return m.IoServiceTimeRecursive } return nil } func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { if m != nil { return m.IoWaitTimeRecursive } return nil } func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { if m != nil { return m.IoMergedRecursive } return nil } func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { if m != nil { return m.IoTimeRecursive } return nil } func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { if m != nil { return m.SectorsRecursive } return nil } 
// HugetlbStats carries the hugetlb accounting counters: current usage, peak
// usage, failure count and the configured limit.
type HugetlbStats struct {
	Usage    uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"`
	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"`
	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"`
	Limit    uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"`
}

func (m *HugetlbStats) Reset()         { *m = HugetlbStats{} }
func (m *HugetlbStats) String() string { return proto.CompactTextString(m) }
func (*HugetlbStats) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index (42).
func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }

// CgroupStats aggregates the per-subsystem statistics: cpu, memory, blkio,
// hugetlb (map key presumably the page size -- verify against the server)
// and pids.
type CgroupStats struct {
	CpuStats     *CpuStats                `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"`
	MemoryStats  *MemoryStats             `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"`
	BlkioStats   *BlkioStats              `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"`
	HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	PidsStats    *PidsStats               `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"`
}

func (m *CgroupStats) Reset()         { *m = CgroupStats{} }
func (m *CgroupStats) String() string { return proto.CompactTextString(m) }
func (*CgroupStats) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index (43).
func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }

// Generated nil-safe accessors, one per CgroupStats field.

func (m *CgroupStats) GetCpuStats() *CpuStats {
	if m != nil {
		return m.CpuStats
	}
	return nil
}

func (m *CgroupStats) GetMemoryStats() *MemoryStats {
	if m != nil {
		return m.MemoryStats
	}
	return nil
}

func (m *CgroupStats) GetBlkioStats() *BlkioStats {
	if m != nil {
		return m.BlkioStats
	}
	return nil
}

func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats {
	if m != nil {
		return m.HugetlbStats
	}
	return nil
}

func (m *CgroupStats) GetPidsStats() *PidsStats {
	if m != nil {
		return m.PidsStats
	}
	return nil
}

// StatsResponse is the payload returned by the API.Stats RPC: per-interface
// network statistics, the cgroup statistics, and the collection timestamp.
type StatsResponse struct {
	NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"`
	CgroupStats  *CgroupStats    `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" json:"cgroup_stats,omitempty"`
	// Tag 3 is deprecated (old uint64 timestamp)
	Timestamp *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=timestamp" json:"timestamp,omitempty"`
}

func (m *StatsResponse) Reset()         { *m = StatsResponse{} }
func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
func (*StatsResponse) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index (44).
func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }

// Generated nil-safe accessors, one per StatsResponse field.

func (m *StatsResponse) GetNetworkStats() []*NetworkStats {
	if m != nil {
		return m.NetworkStats
	}
	return nil
}

func (m *StatsResponse) GetCgroupStats() *CgroupStats {
	if m != nil {
		return m.CgroupStats
	}
	return nil
}

func (m *StatsResponse) GetTimestamp() *google_protobuf.Timestamp {
	if m != nil {
		return m.Timestamp
	}
	return nil
}

// StatsRequest selects, by id, the container whose stats are requested.
type StatsRequest struct {
	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
}

func (m *StatsRequest) Reset()         { *m = StatsRequest{} }
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
func (*StatsRequest) ProtoMessage()    {}

// Descriptor returns the gzipped file descriptor and this message's index (45).
func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }

// init registers every generated message type with the proto type registry
// under its fully-qualified "types.*" name; the registration list continues
// on the following lines of the file.
func init() {
	proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest")
	proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse")
	proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest")
	proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse")
	proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest")
	proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse")
	proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest")
// init (cont.): proto type registrations for the remaining request/response,
// container-state, resource-update and statistics message types.
	proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse")
	proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest")
	proto.RegisterType((*Rlimit)(nil), "types.Rlimit")
	proto.RegisterType((*User)(nil), "types.User")
	proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse")
	proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest")
	proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse")
	proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest")
	proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse")
	proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest")
	proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint")
	proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse")
	proto.RegisterType((*StateRequest)(nil), "types.StateRequest")
	proto.RegisterType((*ContainerState)(nil), "types.ContainerState")
	proto.RegisterType((*Process)(nil), "types.Process")
	proto.RegisterType((*Container)(nil), "types.Container")
	proto.RegisterType((*Machine)(nil), "types.Machine")
	proto.RegisterType((*StateResponse)(nil), "types.StateResponse")
	proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest")
	proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource")
	proto.RegisterType((*BlockIODevice)(nil), "types.BlockIODevice")
	proto.RegisterType((*WeightDevice)(nil), "types.WeightDevice")
	proto.RegisterType((*ThrottleDevice)(nil), "types.ThrottleDevice")
	proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse")
	proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest")
	proto.RegisterType((*Event)(nil), "types.Event")
	proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats")
	proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage")
	proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData")
// init (cont.): registrations for the statistics message types.
	proto.RegisterType((*CpuStats)(nil), "types.CpuStats")
	proto.RegisterType((*PidsStats)(nil), "types.PidsStats")
	proto.RegisterType((*MemoryData)(nil), "types.MemoryData")
	proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats")
	proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry")
	proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats")
	proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats")
	proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats")
	proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse")
	proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest")
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion3

// Client API for API service

// APIClient is the generated client-side interface for the types.API gRPC
// service: every method performs one unary RPC except Events, which opens a
// server-streaming RPC.
type APIClient interface {
	GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error)
	CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
	UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error)
	Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error)
	UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error)
	AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error)
	CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error)
	DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error)
	ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error)
	State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
	Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error)
	Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error)
}

// aPIClient is the concrete APIClient backed by a single client connection.
type aPIClient struct {
	cc *grpc.ClientConn
}

// NewAPIClient wraps an existing client connection in an APIClient.
func NewAPIClient(cc *grpc.ClientConn) APIClient {
	return &aPIClient{cc}
}

// The wrapper methods below each allocate the response message and issue a
// unary grpc.Invoke against the matching "/types.API/..." method path.

func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) {
	out := new(GetServerVersionResponse)
	err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
	out := new(CreateContainerResponse)
	err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
	out := new(UpdateContainerResponse)
	err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) {
	out := new(SignalResponse)
	err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) {
	out := new(UpdateProcessResponse)
	err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...)
// (cont.) tail of the generated aPIClient.UpdateProcess wrapper.
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) {
	out := new(AddProcessResponse)
	err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) {
	out := new(CreateCheckpointResponse)
	err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) {
	out := new(DeleteCheckpointResponse)
	err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) {
	out := new(ListCheckpointResponse)
	err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) {
	out := new(StateResponse)
	err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Events opens the server-streaming Events RPC (Streams[0] of the service
// descriptor) and returns a stream from which Event messages are received.
func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) {
	stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...)
// (cont.) tail of the generated aPIClient.Events wrapper: send the request,
// half-close the send direction, then hand the receive stream to the caller.
	if err != nil {
		return nil, err
	}
	x := &aPIEventsClient{stream}
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}

// API_EventsClient is the receive side of the Events stream.
type API_EventsClient interface {
	Recv() (*Event, error)
	grpc.ClientStream
}

type aPIEventsClient struct {
	grpc.ClientStream
}

// Recv blocks until the next Event message arrives on the stream.
func (x *aPIEventsClient) Recv() (*Event, error) {
	m := new(Event)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) {
	out := new(StatsResponse)
	err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Server API for API service

// APIServer is the interface a concrete server must implement; Events is
// server-streaming, every other method is unary.
type APIServer interface {
	GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error)
	CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
	UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
	Signal(context.Context, *SignalRequest) (*SignalResponse, error)
	UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error)
	AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error)
	CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error)
	DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error)
	ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error)
	State(context.Context, *StateRequest) (*StateResponse, error)
	Events(*EventsRequest, API_EventsServer) error
	Stats(context.Context, *StatsRequest) (*StatsResponse, error)
}

// RegisterAPIServer attaches srv to s under the types.API service descriptor.
func RegisterAPIServer(s *grpc.Server, srv APIServer) {
	s.RegisterService(&_API_serviceDesc, srv)
}

// The _API_*_Handler functions below are the generated unary dispatch shims:
// decode the request, optionally route it through the server interceptor,
// then call the matching APIServer method.

func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetServerVersionRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).GetServerVersion(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/GetServerVersion",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateContainerRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).CreateContainer(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/CreateContainer",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateContainerRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).UpdateContainer(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/UpdateContainer",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SignalRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).Signal(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/Signal",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).Signal(ctx, req.(*SignalRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateProcessRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).UpdateProcess(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/UpdateProcess",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(AddProcessRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).AddProcess(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/AddProcess",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateCheckpointRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).CreateCheckpoint(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/CreateCheckpoint",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteCheckpointRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).DeleteCheckpoint(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/DeleteCheckpoint",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListCheckpointRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).ListCheckpoint(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/ListCheckpoint",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(StateRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(APIServer).State(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/types.API/State",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(APIServer).State(ctx, req.(*StateRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _API_Events_Handler is the streaming dispatch shim for the Events RPC
// (body continues on the following lines of the file).
func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(EventsRequest)
	if err :=
stream.RecvMsg(m); err != nil { return err } return srv.(APIServer).Events(m, &aPIEventsServer{stream}) } type API_EventsServer interface { Send(*Event) error grpc.ServerStream } type aPIEventsServer struct { grpc.ServerStream } func (x *aPIEventsServer) Send(m *Event) error { return x.ServerStream.SendMsg(m) } func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StatsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(APIServer).Stats(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/types.API/Stats", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) } return interceptor(ctx, in, info, handler) } var _API_serviceDesc = grpc.ServiceDesc{ ServiceName: "types.API", HandlerType: (*APIServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "GetServerVersion", Handler: _API_GetServerVersion_Handler, }, { MethodName: "CreateContainer", Handler: _API_CreateContainer_Handler, }, { MethodName: "UpdateContainer", Handler: _API_UpdateContainer_Handler, }, { MethodName: "Signal", Handler: _API_Signal_Handler, }, { MethodName: "UpdateProcess", Handler: _API_UpdateProcess_Handler, }, { MethodName: "AddProcess", Handler: _API_AddProcess_Handler, }, { MethodName: "CreateCheckpoint", Handler: _API_CreateCheckpoint_Handler, }, { MethodName: "DeleteCheckpoint", Handler: _API_DeleteCheckpoint_Handler, }, { MethodName: "ListCheckpoint", Handler: _API_ListCheckpoint_Handler, }, { MethodName: "State", Handler: _API_State_Handler, }, { MethodName: "Stats", Handler: _API_Stats_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Events", Handler: _API_Events_Handler, ServerStreams: true, }, }, Metadata: fileDescriptor0, } func init() { proto.RegisterFile("api.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 2606 bytes of 
a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x39, 0xcb, 0x6f, 0x1c, 0x4d, 0xf1, 0xd9, 0x87, 0xd7, 0xde, 0xda, 0x87, 0xbd, 0x93, 0xc4, 0xd9, 0x6c, 0x9e, 0xbf, 0xd1, 0xf7, 0x83, 0x00, 0x9f, 0x9c, 0xe0, 0x7c, 0x1f, 0x44, 0x20, 0x21, 0x25, 0x76, 0xf8, 0x30, 0x5f, 0x1e, 0xce, 0xd8, 0x21, 0x42, 0x42, 0x5a, 0x8d, 0x77, 0x3b, 0xbb, 0x83, 0x67, 0x67, 0xe6, 0x9b, 0xe9, 0xf5, 0xe3, 0xc2, 0x81, 0x03, 0xdc, 0xe0, 0x8a, 0xc4, 0x91, 0x1b, 0x77, 0x0e, 0xf0, 0x0f, 0x20, 0xf1, 0x87, 0x70, 0xe3, 0xce, 0x91, 0xea, 0xea, 0xc7, 0xf4, 0xec, 0xc3, 0x4e, 0x90, 0x10, 0x17, 0x2e, 0xa3, 0xae, 0xea, 0x7a, 0x75, 0x75, 0x55, 0x75, 0x75, 0x0f, 0xd4, 0xfd, 0x24, 0xd8, 0x4a, 0xd2, 0x98, 0xc7, 0xce, 0x0a, 0x3f, 0x4f, 0x58, 0xd6, 0xbb, 0x37, 0x8a, 0xe3, 0x51, 0xc8, 0x1e, 0x12, 0xf2, 0x68, 0xfa, 0xfe, 0x21, 0x0f, 0x26, 0x2c, 0xe3, 0xfe, 0x24, 0x91, 0x74, 0xee, 0x4d, 0xb8, 0xf1, 0x05, 0xe3, 0x07, 0x2c, 0x3d, 0x61, 0xe9, 0x4f, 0x58, 0x9a, 0x05, 0x71, 0xe4, 0xb1, 0xaf, 0xa6, 0x48, 0xe3, 0x9e, 0x41, 0x77, 0x7e, 0x2a, 0x4b, 0xe2, 0x28, 0x63, 0xce, 0x35, 0x58, 0x99, 0xf8, 0x3f, 0x8f, 0xd3, 0x6e, 0xe9, 0x7e, 0xe9, 0x41, 0xcb, 0x93, 0x00, 0x61, 0x83, 0x08, 0xb1, 0x65, 0x85, 0x15, 0x80, 0xc0, 0x26, 0x3e, 0x1f, 0x8c, 0xbb, 0x15, 0x89, 0x25, 0xc0, 0xe9, 0xc1, 0x5a, 0xca, 0x4e, 0x02, 0x21, 0xb5, 0x5b, 0xc5, 0x89, 0xba, 0x67, 0x60, 0xf7, 0x57, 0x25, 0xb8, 0xf6, 0x36, 0x19, 0xfa, 0x9c, 0xed, 0xa7, 0xf1, 0x80, 0x65, 0x99, 0x32, 0xc9, 0x69, 0x43, 0x39, 0x18, 0x92, 0xce, 0xba, 0x87, 0x23, 0x67, 0x03, 0x2a, 0x09, 0x22, 0xca, 0x84, 0x10, 0x43, 0xe7, 0x2e, 0xc0, 0x20, 0x8c, 0x33, 0x76, 0xc0, 0x87, 0x41, 0x44, 0x1a, 0xd7, 0x3c, 0x0b, 0x23, 0x8c, 0x39, 0x0d, 0x86, 0x7c, 0x4c, 0x3a, 0xd1, 0x18, 0x02, 0x9c, 0x4d, 0xa8, 0x8d, 0x59, 0x30, 0x1a, 0xf3, 0xee, 0x0a, 0xa1, 0x15, 0xe4, 0xde, 0x80, 0xeb, 0x33, 0x76, 0xc8, 0xf5, 0xbb, 0x7f, 0x2b, 0xc3, 0xe6, 0x4e, 0xca, 0x70, 0x66, 0x27, 0x8e, 0xb8, 0x1f, 0x44, 0x2c, 0x5d, 0x66, 0x23, 0x5a, 0x74, 0x34, 0x8d, 0x86, 0x21, 0xdb, 0xf7, 
0x51, 0xad, 0x34, 0xd5, 0xc2, 0x90, 0xc5, 0x63, 0x36, 0x38, 0x4e, 0xe2, 0x20, 0xe2, 0x64, 0x31, 0xce, 0xe7, 0x18, 0x61, 0x71, 0x46, 0x8b, 0x91, 0x5e, 0x92, 0x80, 0xb0, 0x18, 0x07, 0xf1, 0x54, 0x5a, 0x5c, 0xf7, 0x14, 0xa4, 0xf0, 0x2c, 0x4d, 0xbb, 0x35, 0x83, 0x47, 0x48, 0xe0, 0x43, 0xff, 0x88, 0x85, 0x59, 0x77, 0xf5, 0x7e, 0x45, 0xe0, 0x25, 0xe4, 0xdc, 0x87, 0x46, 0x14, 0xef, 0x07, 0x27, 0x31, 0xf7, 0xe2, 0x98, 0x77, 0xd7, 0xc8, 0x61, 0x36, 0xca, 0xe9, 0xc2, 0x6a, 0x3a, 0x8d, 0x44, 0xdc, 0x74, 0xeb, 0x24, 0x52, 0x83, 0x82, 0x57, 0x0d, 0x9f, 0xa6, 0xa3, 0xac, 0x0b, 0x24, 0xd8, 0x46, 0x39, 0x9f, 0x40, 0x2b, 0x5f, 0xc9, 0x6e, 0x90, 0x76, 0x1b, 0x24, 0xa1, 0x88, 0x74, 0xf7, 0xe0, 0xc6, 0x9c, 0x2f, 0x55, 0x9c, 0x6d, 0x41, 0x7d, 0xa0, 0x91, 0xe4, 0xd3, 0xc6, 0xf6, 0xc6, 0x16, 0x85, 0xf6, 0x56, 0x4e, 0x9c, 0x93, 0xa0, 0xa8, 0xd6, 0x41, 0x30, 0x8a, 0xfc, 0xf0, 0xc3, 0x23, 0x46, 0x78, 0x8c, 0x58, 0x54, 0x7c, 0x2a, 0xc8, 0xdd, 0x80, 0xb6, 0x16, 0xa5, 0x36, 0xfd, 0x4f, 0x15, 0xe8, 0x3c, 0x1d, 0x0e, 0x2f, 0x89, 0x49, 0x0c, 0x6c, 0xce, 0x52, 0x0c, 0x7d, 0x94, 0x58, 0x26, 0x77, 0x1a, 0xd8, 0xb9, 0x07, 0xd5, 0x69, 0x86, 0x2b, 0xa9, 0xd0, 0x4a, 0x1a, 0x6a, 0x25, 0x6f, 0x11, 0xe5, 0xd1, 0x84, 0xe3, 0x40, 0xd5, 0x17, 0xbe, 0xac, 0x92, 0x2f, 0x69, 0x2c, 0x4c, 0x66, 0xd1, 0x09, 0xee, 0xb3, 0x40, 0x89, 0xa1, 0xc0, 0x0c, 0x4e, 0x87, 0x6a, 0x87, 0xc5, 0x50, 0x2f, 0x6b, 0x35, 0x5f, 0x96, 0x09, 0x9b, 0xb5, 0xc5, 0x61, 0x53, 0x5f, 0x12, 0x36, 0x50, 0x08, 0x1b, 0x17, 0x9a, 0x03, 0x3f, 0xf1, 0x8f, 0x82, 0x30, 0xe0, 0x01, 0xcb, 0x70, 0xff, 0x84, 0x11, 0x05, 0x9c, 0xf3, 0x00, 0xd6, 0xfd, 0x24, 0xf1, 0xd3, 0x49, 0x9c, 0xa2, 0x6b, 0xde, 0x07, 0x21, 0xeb, 0x36, 0x49, 0xc8, 0x2c, 0x5a, 0x48, 0xcb, 0x58, 0x18, 0x44, 0xd3, 0xb3, 0x17, 0x22, 0xfa, 0xba, 0x2d, 0x22, 0x2b, 0xe0, 0x84, 0xb4, 0x28, 0x7e, 0xc5, 0x4e, 0xf7, 0xd3, 0xe0, 0x04, 0x79, 0x46, 0xa8, 0xb4, 0x4d, 0x5e, 0x9c, 0x45, 0x3b, 0x5f, 0xc7, 0xc0, 0x0c, 0x83, 0x49, 0xc0, 0xb3, 0xee, 0x3a, 0x9a, 0xd5, 0xd8, 0x6e, 0x29, 0x7f, 0x7a, 0x84, 0xf5, 
0xf4, 0xac, 0xbb, 0x0b, 0x35, 0x89, 0x12, 0xee, 0x15, 0x24, 0x6a, 0xb7, 0x68, 0x2c, 0x70, 0x59, 0xfc, 0x9e, 0xd3, 0x5e, 0x55, 0x3d, 0x1a, 0x0b, 0xdc, 0xd8, 0x4f, 0x87, 0xb4, 0x4f, 0x88, 0x13, 0x63, 0xd7, 0x83, 0xaa, 0xd8, 0x28, 0xe1, 0xea, 0xa9, 0xda, 0xf0, 0x96, 0x27, 0x86, 0x02, 0x33, 0x52, 0x31, 0x85, 0x18, 0x1c, 0x3a, 0x5f, 0x83, 0xb6, 0x3f, 0x1c, 0xa2, 0x7b, 0x62, 0xdc, 0xf5, 0x2f, 0x82, 0x61, 0x86, 0x92, 0x2a, 0x38, 0x39, 0x83, 0x75, 0xb7, 0xc1, 0xb1, 0x03, 0x4a, 0x05, 0xfd, 0x6d, 0xa8, 0x67, 0xe7, 0x19, 0x67, 0x93, 0x7d, 0xa3, 0x27, 0x47, 0xb8, 0xbf, 0x2c, 0x99, 0x74, 0x31, 0x59, 0xb4, 0x2c, 0x16, 0xbf, 0x5d, 0xa8, 0x2d, 0x65, 0x8a, 0xba, 0x8e, 0xce, 0x9f, 0x9c, 0xdb, 0x2e, 0x37, 0x73, 0x29, 0x5b, 0x59, 0x94, 0xb2, 0x3d, 0xe8, 0xce, 0xdb, 0xa0, 0xd2, 0x64, 0x00, 0x37, 0x76, 0x59, 0xc8, 0x3e, 0xc4, 0x3e, 0xf4, 0x73, 0xe4, 0x63, 0x61, 0x91, 0xe9, 0x48, 0xe3, 0x0f, 0x37, 0x60, 0x5e, 0x89, 0x32, 0xe0, 0x25, 0x5c, 0x7f, 0x11, 0x64, 0xfc, 0x72, 0xf5, 0x73, 0xaa, 0xca, 0x8b, 0x54, 0xfd, 0xae, 0x04, 0x90, 0xcb, 0x32, 0x36, 0x97, 0x2c, 0x9b, 0x11, 0xc7, 0xce, 0x02, 0xae, 0xf2, 0x9d, 0xc6, 0x22, 0x2a, 0xf8, 0x20, 0x51, 0x47, 0x90, 0x18, 0x8a, 0x7a, 0x39, 0x8d, 0x82, 0xb3, 0x83, 0x78, 0x70, 0xcc, 0x78, 0x46, 0xf5, 0x1c, 0x6b, 0xad, 0x85, 0xa2, 0xa4, 0x1d, 0xb3, 0x30, 0xa4, 0xa2, 0xbe, 0xe6, 0x49, 0x40, 0x54, 0x60, 0x36, 0x49, 0xf8, 0xf9, 0xab, 0x03, 0x4c, 0x79, 0x91, 0x7f, 0x1a, 0xc4, 0x95, 0x6e, 0xce, 0xae, 0x54, 0xc5, 0xd0, 0x63, 0x68, 0xe4, 0xab, 0xc8, 0xd0, 0xd8, 0xca, 0xe2, 0xad, 0xb7, 0xa9, 0xdc, 0xbb, 0xd0, 0x3c, 0xe0, 0xb8, 0xa9, 0x4b, 0xfc, 0xe5, 0x3e, 0x80, 0xb6, 0xa9, 0xba, 0x44, 0x28, 0xeb, 0x86, 0xcf, 0xa7, 0x99, 0xa2, 0x52, 0x90, 0xfb, 0xe7, 0x0a, 0xac, 0xaa, 0xb0, 0xd6, 0xb5, 0xa9, 0x94, 0xd7, 0xa6, 0xff, 0x4a, 0x89, 0x2c, 0x64, 0xd5, 0xea, 0x4c, 0x56, 0xfd, 0xaf, 0x5c, 0xe6, 0xe5, 0xf2, 0xaf, 0x25, 0xa8, 0x9b, 0x6d, 0xfe, 0xe8, 0x76, 0xe6, 0x53, 0xa8, 0x27, 0x72, 0xe3, 0x99, 0xac, 0x7a, 0x8d, 0xed, 0xb6, 0x52, 0xa4, 0xeb, 0x5c, 0x4e, 0x60, 0xc5, 0x4f, 
0xd5, 0x8e, 0x1f, 0xab, 0x5d, 0x59, 0x29, 0xb4, 0x2b, 0xb8, 0xf9, 0x89, 0x28, 0xa7, 0x35, 0x2a, 0xa7, 0x34, 0xb6, 0x1b, 0x94, 0xd5, 0x42, 0x83, 0xe2, 0x7e, 0x0e, 0xab, 0x2f, 0xfd, 0xc1, 0x18, 0xd7, 0x21, 0x18, 0x07, 0x89, 0x0a, 0x53, 0x64, 0x14, 0x63, 0xa1, 0x64, 0xc2, 0xd0, 0xdf, 0xe7, 0xaa, 0xf6, 0x2b, 0xc8, 0x3d, 0xc6, 0x26, 0x42, 0xa6, 0x81, 0x4a, 0xa6, 0x47, 0x58, 0x46, 0xb5, 0x43, 0x74, 0x2e, 0xcd, 0xb7, 0x21, 0x16, 0x0d, 0x6e, 0xcb, 0xea, 0x44, 0x6a, 0x56, 0x55, 0x57, 0xfb, 0x40, 0xd9, 0xe3, 0xe9, 0x69, 0xf7, 0xd7, 0x25, 0xd8, 0x94, 0x3d, 0xe6, 0xa5, 0x9d, 0xe4, 0xe2, 0xde, 0x45, 0xba, 0xaf, 0x52, 0x70, 0xdf, 0x63, 0xa8, 0xa7, 0x2c, 0x8b, 0xa7, 0x29, 0xba, 0x99, 0x3c, 0xdb, 0xd8, 0xbe, 0xae, 0x33, 0x89, 0x74, 0x79, 0x6a, 0xd6, 0xcb, 0xe9, 0xdc, 0x7f, 0xd4, 0xa0, 0x5d, 0x9c, 0x15, 0x15, 0xeb, 0x28, 0x3c, 0x0e, 0xe2, 0x77, 0xb2, 0x39, 0x2e, 0x91, 0x9b, 0x6c, 0x94, 0xc8, 0x2a, 0xf4, 0xe5, 0x01, 0x9e, 0x90, 0xa8, 0x49, 0xba, 0x31, 0x47, 0xa8, 0xd9, 0x7d, 0x96, 0x06, 0xb1, 0x3e, 0x4c, 0x73, 0x84, 0x28, 0x03, 0x08, 0xbc, 0x99, 0xc6, 0xdc, 0x27, 0x23, 0xab, 0x9e, 0x81, 0xa9, 0x2b, 0xc6, 0x3d, 0x62, 0x7c, 0x47, 0xec, 0xda, 0x8a, 0xea, 0x8a, 0x0d, 0x26, 0x9f, 0x7f, 0xc9, 0x26, 0x99, 0x4a, 0x73, 0x0b, 0x23, 0x2c, 0x97, 0xbb, 0xf9, 0x42, 0x04, 0x35, 0x05, 0x06, 0x5a, 0x6e, 0xa1, 0x84, 0x04, 0x09, 0x1e, 0x9c, 0xfa, 0x09, 0xa5, 0x7d, 0xd5, 0xb3, 0x30, 0x18, 0xc8, 0x1d, 0x09, 0xa1, 0x37, 0xf0, 0x0e, 0xe4, 0x8b, 0x63, 0x9b, 0xca, 0x40, 0xd5, 0x9b, 0x9f, 0x10, 0xd4, 0xc7, 0x2c, 0x8d, 0x58, 0xf8, 0xd2, 0xd2, 0x0a, 0x92, 0x7a, 0x6e, 0xc2, 0xd9, 0x86, 0x6b, 0x12, 0x79, 0xb8, 0xb3, 0x6f, 0x33, 0x34, 0x88, 0x61, 0xe1, 0x9c, 0xc8, 0x74, 0x72, 0xfc, 0x0b, 0xe6, 0xbf, 0x57, 0xfb, 0xd1, 0x24, 0xf2, 0x59, 0xb4, 0xf3, 0x14, 0x3a, 0xd6, 0x16, 0xed, 0xe2, 0xad, 0x6a, 0xc0, 0xb0, 0x78, 0x88, 0xa8, 0xbd, 0xaa, 0xa2, 0xc0, 0x9e, 0xf2, 0xe6, 0xa9, 0x9d, 0xb7, 0xd0, 0x23, 0xe4, 0xe1, 0x18, 0x6f, 0x89, 0x3c, 0xc4, 0x88, 0xf0, 0x87, 0xcf, 0x92, 0x4c, 0xc9, 0x6a, 0x93, 0x2c, 0x1d, 0x51, 0x9a, 
0x46, 0x49, 0xbb, 0x80, 0xd1, 0x79, 0x07, 0xb7, 0x0a, 0xb3, 0xef, 0xd2, 0x80, 0xb3, 0x5c, 0xee, 0xfa, 0x45, 0x72, 0x2f, 0xe2, 0x9c, 0x13, 0x2c, 0xd4, 0xee, 0xc5, 0x46, 0xf0, 0xc6, 0x87, 0x0b, 0x2e, 0x72, 0x3a, 0x3f, 0x85, 0xdb, 0xf3, 0x7a, 0x2d, 0xc9, 0x9d, 0x8b, 0x24, 0x5f, 0xc8, 0xea, 0x7e, 0x1f, 0x5a, 0xcf, 0x42, 0x3c, 0xf8, 0xf7, 0x5e, 0x2b, 0x5d, 0x85, 0x4b, 0x75, 0x65, 0xe1, 0xa5, 0xba, 0xa2, 0x2e, 0xd5, 0xee, 0x2f, 0xa0, 0x59, 0xd8, 0xb0, 0xef, 0x50, 0xa6, 0x6a, 0x51, 0xea, 0xaa, 0x74, 0x4d, 0x99, 0x55, 0x50, 0xe3, 0xd9, 0x84, 0xa2, 0x82, 0x9c, 0xca, 0x60, 0x92, 0xed, 0xab, 0x82, 0x44, 0x76, 0x84, 0x79, 0xa0, 0xc9, 0x9b, 0x91, 0x85, 0x71, 0x7f, 0x06, 0xed, 0xe2, 0x62, 0xff, 0x6d, 0x0b, 0xb0, 0x32, 0xa7, 0x58, 0x73, 0x74, 0xff, 0x2d, 0xc6, 0xe2, 0x55, 0x62, 0xae, 0x26, 0xaa, 0xe6, 0xee, 0x1c, 0x5a, 0xcf, 0x4f, 0x18, 0x76, 0x2b, 0xba, 0x4a, 0x3e, 0x81, 0xba, 0x79, 0xd4, 0x50, 0xc5, 0xb6, 0xb7, 0x25, 0x9f, 0x3d, 0xb6, 0xf4, 0xb3, 0xc7, 0xd6, 0xa1, 0xa6, 0xf0, 0x72, 0x62, 0xb1, 0xc6, 0x8c, 0xc7, 0x29, 0x1b, 0xbe, 0x8e, 0xc2, 0x73, 0xfd, 0x56, 0x90, 0x63, 0x54, 0xfd, 0xad, 0x9a, 0xf6, 0xe7, 0xb7, 0x25, 0x58, 0x21, 0xdd, 0x0b, 0xef, 0x11, 0x92, 0xba, 0x6c, 0xaa, 0x75, 0xb1, 0x36, 0xb7, 0x4c, 0x6d, 0x56, 0x55, 0xbc, 0x9a, 0x57, 0xf1, 0xc2, 0x0a, 0x6a, 0x1f, 0xb1, 0x02, 0xf7, 0x37, 0x65, 0x68, 0xbe, 0x62, 0xfc, 0x34, 0x4e, 0x8f, 0xc5, 0x89, 0x95, 0x2d, 0x6c, 0x4e, 0x6f, 0xc2, 0x5a, 0x7a, 0xd6, 0x3f, 0x3a, 0xe7, 0xa6, 0x42, 0xaf, 0xa6, 0x67, 0xcf, 0x04, 0xe8, 0xdc, 0x01, 0xc0, 0xa9, 0x7d, 0x5f, 0x36, 0xa4, 0xaa, 0x40, 0xa7, 0x67, 0x0a, 0xe1, 0xdc, 0x82, 0xba, 0x77, 0xd6, 0xc7, 0xc6, 0x26, 0x4e, 0x33, 0x5d, 0xa1, 0xd3, 0xb3, 0xe7, 0x04, 0x0b, 0x5e, 0x9c, 0x1c, 0xa6, 0x71, 0x92, 0xb0, 0x21, 0x55, 0x68, 0xe2, 0xdd, 0x95, 0x08, 0xa1, 0xf5, 0x50, 0x6b, 0xad, 0x49, 0xad, 0x3c, 0xd7, 0x8a, 0x53, 0x89, 0xd2, 0x2a, 0x4b, 0x73, 0x9d, 0xdb, 0x5a, 0x0f, 0x8d, 0x56, 0x59, 0x97, 0xd7, 0xb8, 0xa5, 0xf5, 0x30, 0xd7, 0x5a, 0xd7, 0xbc, 0x4a, 0xab, 0xfb, 0xc7, 0x12, 0xac, 0xe1, 0xf9, 
0xf0, 0x36, 0xf3, 0x47, 0x0c, 0x5b, 0xc9, 0x06, 0xc7, 0xb3, 0x24, 0xec, 0x4f, 0x05, 0xa8, 0x4e, 0x2f, 0x20, 0x94, 0x24, 0xf8, 0x3f, 0x68, 0x26, 0x2c, 0xc5, 0x53, 0x43, 0x51, 0x94, 0x31, 0x99, 0xf1, 0x94, 0x90, 0x38, 0x49, 0xb2, 0x05, 0x57, 0x69, 0xae, 0x1f, 0x44, 0x7d, 0x59, 0x96, 0x27, 0xf1, 0x90, 0x29, 0x57, 0x75, 0x68, 0x6a, 0x2f, 0xfa, 0xd2, 0x4c, 0x38, 0xdf, 0x84, 0x8e, 0xa1, 0x17, 0xed, 0x2a, 0x51, 0x4b, 0xd7, 0xad, 0x2b, 0xea, 0xb7, 0x0a, 0x8d, 0x39, 0xac, 0x73, 0x28, 0x88, 0x46, 0xbb, 0x3e, 0x9e, 0x7a, 0xd8, 0xca, 0x24, 0x74, 0x36, 0x66, 0xca, 0x5a, 0x0d, 0x3a, 0xdf, 0x82, 0x0e, 0x57, 0xf9, 0x36, 0xec, 0x6b, 0x1a, 0xb9, 0x9b, 0x1b, 0x66, 0x62, 0x5f, 0x11, 0xff, 0x3f, 0xb4, 0x73, 0x62, 0x6a, 0x8c, 0xa4, 0xbd, 0x2d, 0x83, 0x15, 0xd1, 0xe4, 0xfe, 0x5e, 0x3a, 0x4b, 0x46, 0xce, 0xa7, 0x74, 0x54, 0x5b, 0xae, 0x6a, 0x6c, 0xaf, 0xeb, 0x16, 0x47, 0x39, 0x83, 0x8e, 0x67, 0xe9, 0x96, 0x1f, 0xc0, 0x3a, 0x37, 0xa6, 0xf7, 0x31, 0x53, 0x7d, 0x95, 0x7a, 0x33, 0x95, 0x50, 0x2d, 0xcc, 0x6b, 0xf3, 0xe2, 0x42, 0xd1, 0xf3, 0xb2, 0xf7, 0x56, 0x0a, 0xa5, 0x7d, 0x0d, 0x89, 0x23, 0x15, 0x58, 0x1e, 0xeb, 0xd8, 0x98, 0x67, 0xd2, 0x3a, 0x74, 0xcc, 0x60, 0x9a, 0xa6, 0x98, 0x7b, 0xda, 0x31, 0x0a, 0x14, 0xe5, 0x91, 0xfa, 0x56, 0xe5, 0x0c, 0x09, 0xb8, 0x31, 0x80, 0x3c, 0x3b, 0x49, 0x1b, 0xd2, 0xd8, 0x21, 0x20, 0x01, 0x11, 0x67, 0x13, 0xff, 0xcc, 0x6c, 0x3d, 0xc5, 0x19, 0x22, 0xe4, 0x02, 0x51, 0xe1, 0x7b, 0x3f, 0x08, 0x07, 0xea, 0x49, 0x0e, 0x15, 0x2a, 0x30, 0x57, 0x58, 0xb5, 0x15, 0xfe, 0xa1, 0x0c, 0x0d, 0xa9, 0x51, 0x1a, 0x8c, 0x54, 0x03, 0xec, 0xf0, 0x8c, 0x4a, 0x02, 0xb0, 0x07, 0x5f, 0xc9, 0xd5, 0xe5, 0xf7, 0xb1, 0xdc, 0x54, 0x6d, 0x1b, 0x76, 0x9c, 0x19, 0x36, 0x21, 0x96, 0x77, 0x16, 0x52, 0xd7, 0x05, 0x91, 0x34, 0xf8, 0x33, 0x68, 0xca, 0xf8, 0x54, 0x3c, 0xd5, 0x65, 0x3c, 0x0d, 0x49, 0x26, 0xb9, 0x1e, 0x8b, 0x6b, 0x0f, 0xda, 0x4b, 0x6d, 0x76, 0x63, 0xfb, 0x4e, 0x81, 0x9c, 0x56, 0xb2, 0x45, 0xdf, 0xe7, 0x11, 0xc7, 0x7e, 0x47, 0xd2, 0xf6, 0x9e, 0x00, 0xe4, 0x48, 0x51, 0xcf, 0x8e, 0xd9, 
0xb9, 0xbe, 0xde, 0xe1, 0x50, 0xac, 0xfd, 0xc4, 0x0f, 0xa7, 0xda, 0xa9, 0x12, 0xf8, 0x5e, 0xf9, 0x49, 0xc9, 0x1d, 0xc0, 0xfa, 0x33, 0x71, 0x24, 0x5a, 0xec, 0x85, 0x43, 0xaf, 0xba, 0xf0, 0xd0, 0xab, 0xea, 0x97, 0x64, 0x2c, 0xb1, 0x71, 0xa2, 0x5a, 0x5d, 0x1c, 0xe5, 0x8a, 0xaa, 0x96, 0x22, 0xf7, 0xef, 0x55, 0x80, 0x5c, 0x8b, 0x73, 0x00, 0xbd, 0x20, 0xee, 0x8b, 0x4e, 0x0d, 0x4f, 0x1b, 0x59, 0x90, 0xfa, 0x29, 0xc3, 0xf0, 0xc9, 0x82, 0x13, 0xa6, 0x9a, 0xf9, 0x4d, 0x73, 0x4c, 0x15, 0x8c, 0xf3, 0x6e, 0x20, 0x24, 0x19, 0xa9, 0x72, 0x79, 0x9a, 0xcd, 0xf9, 0x31, 0x5c, 0xcf, 0x85, 0x0e, 0x2d, 0x79, 0xe5, 0x0b, 0xe5, 0x5d, 0x35, 0xf2, 0x86, 0xb9, 0xac, 0x1f, 0x02, 0xa2, 0xfb, 0x78, 0x98, 0x4d, 0x0b, 0x92, 0x2a, 0x17, 0x4a, 0xea, 0x04, 0xf1, 0x1b, 0xe2, 0xc8, 0xe5, 0xbc, 0x81, 0x9b, 0xd6, 0x42, 0x45, 0xda, 0x5b, 0xd2, 0xaa, 0x17, 0x4a, 0xdb, 0x34, 0x76, 0x89, 0xc2, 0x90, 0x8b, 0xfc, 0x12, 0x70, 0xa6, 0x7f, 0xea, 0x07, 0x7c, 0x56, 0xde, 0xca, 0x65, 0xeb, 0x7c, 0x87, 0x4c, 0x45, 0x61, 0x72, 0x9d, 0x13, 0x96, 0x8e, 0x0a, 0xeb, 0xac, 0x5d, 0xb6, 0xce, 0x97, 0xc4, 0x91, 0xcb, 0x79, 0x06, 0x88, 0x9c, 0xb5, 0x67, 0xf5, 0x42, 0x29, 0xeb, 0xd8, 0x85, 0x15, 0x6c, 0xd9, 0x81, 0x4e, 0xc6, 0x06, 0x78, 0xd4, 0xdb, 0xb1, 0xb0, 0x76, 0xa1, 0x8c, 0x0d, 0xc5, 0x60, 0x84, 0xb8, 0x5f, 0x41, 0xf3, 0x47, 0xd3, 0x11, 0xe3, 0xe1, 0x91, 0xc9, 0xf9, 0xff, 0x74, 0x99, 0xf9, 0x27, 0x96, 0x99, 0x9d, 0x51, 0x1a, 0x4f, 0x93, 0x42, 0xd5, 0x96, 0x39, 0x3c, 0x57, 0xb5, 0x89, 0x86, 0xaa, 0xb6, 0xa4, 0xfe, 0x1c, 0x9a, 0xf2, 0xe6, 0xa2, 0x18, 0x64, 0x15, 0x72, 0xe6, 0x93, 0x5e, 0xdf, 0x94, 0x24, 0xdb, 0xb6, 0xba, 0x05, 0x2a, 0xae, 0x62, 0x35, 0xca, 0xdd, 0xe4, 0xc1, 0x51, 0x9e, 0x75, 0x7b, 0xd0, 0x1a, 0x4b, 0xdf, 0x28, 0x2e, 0x19, 0x80, 0x9f, 0x68, 0xe3, 0xf2, 0x35, 0x6c, 0xd9, 0x3e, 0x94, 0xae, 0x6e, 0x8e, 0x6d, 0xb7, 0x3e, 0x04, 0x10, 0xf7, 0xfc, 0xbe, 0x2e, 0x54, 0xf6, 0x4f, 0x00, 0x73, 0x42, 0x78, 0xf5, 0x44, 0x0f, 0x7b, 0x87, 0xd0, 0x99, 0x93, 0xb9, 0xa0, 0x4c, 0x7d, 0xc3, 0x2e, 0x53, 0xf9, 0xd5, 0xc8, 0x66, 
0xb5, 0x6b, 0xd7, 0x5f, 0x4a, 0xf2, 0x59, 0x20, 0x7f, 0xa7, 0x7d, 0x02, 0xad, 0x48, 0x36, 0x5f, 0x66, 0x03, 0xec, 0x3b, 0x96, 0xdd, 0x98, 0x79, 0xcd, 0xc8, 0x6e, 0xd3, 0x70, 0x23, 0x06, 0xe4, 0x81, 0x85, 0x1b, 0x61, 0x39, 0xc7, 0x6b, 0x0c, 0xac, 0xdd, 0x2e, 0x34, 0x8a, 0xd5, 0x8f, 0x69, 0x14, 0xd5, 0xcb, 0xde, 0xb2, 0x9f, 0x16, 0xdb, 0x78, 0xf7, 0xaf, 0x3c, 0xdd, 0xdf, 0xc3, 0x7b, 0xdf, 0xc6, 0xec, 0x3f, 0x3f, 0xe7, 0xae, 0x32, 0x6b, 0xc9, 0x7f, 0xc2, 0xde, 0xbd, 0xa5, 0xf3, 0xaa, 0x65, 0xbf, 0xe2, 0x78, 0xb0, 0x3e, 0xf3, 0x87, 0xc7, 0xd1, 0x47, 0xcd, 0xe2, 0xbf, 0x68, 0xbd, 0xbb, 0xcb, 0xa6, 0x6d, 0x99, 0x33, 0x77, 0x04, 0x23, 0x73, 0xf1, 0x7b, 0x8a, 0x91, 0xb9, 0xec, 0x6a, 0x71, 0xc5, 0xf9, 0x2e, 0xd4, 0xe4, 0x3f, 0x1f, 0x47, 0x5f, 0x5c, 0x0a, 0x7f, 0x93, 0x7a, 0xd7, 0x67, 0xb0, 0x86, 0xf1, 0x05, 0xb4, 0x0a, 0x3f, 0x0a, 0x9d, 0x5b, 0x05, 0x5d, 0xc5, 0x5f, 0x46, 0xbd, 0xdb, 0x8b, 0x27, 0x8d, 0xb4, 0x1d, 0x80, 0xfc, 0xb7, 0x80, 0xd3, 0x55, 0xd4, 0x73, 0xbf, 0x9e, 0x7a, 0x37, 0x17, 0xcc, 0x18, 0x21, 0xb8, 0x95, 0xb3, 0x4f, 0xf4, 0xce, 0x8c, 0x57, 0x67, 0x1f, 0xc8, 0xcd, 0x56, 0x2e, 0x7d, 0xdb, 0x27, 0xb1, 0xb3, 0x0f, 0xef, 0x46, 0xec, 0x92, 0x67, 0x7f, 0x23, 0x76, 0xe9, 0x8b, 0xfd, 0x15, 0xe7, 0x35, 0xb4, 0x8b, 0x2f, 0xd9, 0x8e, 0x76, 0xd2, 0xc2, 0xa7, 0xfc, 0xde, 0x9d, 0x25, 0xb3, 0x46, 0xe0, 0x67, 0xb0, 0x22, 0x9f, 0xa8, 0x75, 0x3a, 0xda, 0x2f, 0xdb, 0xbd, 0x6b, 0x45, 0xa4, 0xe1, 0x7a, 0x04, 0x35, 0x79, 0xbb, 0x34, 0x01, 0x50, 0xb8, 0x6c, 0xf6, 0x9a, 0x36, 0xd6, 0xbd, 0xf2, 0xa8, 0xa4, 0xf5, 0x64, 0x05, 0x3d, 0xd9, 0x22, 0x3d, 0xd6, 0xe6, 0x1c, 0xd5, 0x28, 0x5d, 0x1f, 0xff, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x11, 0x58, 0x45, 0xd9, 0xb2, 0x1f, 0x00, 0x00, } docker-containerd-tags-docker-1.13.1/api/grpc/types/api.proto000066400000000000000000000223311304421264600241020ustar00rootroot00000000000000syntax = "proto3"; package types; import "google/protobuf/timestamp.proto"; service API { rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {} 
rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {} rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {} rpc Signal(SignalRequest) returns (SignalResponse) {} rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {} rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {} rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {} rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {} rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {} rpc State(StateRequest) returns (StateResponse) {} rpc Events(EventsRequest) returns (stream Event) {} rpc Stats(StatsRequest) returns (StatsResponse) {} } message GetServerVersionRequest { } message GetServerVersionResponse { uint32 major = 1; uint32 minor = 2; uint32 patch = 3; string revision = 4; } message UpdateProcessRequest { string id = 1; string pid = 2; bool closeStdin = 3; // Close stdin of the container uint32 width = 4; uint32 height = 5; } message UpdateProcessResponse { } message CreateContainerRequest { string id = 1; // ID of container string bundlePath = 2; // path to OCI bundle string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional) string stdin = 4; // path to the file where stdin will be read (optional) string stdout = 5; // path to file where stdout will be written (optional) string stderr = 6; // path to file where stderr will be written (optional) repeated string labels = 7; bool noPivotRoot = 8; string runtime = 9; repeated string runtimeArgs = 10; string checkpointDir = 11; // Directory where checkpoints are stored } message CreateContainerResponse { Container container = 1; } message SignalRequest { string id = 1; // ID of container string pid = 2; // PID of process inside container uint32 signal = 3; // Signal which will be sent, you can find value in "man 7 signal" } message SignalResponse { } message 
AddProcessRequest { string id = 1; // ID of container bool terminal = 2; // Use tty for container stdio User user = 3; // User under which process will be run repeated string args = 4; // Arguments for process, first is binary path itself repeated string env = 5; // List of environment variables for process string cwd = 6; // Working directory of process string pid = 7; // Process ID string stdin = 8; // path to the file where stdin will be read (optional) string stdout = 9; // path to file where stdout will be written (optional) string stderr = 10; // path to file where stderr will be written (optional) repeated string capabilities = 11; string apparmorProfile = 12; string selinuxLabel = 13; bool noNewPrivileges = 14; repeated Rlimit rlimits = 15; } message Rlimit { string type = 1; uint64 soft = 2; uint64 hard = 3; } message User { uint32 uid = 1; // UID of user uint32 gid = 2; // GID of user repeated uint32 additionalGids = 3; // Additional groups to which user will be added } message AddProcessResponse { uint32 systemPid = 1; } message CreateCheckpointRequest { string id = 1; // ID of container Checkpoint checkpoint = 2; // Checkpoint configuration string checkpointDir = 3; // Directory where checkpoints are stored } message CreateCheckpointResponse { } message DeleteCheckpointRequest { string id = 1; // ID of container string name = 2; // Name of checkpoint string checkpointDir = 3; // Directory where checkpoints are stored } message DeleteCheckpointResponse { } message ListCheckpointRequest { string id = 1; // ID of container string checkpointDir = 2; // Directory where checkpoints are stored } message Checkpoint { string name = 1; // Name of checkpoint bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not bool tcp = 3; // allow open tcp connections bool unixSockets = 4; // allow external unix sockets bool shell = 5; // allow shell-jobs repeated string emptyNS = 6; } message ListCheckpointResponse { repeated Checkpoint 
checkpoints = 1; // List of checkpoints } message StateRequest { string id = 1; // container id for a single container } message ContainerState { string status = 1; } message Process { string pid = 1; bool terminal = 2; // Use tty for container stdio User user = 3; // User under which process will be run repeated string args = 4; // Arguments for process, first is binary path itself repeated string env = 5; // List of environment variables for process string cwd = 6; // Working directory of process uint32 systemPid = 7; string stdin = 8; // path to the file where stdin will be read (optional) string stdout = 9; // path to file where stdout will be written (optional) string stderr = 10; // path to file where stderr will be written (optional) repeated string capabilities = 11; string apparmorProfile = 12; string selinuxLabel = 13; bool noNewPrivileges = 14; repeated Rlimit rlimits = 15; } message Container { string id = 1; // ID of container string bundlePath = 2; // Path to OCI bundle repeated Process processes = 3; // List of processes which run in container string status = 4; // Container status ("running", "paused", etc.) 
repeated string labels = 5; repeated uint32 pids = 6; string runtime = 7; // runtime used to execute the container } // Machine is information about machine on which containerd is run message Machine { uint32 cpus = 1; // number of cpus uint64 memory = 2; // amount of memory } // StateResponse is information about containerd daemon message StateResponse { repeated Container containers = 1; Machine machine = 2; } message UpdateContainerRequest { string id = 1; // ID of container string pid = 2; string status = 3; // Status to which containerd will try to change UpdateResource resources =4; } message UpdateResource { uint64 blkioWeight = 1; uint64 cpuShares = 2; uint64 cpuPeriod = 3; uint64 cpuQuota = 4; string cpusetCpus = 5; string cpusetMems = 6; uint64 memoryLimit = 7; uint64 memorySwap = 8; uint64 memoryReservation = 9; uint64 kernelMemoryLimit = 10; uint64 kernelTCPMemoryLimit = 11; uint64 blkioLeafWeight = 12; repeated WeightDevice blkioWeightDevice = 13; repeated ThrottleDevice blkioThrottleReadBpsDevice = 14; repeated ThrottleDevice blkioThrottleWriteBpsDevice = 15; repeated ThrottleDevice blkioThrottleReadIopsDevice = 16; repeated ThrottleDevice blkioThrottleWriteIopsDevice = 17; } message BlockIODevice { int64 major = 1; int64 minor = 2; } message WeightDevice { BlockIODevice blkIODevice = 1; uint32 weight = 2; uint32 leafWeight = 3; } message ThrottleDevice { BlockIODevice blkIODevice = 1; uint64 rate = 2; } message UpdateContainerResponse { } message EventsRequest { // Tag 1 is deprecated (old uint64 timestamp) google.protobuf.Timestamp timestamp = 2; bool storedOnly = 3; string id = 4; } message Event { string type = 1; string id = 2; uint32 status = 3; string pid = 4; // Tag 5 is deprecated (old uint64 timestamp) google.protobuf.Timestamp timestamp = 6; } message NetworkStats { string name = 1; // name of network interface uint64 rx_bytes = 2; uint64 rx_Packets = 3; uint64 Rx_errors = 4; uint64 Rx_dropped = 5; uint64 Tx_bytes = 6; uint64 Tx_packets = 
7; uint64 Tx_errors = 8; uint64 Tx_dropped = 9; } message CpuUsage { uint64 total_usage = 1; repeated uint64 percpu_usage = 2; uint64 usage_in_kernelmode = 3; uint64 usage_in_usermode = 4; } message ThrottlingData { uint64 periods = 1; uint64 throttled_periods = 2; uint64 throttled_time = 3; } message CpuStats { CpuUsage cpu_usage = 1; ThrottlingData throttling_data = 2; uint64 system_usage = 3; } message PidsStats { uint64 current = 1; uint64 limit = 2; } message MemoryData { uint64 usage = 1; uint64 max_usage = 2; uint64 failcnt = 3; uint64 limit = 4; } message MemoryStats { uint64 cache = 1; MemoryData usage = 2; MemoryData swap_usage = 3; MemoryData kernel_usage = 4; map stats = 5; } message BlkioStatsEntry { uint64 major = 1; uint64 minor = 2; string op = 3; uint64 value = 4; } message BlkioStats { repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device repeated BlkioStatsEntry io_serviced_recursive = 2; repeated BlkioStatsEntry io_queued_recursive = 3; repeated BlkioStatsEntry io_service_time_recursive = 4; repeated BlkioStatsEntry io_wait_time_recursive = 5; repeated BlkioStatsEntry io_merged_recursive = 6; repeated BlkioStatsEntry io_time_recursive = 7; repeated BlkioStatsEntry sectors_recursive = 8; } message HugetlbStats { uint64 usage = 1; uint64 max_usage = 2; uint64 failcnt = 3; uint64 limit = 4; } message CgroupStats { CpuStats cpu_stats = 1; MemoryStats memory_stats = 2; BlkioStats blkio_stats = 3; map hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage" PidsStats pids_stats = 5; } message StatsResponse { repeated NetworkStats network_stats = 1; CgroupStats cgroup_stats = 2; // Tag 3 is deprecated (old uint64 timestamp) google.protobuf.Timestamp timestamp = 4; }; message StatsRequest { string id = 1; } 
docker-containerd-tags-docker-1.13.1/api/http/000077500000000000000000000000001304421264600211235ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/api/http/pprof/000077500000000000000000000000001304421264600222515ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/api/http/pprof/pprof.go000066400000000000000000000007731304421264600237350ustar00rootroot00000000000000package pprof import ( // expvar init routine adds the "/debug/vars" handler _ "expvar" "net/http" // net/http/pprof installs the "/debug/pprof/{block,heap,goroutine,threadcreate}" handler _ "net/http/pprof" "github.com/Sirupsen/logrus" ) // Enable registers the "/debug/pprof" handler func Enable(address string) { http.Handle("/", http.RedirectHandler("/debug/pprof", http.StatusMovedPermanently)) go http.ListenAndServe(address, nil) logrus.Debug("pprof listening in address %s", address) } docker-containerd-tags-docker-1.13.1/archutils/000077500000000000000000000000001304421264600213715ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/archutils/epoll.go000066400000000000000000000010341304421264600230310ustar00rootroot00000000000000// +build linux,!arm64 package archutils import ( "syscall" ) // EpollCreate1 directly calls syscall.EpollCreate1 func EpollCreate1(flag int) (int, error) { return syscall.EpollCreate1(flag) } // EpollCtl directly calls syscall.EpollCtl func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error { return syscall.EpollCtl(epfd, op, fd, event) } // EpollWait directly calls syscall.EpollWait func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { return syscall.EpollWait(epfd, events, msec) } docker-containerd-tags-docker-1.13.1/archutils/epoll_arm64.go000066400000000000000000000031441304421264600240460ustar00rootroot00000000000000// +build linux,arm64 package archutils // #include /* int EpollCreate1(int flag) { return epoll_create1(flag); } int EpollCtl(int efd, int op,int sfd, int events, int fd) { 
struct epoll_event event; event.events = events; event.data.fd = fd; return epoll_ctl(efd, op, sfd, &event); } struct event_t { uint32_t events; int fd; }; struct epoll_event events[128]; int run_epoll_wait(int fd, struct event_t *event) { int n, i; n = epoll_wait(fd, events, 128, -1); for (i = 0; i < n; i++) { event[i].events = events[i].events; event[i].fd = events[i].data.fd; } return n; } */ import "C" import ( "fmt" "syscall" "unsafe" ) // EpollCreate1 calls a C implementation func EpollCreate1(flag int) (int, error) { fd := int(C.EpollCreate1(C.int(flag))) if fd < 0 { return fd, fmt.Errorf("failed to create epoll, errno is %d", fd) } return fd, nil } // EpollCtl calls a C implementation func EpollCtl(epfd int, op int, fd int, event *syscall.EpollEvent) error { errno := C.EpollCtl(C.int(epfd), C.int(syscall.EPOLL_CTL_ADD), C.int(fd), C.int(event.Events), C.int(event.Fd)) if errno < 0 { return fmt.Errorf("Failed to ctl epoll") } return nil } // EpollWait calls a C implementation func EpollWait(epfd int, events []syscall.EpollEvent, msec int) (int, error) { var c_events [128]C.struct_event_t n := int(C.run_epoll_wait(C.int(epfd), (*C.struct_event_t)(unsafe.Pointer(&c_events)))) if n < 0 { return int(n), fmt.Errorf("Failed to wait epoll") } for i := 0; i < n; i++ { events[i].Fd = int32(c_events[i].fd) events[i].Events = uint32(c_events[i].events) } return int(n), nil } docker-containerd-tags-docker-1.13.1/containerd-shim/000077500000000000000000000000001304421264600224575ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/console.go000066400000000000000000000030151304421264600244470ustar00rootroot00000000000000// +build !solaris package main import ( "fmt" "os" "syscall" "unsafe" ) // NewConsole returns an initialized console that can be used within a container by copying bytes // from the master side to the slave that is attached as the tty for the container's init process. 
func newConsole(uid, gid int) (*os.File, string, error) { master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0) if err != nil { return nil, "", err } console, err := ptsname(master) if err != nil { return nil, "", err } if err := unlockpt(master); err != nil { return nil, "", err } if err := os.Chmod(console, 0600); err != nil { return nil, "", err } if err := os.Chown(console, uid, gid); err != nil { return nil, "", err } return master, console, nil } func ioctl(fd uintptr, flag, data uintptr) error { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 { return err } return nil } // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. func unlockpt(f *os.File) error { var u int32 return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) } // ptsname retrieves the name of the first available pts for the given master. func ptsname(f *os.File) (string, error) { var n int32 if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil { return "", err } return fmt.Sprintf("/dev/pts/%d", n), nil } docker-containerd-tags-docker-1.13.1/containerd-shim/console_solaris.go000066400000000000000000000006021304421264600262020ustar00rootroot00000000000000// +build solaris package main import ( "errors" "os" ) // NewConsole returns an initalized console that can be used within a container by copying bytes // from the master side to the slave that is attached as the tty for the container's init process. 
func newConsole(uid, gid int) (*os.File, string, error) { return nil, "", errors.New("newConsole not implemented on Solaris") } docker-containerd-tags-docker-1.13.1/containerd-shim/example/000077500000000000000000000000001304421264600241125ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/config.json000066400000000000000000000000001304421264600262400ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/000077500000000000000000000000001304421264600250555ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/exit000066400000000000000000000000001304421264600257370ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/pid000066400000000000000000000000001304421264600255420ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/process.json000066400000000000000000000000001304421264600274140ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/resize000066400000000000000000000000001304421264600262670ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/stderr000066400000000000000000000000001304421264600262710ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/stdin000066400000000000000000000000001304421264600261070ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/init/stdout000066400000000000000000000000001304421264600263100ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/000077500000000000000000000000001304421264600253715ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/exit000066400000000000000000000000001304421264600262530ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/p
id000066400000000000000000000000001304421264600260560ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/process.json000066400000000000000000000000001304421264600277300ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/resize000066400000000000000000000000001304421264600266030ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/stderr000066400000000000000000000000001304421264600266050ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/stdin000066400000000000000000000000001304421264600264230ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/example/logger/stdout000066400000000000000000000000001304421264600266240ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd-shim/main.go000066400000000000000000000075231304421264600237410ustar00rootroot00000000000000package main import ( "flag" "fmt" "os" "os/signal" "path/filepath" "runtime" "syscall" "github.com/docker/containerd/osutils" "github.com/docker/docker/pkg/term" ) func writeMessage(f *os.File, level string, err error) { fmt.Fprintf(f, `{"level": "%s","msg": "%s"}`, level, err) } type controlMessage struct { Type int Width int Height int } // containerd-shim is a small shim that sits in front of a runtime implementation // that allows it to be repartented to init and handle reattach from the caller. // // the cwd of the shim should be the path to the state directory where the shim // can locate fifos and other information. 
// Arg0: id of the container // Arg1: bundle path // Arg2: runtime binary func main() { flag.Parse() cwd, err := os.Getwd() if err != nil { panic(err) } f, err := os.OpenFile(filepath.Join(cwd, "shim-log.json"), os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) if err != nil { panic(err) } if err := start(f); err != nil { // this means that the runtime failed starting the container and will have the // proper error messages in the runtime log so we should to treat this as a // shim failure because the sim executed properly if err == errRuntime { f.Close() return } // log the error instead of writing to stderr because the shim will have // /dev/null as it's stdio because it is supposed to be reparented to system // init and will not have anyone to read from it writeMessage(f, "error", err) f.Close() os.Exit(1) } } func start(log *os.File) error { // start handling signals as soon as possible so that things are properly reaped // or if runtime exits before we hit the handler signals := make(chan os.Signal, 2048) signal.Notify(signals) // set the shim as the subreaper for all orphaned processes created by the container if err := osutils.SetSubreaper(1); err != nil { return err } // open the exit pipe f, err := os.OpenFile("exit", syscall.O_WRONLY, 0) if err != nil { return err } defer f.Close() control, err := os.OpenFile("control", syscall.O_RDWR, 0) if err != nil { return err } defer control.Close() p, err := newProcess(flag.Arg(0), flag.Arg(1), flag.Arg(2)) if err != nil { return err } defer func() { if err := p.Close(); err != nil { writeMessage(log, "warn", err) } }() if err := p.create(); err != nil { p.delete() return err } msgC := make(chan controlMessage, 32) go func() { for { var m controlMessage if _, err := fmt.Fscanf(control, "%d %d %d\n", &m.Type, &m.Width, &m.Height); err != nil { continue } msgC <- m } }() if runtime.GOOS == "solaris" { return nil } var exitShim bool for { select { case s := <-signals: switch s { case syscall.SIGCHLD: exits, _ := 
osutils.Reap(false) for _, e := range exits { // check to see if runtime is one of the processes that has exited if e.Pid == p.pid() { exitShim = true writeInt("exitStatus", e.Status) } } } // runtime has exited so the shim can also exit if exitShim { // kill all processes in the container incase it was not running in // its own PID namespace p.killAll() // wait for all the processes and IO to finish p.Wait() // delete the container from the runtime p.delete() // the close of the exit fifo will happen when the shim exits return nil } case msg := <-msgC: switch msg.Type { case 0: // close stdin if p.stdinCloser != nil { p.stdinCloser.Close() } case 1: if p.console == nil { continue } ws := term.Winsize{ Width: uint16(msg.Width), Height: uint16(msg.Height), } term.SetWinsize(p.console.Fd(), &ws) } } } return nil } func writeInt(path string, i int) error { f, err := os.Create(path) if err != nil { return err } defer f.Close() _, err = fmt.Fprintf(f, "%d", i) return err } docker-containerd-tags-docker-1.13.1/containerd-shim/process.go000066400000000000000000000150011304421264600244610ustar00rootroot00000000000000package main import ( "encoding/json" "errors" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strconv" "sync" "syscall" "time" "github.com/docker/containerd/specs" ) var errRuntime = errors.New("shim: runtime execution error") type checkpoint struct { // Timestamp is the time that checkpoint happened Created time.Time `json:"created"` // Name is the name of the checkpoint Name string `json:"name"` // TCP checkpoints open tcp connections TCP bool `json:"tcp"` // UnixSockets persists unix sockets in the checkpoint UnixSockets bool `json:"unixSockets"` // Shell persists tty sessions in the checkpoint Shell bool `json:"shell"` // Exit exits the container after the checkpoint is finished Exit bool `json:"exit"` // EmptyNS tells CRIU not to restore a particular namespace EmptyNS []string `json:"emptyNS,omitempty"` } type processState struct { 
specs.ProcessSpec Exec bool `json:"exec"` Stdin string `json:"containerdStdin"` Stdout string `json:"containerdStdout"` Stderr string `json:"containerdStderr"` RuntimeArgs []string `json:"runtimeArgs"` NoPivotRoot bool `json:"noPivotRoot"` CheckpointPath string `json:"checkpoint"` RootUID int `json:"rootUID"` RootGID int `json:"rootGID"` } type process struct { sync.WaitGroup id string bundle string stdio *stdio exec bool containerPid int checkpoint *checkpoint checkpointPath string shimIO *IO stdinCloser io.Closer console *os.File consolePath string state *processState runtime string } func newProcess(id, bundle, runtimeName string) (*process, error) { p := &process{ id: id, bundle: bundle, runtime: runtimeName, } s, err := loadProcess() if err != nil { return nil, err } p.state = s if s.CheckpointPath != "" { cpt, err := loadCheckpoint(s.CheckpointPath) if err != nil { return nil, err } p.checkpoint = cpt p.checkpointPath = s.CheckpointPath } if err := p.openIO(); err != nil { return nil, err } return p, nil } func loadProcess() (*processState, error) { f, err := os.Open("process.json") if err != nil { return nil, err } defer f.Close() var s processState if err := json.NewDecoder(f).Decode(&s); err != nil { return nil, err } return &s, nil } func loadCheckpoint(checkpointPath string) (*checkpoint, error) { f, err := os.Open(filepath.Join(checkpointPath, "config.json")) if err != nil { return nil, err } defer f.Close() var cpt checkpoint if err := json.NewDecoder(f).Decode(&cpt); err != nil { return nil, err } return &cpt, nil } func (p *process) create() error { cwd, err := os.Getwd() if err != nil { return err } logPath := filepath.Join(cwd, "log.json") args := append([]string{ "--log", logPath, "--log-format", "json", }, p.state.RuntimeArgs...) 
if p.state.Exec { args = append(args, "exec", "-d", "--process", filepath.Join(cwd, "process.json"), "--console", p.consolePath, ) } else if p.checkpoint != nil { args = append(args, "restore", "-d", "--image-path", p.checkpointPath, "--work-path", filepath.Join(p.checkpointPath, "criu.work", "restore-"+time.Now().Format(time.RFC3339)), ) add := func(flags ...string) { args = append(args, flags...) } if p.checkpoint.Shell { add("--shell-job") } if p.checkpoint.TCP { add("--tcp-established") } if p.checkpoint.UnixSockets { add("--ext-unix-sk") } if p.state.NoPivotRoot { add("--no-pivot") } for _, ns := range p.checkpoint.EmptyNS { add("--empty-ns", ns) } } else { args = append(args, "create", "--bundle", p.bundle, "--console", p.consolePath, ) if p.state.NoPivotRoot { args = append(args, "--no-pivot") } } args = append(args, "--pid-file", filepath.Join(cwd, "pid"), p.id, ) cmd := exec.Command(p.runtime, args...) cmd.Dir = p.bundle cmd.Stdin = p.stdio.stdin cmd.Stdout = p.stdio.stdout cmd.Stderr = p.stdio.stderr // Call out to setPDeathSig to set SysProcAttr as elements are platform specific cmd.SysProcAttr = setPDeathSig() if err := cmd.Start(); err != nil { if exErr, ok := err.(*exec.Error); ok { if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist { return fmt.Errorf("%s not installed on system", p.runtime) } } return err } if runtime.GOOS != "solaris" { // Since current logic dictates that we need a pid at the end of p.create // we need to call runtime start as well on Solaris hence we need the // pipes to stay open. 
p.stdio.stdout.Close() p.stdio.stderr.Close() } if err := cmd.Wait(); err != nil { if _, ok := err.(*exec.ExitError); ok { return errRuntime } return err } data, err := ioutil.ReadFile("pid") if err != nil { return err } pid, err := strconv.Atoi(string(data)) if err != nil { return err } p.containerPid = pid return nil } func (p *process) pid() int { return p.containerPid } func (p *process) delete() error { if !p.state.Exec { cmd := exec.Command(p.runtime, append(p.state.RuntimeArgs, "delete", p.id)...) cmd.SysProcAttr = setPDeathSig() out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("%s: %v", out, err) } } return nil } // IO holds all 3 standard io Reader/Writer (stdin,stdout,stderr) type IO struct { Stdin io.WriteCloser Stdout io.ReadCloser Stderr io.ReadCloser } func (p *process) initializeIO(rootuid int) (i *IO, err error) { var fds []uintptr i = &IO{} // cleanup in case of an error defer func() { if err != nil { for _, fd := range fds { syscall.Close(int(fd)) } } }() // STDIN r, w, err := os.Pipe() if err != nil { return nil, err } fds = append(fds, r.Fd(), w.Fd()) p.stdio.stdin, i.Stdin = r, w // STDOUT if r, w, err = os.Pipe(); err != nil { return nil, err } fds = append(fds, r.Fd(), w.Fd()) p.stdio.stdout, i.Stdout = w, r // STDERR if r, w, err = os.Pipe(); err != nil { return nil, err } fds = append(fds, r.Fd(), w.Fd()) p.stdio.stderr, i.Stderr = w, r // change ownership of the pipes in case we are in a user namespace for _, fd := range fds { if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil { return nil, err } } return i, nil } func (p *process) Close() error { return p.stdio.Close() } type stdio struct { stdin *os.File stdout *os.File stderr *os.File } func (s *stdio) Close() error { err := s.stdin.Close() if oerr := s.stdout.Close(); err == nil { err = oerr } if oerr := s.stderr.Close(); err == nil { err = oerr } return err } 
docker-containerd-tags-docker-1.13.1/containerd-shim/process_linux.go000066400000000000000000000055151304421264600257110ustar00rootroot00000000000000// +build !solaris package main import ( "fmt" "io" "os/exec" "syscall" "time" "github.com/tonistiigi/fifo" "golang.org/x/net/context" ) // setPDeathSig sets the parent death signal to SIGKILL so that if the // shim dies the container process also dies. func setPDeathSig() *syscall.SysProcAttr { return &syscall.SysProcAttr{ Pdeathsig: syscall.SIGKILL, } } // openIO opens the pre-created fifo's for use with the container // in RDWR so that they remain open if the other side stops listening func (p *process) openIO() error { p.stdio = &stdio{} var ( uid = p.state.RootUID gid = p.state.RootGID ) ctx, _ := context.WithTimeout(context.Background(), 15*time.Second) stdinCloser, err := fifo.OpenFifo(ctx, p.state.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { return err } p.stdinCloser = stdinCloser if p.state.Terminal { master, console, err := newConsole(uid, gid) if err != nil { return err } p.console = master p.consolePath = console stdin, err := fifo.OpenFifo(ctx, p.state.Stdin, syscall.O_RDONLY, 0) if err != nil { return err } go io.Copy(master, stdin) stdoutw, err := fifo.OpenFifo(ctx, p.state.Stdout, syscall.O_WRONLY, 0) if err != nil { return err } stdoutr, err := fifo.OpenFifo(ctx, p.state.Stdout, syscall.O_RDONLY, 0) if err != nil { return err } p.Add(1) go func() { io.Copy(stdoutw, master) master.Close() stdoutr.Close() stdoutw.Close() p.Done() }() return nil } i, err := p.initializeIO(uid) if err != nil { return err } p.shimIO = i // non-tty for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){ p.state.Stdout: func(wc io.WriteCloser, rc io.Closer) { p.Add(1) go func() { io.Copy(wc, i.Stdout) p.Done() wc.Close() rc.Close() }() }, p.state.Stderr: func(wc io.WriteCloser, rc io.Closer) { p.Add(1) go func() { io.Copy(wc, i.Stderr) p.Done() wc.Close() rc.Close() }() }, } { fw, err := 
fifo.OpenFifo(ctx, name, syscall.O_WRONLY, 0) if err != nil { return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err) } fr, err := fifo.OpenFifo(ctx, name, syscall.O_RDONLY, 0) if err != nil { return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err) } dest(fw, fr) } f, err := fifo.OpenFifo(ctx, p.state.Stdin, syscall.O_RDONLY, 0) if err != nil { return fmt.Errorf("containerd-shim: opening %s failed: %s", p.state.Stdin, err) } go func() { io.Copy(i.Stdin, f) i.Stdin.Close() f.Close() }() return nil } func (p *process) killAll() error { if !p.state.Exec { cmd := exec.Command(p.runtime, append(p.state.RuntimeArgs, "kill", "--all", p.id, "SIGKILL")...) cmd.SysProcAttr = setPDeathSig() out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("%s: %v", out, err) } } return nil } docker-containerd-tags-docker-1.13.1/containerd-shim/process_solaris.go000066400000000000000000000025221304421264600262210ustar00rootroot00000000000000// +build solaris package main import ( "io" "os" "syscall" ) // setPDeathSig is a no-op on Solaris as Pdeathsig is not defined. func setPDeathSig() *syscall.SysProcAttr { return nil } // TODO: Update to using fifo's package in openIO. Need to // 1. Merge and vendor changes in the package to use sys/unix. // 2. Figure out why context.Background is timing out. 
// openIO opens the pre-created fifo's for use with the container // in RDWR so that they remain open if the other side stops listening func (p *process) openIO() error { p.stdio = &stdio{} var ( uid = p.state.RootUID ) i, err := p.initializeIO(uid) if err != nil { return err } p.shimIO = i // Both tty and non-tty mode are handled by the runtime using // the following pipes for name, dest := range map[string]func(f *os.File){ p.state.Stdout: func(f *os.File) { p.Add(1) go func() { io.Copy(f, i.Stdout) p.Done() }() }, p.state.Stderr: func(f *os.File) { p.Add(1) go func() { io.Copy(f, i.Stderr) p.Done() }() }, } { f, err := os.OpenFile(name, syscall.O_RDWR, 0) if err != nil { return err } dest(f) } f, err := os.OpenFile(p.state.Stdin, syscall.O_RDONLY, 0) if err != nil { return err } go func() { io.Copy(i.Stdin, f) i.Stdin.Close() }() return nil } func (p *process) killAll() error { return nil } docker-containerd-tags-docker-1.13.1/containerd/000077500000000000000000000000001304421264600215215ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/containerd/main.go000066400000000000000000000144641304421264600230050ustar00rootroot00000000000000package main import ( "fmt" "log" "net" "os" "os/signal" "runtime" "strings" "sync" "syscall" "time" "google.golang.org/grpc" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" "github.com/Sirupsen/logrus" "github.com/codegangsta/cli" "github.com/cyberdelia/go-metrics-graphite" "github.com/docker/containerd" "github.com/docker/containerd/api/grpc/server" "github.com/docker/containerd/api/grpc/types" "github.com/docker/containerd/api/http/pprof" "github.com/docker/containerd/supervisor" "github.com/docker/docker/pkg/listeners" "github.com/rcrowley/go-metrics" ) const ( usage = `High performance container daemon` minRlimit = 1024 defaultStateDir = "/run/containerd" defaultGRPCEndpoint = "unix:///run/containerd/containerd.sock" ) var daemonFlags = []cli.Flag{ cli.BoolFlag{ Name: "debug", 
Usage: "enable debug output in the logs", }, cli.StringFlag{ Name: "state-dir", Value: defaultStateDir, Usage: "runtime state directory", }, cli.DurationFlag{ Name: "metrics-interval", Value: 5 * time.Minute, Usage: "interval for flushing metrics to the store", }, cli.StringFlag{ Name: "listen,l", Value: defaultGRPCEndpoint, Usage: "proto://address on which the GRPC API will listen", }, cli.StringFlag{ Name: "runtime,r", Value: "runc", Usage: "name or path of the OCI compliant runtime to use when executing containers", }, cli.StringSliceFlag{ Name: "runtime-args", Value: &cli.StringSlice{}, Usage: "specify additional runtime args", }, cli.StringFlag{ Name: "shim", Value: "containerd-shim", Usage: "Name or path of shim", }, cli.StringFlag{ Name: "pprof-address", Usage: "http address to listen for pprof events", }, cli.DurationFlag{ Name: "start-timeout", Value: 30 * time.Second, Usage: "timeout duration for waiting on a container to start before it is killed", }, cli.IntFlag{ Name: "retain-count", Value: 500, Usage: "number of past events to keep in the event log", }, cli.StringFlag{ Name: "graphite-address", Usage: "Address of graphite server", }, } // DumpStacks dumps the runtime stack. 
func dumpStacks() { var ( buf []byte stackSize int ) bufferLen := 16384 for stackSize == len(buf) { buf = make([]byte, bufferLen) stackSize = runtime.Stack(buf, true) bufferLen *= 2 } buf = buf[:stackSize] logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) } func setupDumpStacksTrap() { c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGUSR1) go func() { for range c { dumpStacks() } }() } func main() { logrus.SetFormatter(&logrus.TextFormatter{TimestampFormat: time.RFC3339Nano}) app := cli.NewApp() app.Name = "containerd" if containerd.GitCommit != "" { app.Version = fmt.Sprintf("%s commit: %s", containerd.Version, containerd.GitCommit) } else { app.Version = containerd.Version } app.Usage = usage app.Flags = daemonFlags app.Before = func(context *cli.Context) error { setupDumpStacksTrap() if context.GlobalBool("debug") { logrus.SetLevel(logrus.DebugLevel) if context.GlobalDuration("metrics-interval") > 0 { if err := debugMetrics(context.GlobalDuration("metrics-interval"), context.GlobalString("graphite-address")); err != nil { return err } } } if p := context.GlobalString("pprof-address"); len(p) > 0 { pprof.Enable(p) } if err := checkLimits(); err != nil { return err } return nil } app.Action = func(context *cli.Context) { if err := daemon(context); err != nil { logrus.Fatal(err) } } if err := app.Run(os.Args); err != nil { logrus.Fatal(err) } } func daemon(context *cli.Context) error { s := make(chan os.Signal, 2048) signal.Notify(s, syscall.SIGTERM, syscall.SIGINT) sv, err := supervisor.New( context.String("state-dir"), context.String("runtime"), context.String("shim"), context.StringSlice("runtime-args"), context.Duration("start-timeout"), context.Int("retain-count")) if err != nil { return err } wg := &sync.WaitGroup{} for i := 0; i < 10; i++ { wg.Add(1) w := supervisor.NewWorker(sv, wg) go w.Start() } if err := sv.Start(); err != nil { return err } // Split the listen string of the form proto://addr listenSpec 
:= context.String("listen") listenParts := strings.SplitN(listenSpec, "://", 2) if len(listenParts) != 2 { return fmt.Errorf("bad listen address format %s, expected proto://address", listenSpec) } server, err := startServer(listenParts[0], listenParts[1], sv) if err != nil { return err } for ss := range s { switch ss { default: logrus.Infof("stopping containerd after receiving %s", ss) server.Stop() os.Exit(0) } } return nil } func startServer(protocol, address string, sv *supervisor.Supervisor) (*grpc.Server, error) { // TODO: We should use TLS. // TODO: Add an option for the SocketGroup. sockets, err := listeners.Init(protocol, address, "", nil) if err != nil { return nil, err } if len(sockets) != 1 { return nil, fmt.Errorf("incorrect number of listeners") } l := sockets[0] s := grpc.NewServer() types.RegisterAPIServer(s, server.NewServer(sv)) healthServer := health.NewServer() grpc_health_v1.RegisterHealthServer(s, healthServer) go func() { logrus.Debugf("containerd: grpc api on %s", address) if err := s.Serve(l); err != nil { logrus.WithField("error", err).Fatal("containerd: serve grpc") } }() return s, nil } func checkLimits() error { var l syscall.Rlimit if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l); err != nil { return err } if l.Cur <= minRlimit { logrus.WithFields(logrus.Fields{ "current": l.Cur, "max": l.Max, }).Warn("containerd: low RLIMIT_NOFILE changing to max") l.Cur = l.Max return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) } return nil } func debugMetrics(interval time.Duration, graphiteAddr string) error { for name, m := range supervisor.Metrics() { if err := metrics.DefaultRegistry.Register(name, m); err != nil { return err } } processMetrics() if graphiteAddr != "" { addr, err := net.ResolveTCPAddr("tcp", graphiteAddr) if err != nil { return err } go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr) } else { l := log.New(os.Stdout, "[containerd] ", log.LstdFlags) go metrics.Log(metrics.DefaultRegistry, interval, l) } 
return nil } docker-containerd-tags-docker-1.13.1/containerd/main_linux.go000066400000000000000000000020431304421264600242120ustar00rootroot00000000000000package main import ( "os" "runtime" "time" "github.com/Sirupsen/logrus" "github.com/cloudfoundry/gosigar" "github.com/docker/containerd/osutils" "github.com/rcrowley/go-metrics" ) func processMetrics() { var ( g = metrics.NewGauge() fg = metrics.NewGauge() memg = metrics.NewGauge() ) metrics.DefaultRegistry.Register("goroutines", g) metrics.DefaultRegistry.Register("fds", fg) metrics.DefaultRegistry.Register("memory-used", memg) collect := func() { // update number of goroutines g.Update(int64(runtime.NumGoroutine())) // collect the number of open fds fds, err := osutils.GetOpenFds(os.Getpid()) if err != nil { logrus.WithField("error", err).Error("containerd: get open fd count") } fg.Update(int64(fds)) // get the memory used m := sigar.ProcMem{} if err := m.Get(os.Getpid()); err != nil { logrus.WithField("error", err).Error("containerd: get pid memory information") } memg.Update(int64(m.Size)) } go func() { collect() for range time.Tick(30 * time.Second) { collect() } }() } docker-containerd-tags-docker-1.13.1/containerd/main_solaris.go000066400000000000000000000000501304421264600245230ustar00rootroot00000000000000package main func processMetrics() { } docker-containerd-tags-docker-1.13.1/ctr/000077500000000000000000000000001304421264600201635ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/ctr/Makefile000066400000000000000000000000171304421264600216210ustar00rootroot00000000000000all: go build docker-containerd-tags-docker-1.13.1/ctr/checkpoint_linux.go000066400000000000000000000100271304421264600240600ustar00rootroot00000000000000package main import ( "fmt" "os" "text/tabwriter" "github.com/codegangsta/cli" "github.com/docker/containerd/api/grpc/types" netcontext "golang.org/x/net/context" ) var checkpointSubCmds = []cli.Command{ listCheckpointCommand, createCheckpointCommand, 
deleteCheckpointCommand, } var checkpointCommand = cli.Command{ Name: "checkpoints", Usage: "list all checkpoints", ArgsUsage: "COMMAND [arguments...]", Subcommands: checkpointSubCmds, Description: func() string { desc := "\n COMMAND:\n" for _, command := range checkpointSubCmds { desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage) } return desc }(), Action: listCheckpoints, } var listCheckpointCommand = cli.Command{ Name: "list", Usage: "list all checkpoints for a container", Action: listCheckpoints, Flags: []cli.Flag{ cli.StringFlag{ Name: "checkpoint-dir", Value: "", Usage: "path to checkpoint directory", }, }, } func listCheckpoints(context *cli.Context) { var ( c = getClient(context) id = context.Args().First() ) if id == "" { fatal("container id cannot be empty", ExitStatusMissingArg) } resp, err := c.ListCheckpoint(netcontext.Background(), &types.ListCheckpointRequest{ Id: id, CheckpointDir: context.String("checkpoint-dir"), }) if err != nil { fatal(err.Error(), 1) } w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0) fmt.Fprint(w, "NAME\tTCP\tUNIX SOCKETS\tSHELL\n") for _, c := range resp.Checkpoints { fmt.Fprintf(w, "%s\t%v\t%v\t%v\n", c.Name, c.Tcp, c.UnixSockets, c.Shell) } if err := w.Flush(); err != nil { fatal(err.Error(), 1) } } var createCheckpointCommand = cli.Command{ Name: "create", Usage: "create a new checkpoint for the container", Flags: []cli.Flag{ cli.BoolFlag{ Name: "tcp", Usage: "persist open tcp connections", }, cli.BoolFlag{ Name: "unix-sockets", Usage: "persist unix sockets", }, cli.BoolFlag{ Name: "exit", Usage: "exit the container after the checkpoint completes successfully", }, cli.BoolFlag{ Name: "shell", Usage: "checkpoint shell jobs", }, cli.StringFlag{ Name: "checkpoint-dir", Value: "", Usage: "directory to store checkpoints", }, cli.StringSliceFlag{ Name: "empty-ns", Usage: "create a namespace, but don't restore its properties", }, }, Action: func(context *cli.Context) { var ( containerID = context.Args().Get(0) 
name = context.Args().Get(1) ) if containerID == "" { fatal("container id at cannot be empty", ExitStatusMissingArg) } if name == "" { fatal("checkpoint name cannot be empty", ExitStatusMissingArg) } c := getClient(context) checkpoint := types.Checkpoint{ Name: name, Exit: context.Bool("exit"), Tcp: context.Bool("tcp"), Shell: context.Bool("shell"), UnixSockets: context.Bool("unix-sockets"), } emptyNSes := context.StringSlice("empty-ns") checkpoint.EmptyNS = append(checkpoint.EmptyNS, emptyNSes...) if _, err := c.CreateCheckpoint(netcontext.Background(), &types.CreateCheckpointRequest{ Id: containerID, CheckpointDir: context.String("checkpoint-dir"), Checkpoint: &checkpoint, }); err != nil { fatal(err.Error(), 1) } }, } var deleteCheckpointCommand = cli.Command{ Name: "delete", Usage: "delete a container's checkpoint", Flags: []cli.Flag{ cli.StringFlag{ Name: "checkpoint-dir", Value: "", Usage: "path to checkpoint directory", }, }, Action: func(context *cli.Context) { var ( containerID = context.Args().Get(0) name = context.Args().Get(1) ) if containerID == "" { fatal("container id at cannot be empty", ExitStatusMissingArg) } if name == "" { fatal("checkpoint name cannot be empty", ExitStatusMissingArg) } c := getClient(context) if _, err := c.DeleteCheckpoint(netcontext.Background(), &types.DeleteCheckpointRequest{ Id: containerID, Name: name, CheckpointDir: context.String("checkpoint-dir"), }); err != nil { fatal(err.Error(), 1) } }, } docker-containerd-tags-docker-1.13.1/ctr/checkpoint_solaris.go000066400000000000000000000014401304421264600243740ustar00rootroot00000000000000package main import ( "fmt" "github.com/codegangsta/cli" ) var checkpointSubCmds = []cli.Command{ listCheckpointCommand, } var checkpointCommand = cli.Command{ Name: "checkpoints", Usage: "list all checkpoints", ArgsUsage: "COMMAND [arguments...]", Subcommands: checkpointSubCmds, Description: func() string { desc := "\n COMMAND:\n" for _, command := range checkpointSubCmds { desc += 
fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage) } return desc }(), Action: listCheckpoints, } var listCheckpointCommand = cli.Command{ Name: "list", Usage: "list all checkpoints for a container", Action: listCheckpoints, } func listCheckpoints(context *cli.Context) { fatal("checkpoint command is not supported on Solaris", ExitStatusUnsupported) } docker-containerd-tags-docker-1.13.1/ctr/const.go000066400000000000000000000004721304421264600216430ustar00rootroot00000000000000package main // ctr wide constants const ( // ExitStatusOK indicates successful completion ExitStatusOK = 0 // ExitStatusMissingArg indicates failure due to missing argument(s) ExitStatusMissingArg = 1 // ExitStatusUnsupported indicates failure due to unsupported subcommand(s) ExitStatusUnsupported = 2 ) docker-containerd-tags-docker-1.13.1/ctr/container.go000066400000000000000000000411261304421264600225000ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "io" "io/ioutil" "log" "net" "os" "os/signal" "path/filepath" "strconv" "strings" "syscall" "text/tabwriter" "time" "github.com/codegangsta/cli" "github.com/docker/containerd/api/grpc/types" "github.com/docker/containerd/specs" "github.com/docker/docker/pkg/term" "github.com/golang/protobuf/ptypes" netcontext "golang.org/x/net/context" "golang.org/x/sys/unix" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/transport" ) // TODO: parse flags and pass opts func getClient(ctx *cli.Context) types.APIClient { // Parse proto://address form addresses. 
bindSpec := ctx.GlobalString("address") bindParts := strings.SplitN(bindSpec, "://", 2) if len(bindParts) != 2 { fatal(fmt.Sprintf("bad bind address format %s, expected proto://address", bindSpec), 1) } // reset the logger for grpc to log to dev/null so that it does not mess with our stdio grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(ctx.GlobalDuration("conn-timeout"))} dialOpts = append(dialOpts, grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout(bindParts[0], bindParts[1], timeout) }, )) conn, err := grpc.Dial(bindSpec, dialOpts...) if err != nil { fatal(err.Error(), 1) } return types.NewAPIClient(conn) } var contSubCmds = []cli.Command{ execCommand, killCommand, listCommand, pauseCommand, resumeCommand, startCommand, stateCommand, statsCommand, watchCommand, updateCommand, } var containersCommand = cli.Command{ Name: "containers", Usage: "interact with running containers", ArgsUsage: "COMMAND [arguments...]", Subcommands: contSubCmds, Description: func() string { desc := "\n COMMAND:\n" for _, command := range contSubCmds { desc += fmt.Sprintf(" %-10.10s%s\n", command.Name, command.Usage) } return desc }(), Action: listContainers, } var stateCommand = cli.Command{ Name: "state", Usage: "get a raw dump of the containerd state", Action: func(context *cli.Context) { c := getClient(context) resp, err := c.State(netcontext.Background(), &types.StateRequest{ Id: context.Args().First(), }) if err != nil { fatal(err.Error(), 1) } data, err := json.Marshal(resp) if err != nil { fatal(err.Error(), 1) } fmt.Print(string(data)) }, } var listCommand = cli.Command{ Name: "list", Usage: "list all running containers", Action: listContainers, } func listContainers(context *cli.Context) { c := getClient(context) resp, err := c.State(netcontext.Background(), &types.StateRequest{ Id: context.Args().First(), }) if err != nil { fatal(err.Error(), 1) } 
w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0) fmt.Fprint(w, "ID\tPATH\tSTATUS\tPROCESSES\n") sortContainers(resp.Containers) for _, c := range resp.Containers { procs := []string{} for _, p := range c.Processes { procs = append(procs, p.Pid) } fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", c.Id, c.BundlePath, c.Status, strings.Join(procs, ",")) } if err := w.Flush(); err != nil { fatal(err.Error(), 1) } } var startCommand = cli.Command{ Name: "start", Usage: "start a container", ArgsUsage: "ID BundlePath", Flags: []cli.Flag{ cli.StringFlag{ Name: "checkpoint,c", Value: "", Usage: "checkpoint to start the container from", }, cli.StringFlag{ Name: "checkpoint-dir", Value: "", Usage: "path to checkpoint directory", }, cli.BoolFlag{ Name: "attach,a", Usage: "connect to the stdio of the container", }, cli.StringSliceFlag{ Name: "label,l", Value: &cli.StringSlice{}, Usage: "set labels for the container", }, cli.BoolFlag{ Name: "no-pivot", Usage: "do not use pivot root", }, cli.StringFlag{ Name: "runtime,r", Value: "runc", Usage: "name or path of the OCI compliant runtime to use when executing containers", }, cli.StringSliceFlag{ Name: "runtime-args", Value: &cli.StringSlice{}, Usage: "specify additional runtime args", }, }, Action: func(context *cli.Context) { var ( id = context.Args().Get(0) path = context.Args().Get(1) ) if path == "" { fatal("bundle path cannot be empty", ExitStatusMissingArg) } if id == "" { fatal("container id cannot be empty", ExitStatusMissingArg) } bpath, err := filepath.Abs(path) if err != nil { fatal(fmt.Sprintf("cannot get the absolute path of the bundle: %v", err), 1) } s, tmpDir, err := createStdio() defer func() { if tmpDir != "" { os.RemoveAll(tmpDir) } }() if err != nil { fatal(err.Error(), 1) } var ( restoreAndCloseStdin func() tty bool c = getClient(context) r = &types.CreateContainerRequest{ Id: id, BundlePath: bpath, Checkpoint: context.String("checkpoint"), CheckpointDir: context.String("checkpoint-dir"), Stdin: s.stdin, Stdout: 
s.stdout, Stderr: s.stderr, Labels: context.StringSlice("label"), NoPivotRoot: context.Bool("no-pivot"), Runtime: context.String("runtime"), RuntimeArgs: context.StringSlice("runtime-args"), } ) restoreAndCloseStdin = func() { if state != nil { term.RestoreTerminal(os.Stdin.Fd(), state) } if stdin != nil { stdin.Close() } } defer restoreAndCloseStdin() if context.Bool("attach") { mkterm, err := readTermSetting(bpath) if err != nil { fatal(err.Error(), 1) } tty = mkterm if mkterm { s, err := term.SetRawTerminal(os.Stdin.Fd()) if err != nil { fatal(err.Error(), 1) } state = s } if err := attachStdio(s); err != nil { fatal(err.Error(), 1) } } events, err := c.Events(netcontext.Background(), &types.EventsRequest{}) if err != nil { fatal(err.Error(), 1) } if _, err := c.CreateContainer(netcontext.Background(), r); err != nil { fatal(err.Error(), 1) } if context.Bool("attach") { go func() { io.Copy(stdin, os.Stdin) if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ Id: id, Pid: "init", CloseStdin: true, }); err != nil { fatal(err.Error(), 1) } restoreAndCloseStdin() }() if tty { resize(id, "init", c) go func() { s := make(chan os.Signal, 64) signal.Notify(s, syscall.SIGWINCH) for range s { if err := resize(id, "init", c); err != nil { log.Println(err) } } }() } waitForExit(c, events, id, "init", restoreAndCloseStdin) } }, } func resize(id, pid string, c types.APIClient) error { ws, err := term.GetWinsize(os.Stdin.Fd()) if err != nil { return err } if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ Id: id, Pid: "init", Width: uint32(ws.Width), Height: uint32(ws.Height), }); err != nil { return err } return nil } var ( stdin io.WriteCloser state *term.State ) // readTermSetting reads the Terminal option out of the specs configuration // to know if ctr should allocate a pty func readTermSetting(path string) (bool, error) { f, err := os.Open(filepath.Join(path, "config.json")) if err != nil { return false, err } 
defer f.Close() var spec specs.Spec if err := json.NewDecoder(f).Decode(&spec); err != nil { return false, err } return spec.Process.Terminal, nil } func attachStdio(s stdio) error { stdinf, err := os.OpenFile(s.stdin, syscall.O_RDWR, 0) if err != nil { return err } // FIXME: assign to global stdin = stdinf stdoutf, err := os.OpenFile(s.stdout, syscall.O_RDWR, 0) if err != nil { return err } go io.Copy(os.Stdout, stdoutf) stderrf, err := os.OpenFile(s.stderr, syscall.O_RDWR, 0) if err != nil { return err } go io.Copy(os.Stderr, stderrf) return nil } var watchCommand = cli.Command{ Name: "watch", Usage: "print container events", Action: func(context *cli.Context) { c := getClient(context) id := context.Args().First() if id != "" { resp, err := c.State(netcontext.Background(), &types.StateRequest{Id: id}) if err != nil { fatal(err.Error(), 1) } for _, c := range resp.Containers { if c.Id == id { break } } if id == "" { fatal("Invalid container id", 1) } } events, reqErr := c.Events(netcontext.Background(), &types.EventsRequest{}) if reqErr != nil { fatal(reqErr.Error(), 1) } for { e, err := events.Recv() if err != nil { fatal(err.Error(), 1) } if id == "" || e.Id == id { fmt.Printf("%#v\n", e) } } }, } var pauseCommand = cli.Command{ Name: "pause", Usage: "pause a container", Action: func(context *cli.Context) { id := context.Args().First() if id == "" { fatal("container id cannot be empty", ExitStatusMissingArg) } c := getClient(context) _, err := c.UpdateContainer(netcontext.Background(), &types.UpdateContainerRequest{ Id: id, Pid: "init", Status: "paused", }) if err != nil { fatal(err.Error(), 1) } }, } var resumeCommand = cli.Command{ Name: "resume", Usage: "resume a paused container", Action: func(context *cli.Context) { id := context.Args().First() if id == "" { fatal("container id cannot be empty", ExitStatusMissingArg) } c := getClient(context) _, err := c.UpdateContainer(netcontext.Background(), &types.UpdateContainerRequest{ Id: id, Pid: "init", Status: 
"running", }) if err != nil { fatal(err.Error(), 1) } }, } var killCommand = cli.Command{ Name: "kill", Usage: "send a signal to a container or its processes", Flags: []cli.Flag{ cli.StringFlag{ Name: "pid,p", Value: "init", Usage: "pid of the process to signal within the container", }, cli.IntFlag{ Name: "signal,s", Value: 15, Usage: "signal to send to the container", }, }, Action: func(context *cli.Context) { id := context.Args().First() if id == "" { fatal("container id cannot be empty", ExitStatusMissingArg) } c := getClient(context) if _, err := c.Signal(netcontext.Background(), &types.SignalRequest{ Id: id, Pid: context.String("pid"), Signal: uint32(context.Int("signal")), }); err != nil { fatal(err.Error(), 1) } }, } var execCommand = cli.Command{ Name: "exec", Usage: "exec another process in an existing container", Flags: []cli.Flag{ cli.StringFlag{ Name: "id", Usage: "container id to add the process to", }, cli.StringFlag{ Name: "pid", Usage: "process id for the new process", }, cli.BoolFlag{ Name: "attach,a", Usage: "connect to the stdio of the container", }, cli.StringFlag{ Name: "cwd", Usage: "current working directory for the process", }, cli.BoolFlag{ Name: "tty,t", Usage: "create a terminal for the process", }, cli.StringSliceFlag{ Name: "env,e", Value: &cli.StringSlice{}, Usage: "environment variables for the process", }, cli.IntFlag{ Name: "uid,u", Usage: "user id of the user for the process", }, cli.IntFlag{ Name: "gid,g", Usage: "group id of the user for the process", }, }, Action: func(context *cli.Context) { var restoreAndCloseStdin func() p := &types.AddProcessRequest{ Id: context.String("id"), Pid: context.String("pid"), Args: context.Args(), Cwd: context.String("cwd"), Terminal: context.Bool("tty"), Env: context.StringSlice("env"), User: &types.User{ Uid: uint32(context.Int("uid")), Gid: uint32(context.Int("gid")), }, } s, tmpDir, err := createStdio() defer func() { if tmpDir != "" { os.RemoveAll(tmpDir) } }() if err != nil { 
fatal(err.Error(), 1) } p.Stdin = s.stdin p.Stdout = s.stdout p.Stderr = s.stderr restoreAndCloseStdin = func() { if state != nil { term.RestoreTerminal(os.Stdin.Fd(), state) } if stdin != nil { stdin.Close() } } defer restoreAndCloseStdin() if context.Bool("attach") { if context.Bool("tty") { s, err := term.SetRawTerminal(os.Stdin.Fd()) if err != nil { fatal(err.Error(), 1) } state = s } if err := attachStdio(s); err != nil { fatal(err.Error(), 1) } } c := getClient(context) events, err := c.Events(netcontext.Background(), &types.EventsRequest{}) if err != nil { fatal(err.Error(), 1) } if _, err := c.AddProcess(netcontext.Background(), p); err != nil { fatal(err.Error(), 1) } if context.Bool("attach") { go func() { io.Copy(stdin, os.Stdin) if _, err := c.UpdateProcess(netcontext.Background(), &types.UpdateProcessRequest{ Id: p.Id, Pid: p.Pid, CloseStdin: true, }); err != nil { log.Println(err) } restoreAndCloseStdin() }() if context.Bool("tty") { resize(p.Id, p.Pid, c) go func() { s := make(chan os.Signal, 64) signal.Notify(s, syscall.SIGWINCH) for range s { if err := resize(p.Id, p.Pid, c); err != nil { log.Println(err) } } }() } waitForExit(c, events, context.String("id"), context.String("pid"), restoreAndCloseStdin) } }, } var statsCommand = cli.Command{ Name: "stats", Usage: "get stats for running container", Action: func(context *cli.Context) { req := &types.StatsRequest{ Id: context.Args().First(), } c := getClient(context) stats, err := c.Stats(netcontext.Background(), req) if err != nil { fatal(err.Error(), 1) } data, err := json.Marshal(stats) if err != nil { fatal(err.Error(), 1) } fmt.Print(string(data)) }, } func getUpdateCommandInt64Flag(context *cli.Context, name string) uint64 { str := context.String(name) if str == "" { return 0 } val, err := strconv.ParseUint(str, 0, 64) if err != nil { fatal(err.Error(), 1) } return val } var updateCommand = cli.Command{ Name: "update", Usage: "update a containers resources", Flags: []cli.Flag{ cli.StringFlag{ 
Name: "memory-limit", }, cli.StringFlag{ Name: "memory-reservation", }, cli.StringFlag{ Name: "memory-swap", }, cli.StringFlag{ Name: "cpu-quota", }, cli.StringFlag{ Name: "cpu-period", }, cli.StringFlag{ Name: "kernel-limit", }, cli.StringFlag{ Name: "kernel-tcp-limit", }, cli.StringFlag{ Name: "blkio-weight", }, cli.StringFlag{ Name: "cpuset-cpus", }, cli.StringFlag{ Name: "cpuset-mems", }, }, Action: func(context *cli.Context) { req := &types.UpdateContainerRequest{ Id: context.Args().First(), } req.Resources = &types.UpdateResource{} req.Resources.MemoryLimit = getUpdateCommandInt64Flag(context, "memory-limit") req.Resources.MemoryReservation = getUpdateCommandInt64Flag(context, "memory-reservation") req.Resources.MemorySwap = getUpdateCommandInt64Flag(context, "memory-swap") req.Resources.BlkioWeight = getUpdateCommandInt64Flag(context, "blkio-weight") req.Resources.CpuPeriod = getUpdateCommandInt64Flag(context, "cpu-period") req.Resources.CpuQuota = getUpdateCommandInt64Flag(context, "cpu-quota") req.Resources.CpuShares = getUpdateCommandInt64Flag(context, "cpu-shares") req.Resources.CpusetCpus = context.String("cpuset-cpus") req.Resources.CpusetMems = context.String("cpuset-mems") req.Resources.KernelMemoryLimit = getUpdateCommandInt64Flag(context, "kernel-limit") req.Resources.KernelTCPMemoryLimit = getUpdateCommandInt64Flag(context, "kernel-tcp-limit") c := getClient(context) if _, err := c.UpdateContainer(netcontext.Background(), req); err != nil { fatal(err.Error(), 1) } }, } func waitForExit(c types.APIClient, events types.API_EventsClient, id, pid string, closer func()) { timestamp := time.Now() for { e, err := events.Recv() if err != nil { if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc { closer() os.Exit(128 + int(syscall.SIGHUP)) } time.Sleep(1 * time.Second) tsp, err := ptypes.TimestampProto(timestamp) if err != nil { closer() fmt.Fprintf(os.Stderr, "%s", err.Error()) os.Exit(1) } events, _ = c.Events(netcontext.Background(), 
&types.EventsRequest{Timestamp: tsp}) continue } timestamp, err = ptypes.Timestamp(e.Timestamp) if e.Id == id && e.Type == "exit" && e.Pid == pid { closer() os.Exit(int(e.Status)) } } } type stdio struct { stdin string stdout string stderr string } func createStdio() (s stdio, tmp string, err error) { tmp, err = ioutil.TempDir("", "ctr-") if err != nil { return s, tmp, err } // create fifo's for the process for name, fd := range map[string]*string{ "stdin": &s.stdin, "stdout": &s.stdout, "stderr": &s.stderr, } { path := filepath.Join(tmp, name) if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) { return s, tmp, err } *fd = path } return s, tmp, nil } docker-containerd-tags-docker-1.13.1/ctr/events.go000066400000000000000000000026261304421264600220240ustar00rootroot00000000000000package main import ( "fmt" "os" "text/tabwriter" "time" "github.com/codegangsta/cli" "github.com/docker/containerd/api/grpc/types" "github.com/golang/protobuf/ptypes" netcontext "golang.org/x/net/context" ) var eventsCommand = cli.Command{ Name: "events", Usage: "receive events from the containerd daemon", Flags: []cli.Flag{ cli.StringFlag{ Name: "timestamp,t", Usage: "get events from a specific time stamp in RFC3339Nano format", }, }, Action: func(context *cli.Context) { var ( t = time.Time{} c = getClient(context) ) if ts := context.String("timestamp"); ts != "" { from, err := time.Parse(time.RFC3339Nano, ts) if err != nil { fatal(err.Error(), 1) } t = from } tsp, err := ptypes.TimestampProto(t) if err != nil { fatal(err.Error(), 1) } events, err := c.Events(netcontext.Background(), &types.EventsRequest{ Timestamp: tsp, }) if err != nil { fatal(err.Error(), 1) } w := tabwriter.NewWriter(os.Stdout, 31, 1, 1, ' ', 0) fmt.Fprint(w, "TIME\tTYPE\tID\tPID\tSTATUS\n") w.Flush() for { e, err := events.Recv() if err != nil { fatal(err.Error(), 1) } t, err := ptypes.Timestamp(e.Timestamp) if err != nil { fmt.Fprintf(os.Stderr, "Unable to convert timestamp") t = time.Time{} } 
fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\n", t.Format(time.RFC3339Nano), e.Type, e.Id, e.Pid, e.Status) w.Flush() } }, } docker-containerd-tags-docker-1.13.1/ctr/main.go000066400000000000000000000036451304421264600214460ustar00rootroot00000000000000package main import ( "fmt" "os" "time" netcontext "golang.org/x/net/context" "github.com/Sirupsen/logrus" "github.com/codegangsta/cli" "github.com/docker/containerd" "github.com/docker/containerd/api/grpc/types" ) const usage = `High performance container daemon cli` type exit struct { Code int } func main() { // We want our defer functions to be run when calling fatal() defer func() { if e := recover(); e != nil { if ex, ok := e.(exit); ok == true { os.Exit(ex.Code) } panic(e) } }() app := cli.NewApp() app.Name = "ctr" if containerd.GitCommit != "" { app.Version = fmt.Sprintf("%s commit: %s", containerd.Version, containerd.GitCommit) } else { app.Version = containerd.Version } app.Usage = usage app.Flags = []cli.Flag{ cli.BoolFlag{ Name: "debug", Usage: "enable debug output in the logs", }, cli.StringFlag{ Name: "address", Value: "unix:///run/containerd/containerd.sock", Usage: "proto://address of GRPC API", }, cli.DurationFlag{ Name: "conn-timeout", Value: 1 * time.Second, Usage: "GRPC connection timeout", }, } app.Commands = []cli.Command{ checkpointCommand, containersCommand, eventsCommand, stateCommand, versionCommand, } app.Before = func(context *cli.Context) error { if context.GlobalBool("debug") { logrus.SetLevel(logrus.DebugLevel) } return nil } if err := app.Run(os.Args); err != nil { logrus.Fatal(err) } } var versionCommand = cli.Command{ Name: "version", Usage: "return the daemon version", Action: func(context *cli.Context) { c := getClient(context) resp, err := c.GetServerVersion(netcontext.Background(), &types.GetServerVersionRequest{}) if err != nil { fatal(err.Error(), 1) } fmt.Printf("daemon version %d.%d.%d commit: %s\n", resp.Major, resp.Minor, resp.Patch, resp.Revision) }, } func fatal(err string, code 
int) { fmt.Fprintf(os.Stderr, "[ctr] %s\n", err) panic(exit{code}) } docker-containerd-tags-docker-1.13.1/ctr/sort.go000066400000000000000000000006611304421264600215040ustar00rootroot00000000000000package main import ( "sort" "github.com/docker/containerd/api/grpc/types" ) func sortContainers(c []*types.Container) { sort.Sort(&containerSorter{c}) } type containerSorter struct { c []*types.Container } func (s *containerSorter) Len() int { return len(s.c) } func (s *containerSorter) Swap(i, j int) { s.c[i], s.c[j] = s.c[j], s.c[i] } func (s *containerSorter) Less(i, j int) bool { return s.c[i].Id < s.c[j].Id } docker-containerd-tags-docker-1.13.1/docs/000077500000000000000000000000001304421264600203235ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/docs/api.md000066400000000000000000000006761304421264600214270ustar00rootroot00000000000000# API The API for containerd is with GRPC over a unix socket located at the default location of `/run/containerd/containerd.sock`. At this time please refer to the [proto at](https://github.com/docker/containerd/blob/master/api/grpc/types/api.proto) for the API methods and types. There is a Go implementation and types checked into this repository but alternate language implementations can be created using the grpc and protoc toolchain. docker-containerd-tags-docker-1.13.1/docs/attach.md000066400000000000000000000041501304421264600221110ustar00rootroot00000000000000# Attaching to STDIO or TTY The model for STDIO, TTY, and logging is a little different in containerd. Because of the various methods that consumers want on the logging side these types of decisions are pushed to the client. Containerd API is developed for access on a single host therefore many things like paths on the host system are acceptable in the API. For the STDIO model the client requesting to start a container provides the paths for the IO. 
## Logging If no options are specified on create all STDIO of the processes launched by containerd will be sent to `/dev/null`. If you want containerd to send the STDIO of the processes to a file, you can pass paths to the files in the create container method defined by this proto in the stdin, stdout, and stderr fields: ```proto message CreateContainerRequest { string id = 1; // ID of container string bundlePath = 2; // path to OCI bundle string stdin = 3; // path to the file where stdin will be read (optional) string stdout = 4; // path to file where stdout will be written (optional) string stderr = 5; // path to file where stderr will be written (optional) string console = 6; // path to the console for a container (optional) string checkpoint = 7; // checkpoint name if you want to create immediate checkpoint (optional) } ``` ## Attach In order to have attach like functionality for your containers you use the same API request but named pipes or fifos can be used to achieve this type of functionality. The default CLI for containerd does this if you specify the `--attach` flag on `create` or `start`. It will create fifos for each of the containers stdio which the CLI can read and write to. This can be used to create an interactive session with the container, `bash` for example, or to have a blocking way to collect the container's STDIO and forward it to your logging facilities. ## TTY The tty model is the same as above only the client creates a pty and provides to other side to containerd in the create request in the `console` field. Containerd will provide the pty to the container to use and the session can be opened with the container after it starts. docker-containerd-tags-docker-1.13.1/docs/bundle-changes.md000066400000000000000000000012201304421264600235170ustar00rootroot00000000000000# containerd changes to the bundle Containerd will make changes to the container's bundle by adding additional files or folders by default with options to change the output. 
The current change that it makes is if you create a checkpoint of a container, the checkpoints will be saved by default in the container bundle at `{bundle}/checkpoints/{checkpoint name}`. A user can also populate this directory and provide the checkpoint name on the create request so that the container is started from this checkpoint. As of this point, containerd has no other additions to the bundle. Runtime state is currently stored in a tmpfs filesystem like `/run`. docker-containerd-tags-docker-1.13.1/docs/bundle.md000066400000000000000000000076341304421264600221300ustar00rootroot00000000000000# Creating OCI bundles Since containerd consumes the OCI bundle format containers and configuration will have to be created on the machine that containerd is running on. The easiest way to do this is to download an image with docker and export it. ## Setup First thing we need to do to create a bundle is setup the initial directory structure. Create a directory with a unique name. In this example we will create a redis container. We will create this container in a `/containers` directory. ```bash mkdir redis ``` Inside the `redis` directory create another directory named `rootfs` ```bash mkdir redis/rootfs ``` ## Root Filesystem Now we need to populate the `rootfs` directory with the filesystem of a redis container. To do this we need to pull the redis image with docker and export its contents to the `rootfs` directory. ```bash docker pull redis # create the container with a temp name so that we can export it docker create --name tempredis redis # export it into the rootfs directory docker export tempredis | tar -C redis/rootfs -xf - # remove the container now that we have exported docker rm tempredis ``` Now that we have the root filesystem populated we need to create the configs for the container. ## Configs An easy way to get temp configs for the container bundle is to use the `runc` cli tool from the [runc](https://github.com/opencontainers/runc) repository. 
You need to `cd` into the `redis` directory and run the `runc spec` command. After doing this you should have a file `config.json` created. The directory structure should look like this: ``` /containers/redis ├── config.json └── rootfs/ ``` ## Edits We need to edit the config to add `redis-server` as the application to launch inside the container, and remove the network namespace so that you can connect to the redis server on your system. The resulting `config.json` should look like this: ```json { "ociVersion": "0.4.0", "platform": { "os": "linux", "arch": "amd64" }, "process": { "terminal": true, "user": {}, "args": [ "redis-server", "--bind", "0.0.0.0" ], "env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "cwd": "/", "capabilities": [ "CAP_AUDIT_WRITE", "CAP_KILL", "CAP_NET_BIND_SERVICE" ], "rlimits": [ { "type": "RLIMIT_NOFILE", "hard": 1024, "soft": 1024 } ], "noNewPrivileges": true }, "root": { "path": "rootfs", "readonly": true }, "hostname": "runc", "mounts": [ { "destination": "/proc", "type": "proc", "source": "proc" }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ] } ], "hooks": {}, "linux": { "resources": { "devices": [ { "allow": false, "access": "rwm" } ] }, "namespaces": [ { 
"type": "pid" }, { "type": "ipc" }, { "type": "uts" }, { "type": "mount" } ], "devices": null } } ``` This is what you need to do to make a OCI compliant bundle for containerd to start. docker-containerd-tags-docker-1.13.1/docs/cli.md000066400000000000000000000122331304421264600214150ustar00rootroot00000000000000# Client CLI There is a default cli named `ctr` based on the GRPC api. This cli will allow you to create and manage containers run with containerd. ``` $ ctr -h NAME: ctr - High performance container daemon cli USAGE: ctr [global options] command [command options] [arguments...] VERSION: 0.1.0 commit: 54c213e8a719d734001beb2cb8f130c84cc3bd20 COMMANDS: checkpoints list all checkpoints containers interact with running containers events receive events from the containerd daemon state get a raw dump of the containerd state help, h Shows a list of commands or help for one command GLOBAL OPTIONS: --debug enable debug output in the logs --address "/run/containerd/containerd.sock" address of GRPC API --help, -h show help --version, -v print the version ``` ## Starting a container ``` $ ctr containers start -h NAME: ctr containers start - start a container USAGE: ctr containers start [command options] [arguments...] OPTIONS: --checkpoint, -c checkpoint to start the container from --attach, -a connect to the stdio of the container --label, -l [--label option --label option] set labels for the container ``` ```bash $ sudo ctr containers start redis /containers/redis ``` `/containers/redis` is the path to an OCI bundle. 
[See the bundle docs for more information.](bundle.md) ## Listing containers ```bash $ sudo ctr containers ID PATH STATUS PROCESSES 1 /containers/redis running 14063 19 /containers/redis running 14100 14 /containers/redis running 14117 4 /containers/redis running 14030 16 /containers/redis running 14061 3 /containers/redis running 14024 12 /containers/redis running 14097 10 /containers/redis running 14131 18 /containers/redis running 13977 13 /containers/redis running 13979 15 /containers/redis running 13998 5 /containers/redis running 14021 9 /containers/redis running 14075 6 /containers/redis running 14107 2 /containers/redis running 14135 11 /containers/redis running 13978 17 /containers/redis running 13989 8 /containers/redis running 14053 7 /containers/redis running 14022 0 /containers/redis running 14006 ``` ## Kill a container's process ``` $ ctr containers kill -h NAME: ctr containers kill - send a signal to a container or its processes USAGE: ctr containers kill [command options] [arguments...] OPTIONS: --pid, -p "init" pid of the process to signal within the container --signal, -s "15" signal to send to the container ``` ## Exec another process into a container ``` $ ctr containers exec -h NAME: ctr containers exec - exec another process in an existing container USAGE: ctr containers exec [command options] [arguments...] OPTIONS: --id container id to add the process to --pid process id for the new process --attach, -a connect to the stdio of the container --cwd current working directory for the process --tty, -t create a terminal for the process --env, -e [--env option --env option] environment variables for the process --uid, -u "0" user id of the user for the process --gid, -g "0" group id of the user for the process ``` ## Stats for a container ``` $ ctr containers stats -h NAME: ctr containers stats - get stats for running container USAGE: ctr containers stats [arguments...] 
``` ## List checkpoints ``` $ sudo ctr checkpoints redis NAME TCP UNIX SOCKETS SHELL test false false false test2 false false false ``` ## Create a new checkpoint ``` $ ctr checkpoints create -h NAME: ctr checkpoints create - create a new checkpoint for the container USAGE: ctr checkpoints create [command options] [arguments...] OPTIONS: --tcp persist open tcp connections --unix-sockets perist unix sockets --exit exit the container after the checkpoint completes successfully --shell checkpoint shell jobs ``` ## Get events ``` $ sudo ctr events TYPE ID PID STATUS exit redis 24761 0 ``` docker-containerd-tags-docker-1.13.1/docs/daemon.md000066400000000000000000000020271304421264600221110ustar00rootroot00000000000000# Daemon options ``` $ containerd -h NAME: containerd - High performance container daemon USAGE: containerd [global options] command [command options] [arguments...] VERSION: 0.1.0 commit: 54c213e8a719d734001beb2cb8f130c84cc3bd20 COMMANDS: help, h Shows a list of commands or help for one command GLOBAL OPTIONS: --debug enable debug output in the logs --state-dir "/run/containerd" runtime state directory --metrics-interval "5m0s" interval for flushing metrics to the store --listen, -l "/run/containerd/containerd.sock" Address on which GRPC API will listen --runtime, -r "runc" name of the OCI compliant runtime to use when executing containers --graphite-address Address of graphite server --help, -h show help --version, -v print the version ``` docker-containerd-tags-docker-1.13.1/docs/telemetry.md000066400000000000000000000030561304421264600226630ustar00rootroot00000000000000# Telemetry Currently containerd only outputs metrics to stdout but will support dumping to various backends in the future. 
``` [containerd] 2015/12/16 11:48:28 timer container-start-time [containerd] 2015/12/16 11:48:28 count: 22 [containerd] 2015/12/16 11:48:28 min: 25425883 [containerd] 2015/12/16 11:48:28 max: 113077691 [containerd] 2015/12/16 11:48:28 mean: 68386923.27 [containerd] 2015/12/16 11:48:28 stddev: 20928453.26 [containerd] 2015/12/16 11:48:28 median: 65489003.50 [containerd] 2015/12/16 11:48:28 75%: 82393210.50 [containerd] 2015/12/16 11:48:28 95%: 112267814.75 [containerd] 2015/12/16 11:48:28 99%: 113077691.00 [containerd] 2015/12/16 11:48:28 99.9%: 113077691.00 [containerd] 2015/12/16 11:48:28 1-min rate: 0.00 [containerd] 2015/12/16 11:48:28 5-min rate: 0.01 [containerd] 2015/12/16 11:48:28 15-min rate: 0.01 [containerd] 2015/12/16 11:48:28 mean rate: 0.03 [containerd] 2015/12/16 11:48:28 counter containers [containerd] 2015/12/16 11:48:28 count: 1 [containerd] 2015/12/16 11:48:28 counter events [containerd] 2015/12/16 11:48:28 count: 87 [containerd] 2015/12/16 11:48:28 counter events-subscribers [containerd] 2015/12/16 11:48:28 count: 2 [containerd] 2015/12/16 11:48:28 gauge goroutines [containerd] 2015/12/16 11:48:28 value: 38 [containerd] 2015/12/16 11:48:28 gauge fds [containerd] 2015/12/16 11:48:28 value: 18 ``` docker-containerd-tags-docker-1.13.1/hack/000077500000000000000000000000001304421264600203015ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/hack/.vendor-helpers.sh000077500000000000000000000054201304421264600236540ustar00rootroot00000000000000#!/usr/bin/env bash PROJECT=github.com/docker/containerd # Downloads dependencies into vendor/ directory mkdir -p vendor export GOPATH="$GOPATH:${PWD}/vendor" find='find' if [ "$(go env GOHOSTOS)" = 'windows' ]; then find='/usr/bin/find' fi clone() { local vcs="$1" local pkg="$2" local rev="$3" local url="$4" : ${url:=https://$pkg} local target="vendor/src/$pkg" echo -n "$pkg @ $rev: " if [ -d "$target" ]; then echo -n 'rm old, ' rm -rf "$target" fi echo -n 'clone, ' case "$vcs" in git) git clone 
--quiet --no-checkout "$url" "$target" ( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" ) ;; hg) hg clone --quiet --updaterev "$rev" "$url" "$target" ;; esac echo -n 'rm VCS, ' ( cd "$target" && rm -rf .{git,hg} ) echo -n 'rm vendor, ' ( cd "$target" && rm -rf vendor Godeps/_workspace ) echo done } clean() { local packages=( "${PROJECT}/containerd" # package main "${PROJECT}/ctr" # package main "${PROJECT}/containerd-shim" # package main "${PROJECT}/integration-test" # package main ) local platforms=( linux/amd64 linux/386 windows/amd64 windows/386 darwin/amd64 ) local buildTagCombos=( 'libcontainer runc seccomp' ) echo echo -n 'collecting import graph, ' local IFS=$'\n' local imports=( $( for platform in "${platforms[@]}"; do export GOOS="${platform%/*}"; export GOARCH="${platform##*/}"; for buildTags in "${buildTagCombos[@]}"; do go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}" go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}" done done | grep -vE "^${PROJECT}" | sort -u ) ) imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") ) unset IFS echo -n 'pruning unused packages, ' findArgs=( # for some reason go list doesn't detect this as a dependency -path vendor/src/github.com/vdemeester/shakers ) for import in "${imports[@]}"; do [ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or ) findArgs+=( -path "vendor/src/$import" ) done local IFS=$'\n' local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') ) unset IFS for dir in "${prune[@]}"; do $find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';' rmdir "$dir" 2>/dev/null || true done echo -n 'pruning unused files, ' $find vendor -type f -name '*_test.go' -exec rm -v '{}' ';' echo done } # Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring fix_rewritten_imports () { local pkg="$1" local 
remove="${pkg}/Godeps/_workspace/src/" local target="vendor/src/$pkg" echo "$pkg: fixing rewritten imports" $find "$target" -name \*.go -exec sed -i -e "s|\"${remove}|\"|g" {} \; } docker-containerd-tags-docker-1.13.1/hack/benchmark.go000066400000000000000000000033121304421264600225610ustar00rootroot00000000000000// single app that will run containers in containerd and output // the total time in seconds that it took for the execution. // go run benchmark.go -count 1000 -bundle /containers/redis package main import ( "flag" "net" "strconv" "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/api/grpc/types" netcontext "golang.org/x/net/context" "google.golang.org/grpc" ) func init() { flag.StringVar(&bundle, "bundle", "/containers/redis", "the bundle path") flag.StringVar(&addr, "addr", "/run/containerd/containerd.sock", "address to the container d instance") flag.IntVar(&count, "count", 1000, "number of containers to run") flag.Parse() } var ( count int bundle, addr string group = sync.WaitGroup{} jobs = make(chan string, 20) ) func getClient() types.APIClient { dialOpts := []grpc.DialOption{grpc.WithInsecure()} dialOpts = append(dialOpts, grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }, )) conn, err := grpc.Dial(addr, dialOpts...) 
if err != nil { logrus.Fatal(err) } return types.NewAPIClient(conn) } func main() { client := getClient() for i := 0; i < 100; i++ { group.Add(1) go worker(client) } start := time.Now() for i := 0; i < count; i++ { id := strconv.Itoa(i) jobs <- id } close(jobs) group.Wait() end := time.Now() duration := end.Sub(start).Seconds() logrus.Info(duration) } func worker(client types.APIClient) { defer group.Done() for id := range jobs { if _, err := client.CreateContainer(netcontext.Background(), &types.CreateContainerRequest{ Id: id, BundlePath: bundle, }); err != nil { logrus.Error(err) } } } docker-containerd-tags-docker-1.13.1/hack/containerd.service000066400000000000000000000002741304421264600240140ustar00rootroot00000000000000[Unit] Description=containerd Documentation=https://containerd.tools After=network.target [Service] ExecStart=/usr/local/bin/containerd Delegate=yes [Install] WantedBy=multi-user.target docker-containerd-tags-docker-1.13.1/hack/validate-lint000077500000000000000000000002171304421264600227640ustar00rootroot00000000000000#!/bin/bash lint_error=$(golint ./... | grep -v vendor | grep -v .pb. 
| tee /dev/stderr) if [ "$lint_error" != "" ]; then exit 1 fi exit 0 docker-containerd-tags-docker-1.13.1/hack/vendor.sh000077500000000000000000000044461304421264600221450ustar00rootroot00000000000000#!/usr/bin/env bash set -e rm -rf vendor/ source 'hack/.vendor-helpers.sh' clone git github.com/Sirupsen/logrus 4b6ea7319e214d98c938f12692336f7ca9348d6b clone git github.com/cloudfoundry/gosigar 3ed7c74352dae6dc00bdc8c74045375352e3ec05 clone git github.com/codegangsta/cli 9fec0fad02befc9209347cc6d620e68e1b45f74d clone git github.com/coreos/go-systemd 7b2428fec40033549c68f54e26e89e7ca9a9ce31 clone git github.com/cyberdelia/go-metrics-graphite 7e54b5c2aa6eaff4286c44129c3def899dff528c clone git github.com/docker/docker 2f6e3b0ba027b558adabd41344fee59db4441011 clone git github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444 clone git github.com/godbus/dbus e2cf28118e66a6a63db46cf6088a35d2054d3bb0 clone git github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998 clone git github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a clone git github.com/opencontainers/runc 51371867a01c467f08af739783b8beafc154c4d7 https://github.com/docker/runc.git clone git github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c clone git github.com/rcrowley/go-metrics eeba7bd0dd01ace6e690fa833b3f22aaec29af43 clone git github.com/satori/go.uuid f9ab0dce87d815821e221626b772e3475a0d2749 clone git github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 clone git github.com/vishvananda/netlink adb0f53af689dd38f1443eba79489feaacf0b22e clone git github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe clone git golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git clone git golang.org/x/sys d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03 https://github.com/golang/sys.git clone git google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go.git clone git 
github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1 clone git github.com/tonistiigi/fifo b45391ebcd3d282404092c04a2b015b37df12383 clone git github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 clone git github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 clone git github.com/go-check/check a625211d932a2a643d0d17352095f03fb7774663 https://github.com/cpuguy83/check.git # dependencies of docker/pkg/listeners clone git github.com/docker/go-connections v0.2.0 clone git github.com/Microsoft/go-winio v0.3.2 clean docker-containerd-tags-docker-1.13.1/integration-test/000077500000000000000000000000001304421264600226735ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/integration-test/bundle_utils_test.go000066400000000000000000000064751304421264600267660ustar00rootroot00000000000000package main import ( "encoding/json" "fmt" "os" "os/exec" "path/filepath" "reflect" "runtime" utils "github.com/docker/containerd/testutils" ocs "github.com/opencontainers/runtime-spec/specs-go" ) type OciProcessArgs struct { Cmd string Args []string } type Bundle struct { Source string Name string Spec ocs.Spec Path string } var bundleMap map[string]Bundle // untarRootfs untars the given `source` tarPath into `destination/rootfs` func untarRootfs(source string, destination string) error { var tar *exec.Cmd if runtime.GOOS == "solaris" { destination = filepath.Join(destination, "/rootfs/root") tar = exec.Command("gtar", "-C", destination, "-xf", source) } else { destination = filepath.Join(destination, "rootfs") tar = exec.Command("tar", "-C", destination, "-xf", source) } if err := os.MkdirAll(destination, 0755); err != nil { return nil } return tar.Run() } // CreateBundleWithFilter generate a new oci-bundle named `name` from // the provide `source` rootfs. It starts from the default spec // generated by `runc spec`, overrides the `spec.Process.Args` value // with `args` and set `spec.Process.Terminal` to false. 
It then apply // `filter()` to the resulting spec if it is provided. func CreateBundleWithFilter(source, name string, args []string, filter func(spec *ocs.Spec)) error { // Generate the spec var spec ocs.Spec f, err := os.Open(utils.RefOciSpecsPath) if err != nil { return fmt.Errorf("Failed to open default spec: %v", err) } if err := json.NewDecoder(f).Decode(&spec); err != nil { return fmt.Errorf("Failed to load default spec: %v", err) } f.Close() spec.Process.Args = args spec.Process.Terminal = false if filter != nil { filter(&spec) } bundlePath := filepath.Join(utils.BundlesRoot, name) nb := Bundle{source, name, spec, bundlePath} // Check that we don't already have such a bundle if b, ok := bundleMap[name]; ok { if reflect.DeepEqual(b, nb) == false { return fmt.Errorf("A bundle name named '%s' already exist but with different properties! %#v != %#v", name, b, nb) } return nil } // Nothing should be there, but just in case os.RemoveAll(bundlePath) var sourceStr string if runtime.GOOS == "solaris" { sourceStr = source + ".tar.gz" } else { sourceStr = source + ".tar" } if err := untarRootfs(filepath.Join(utils.ArchivesDir, sourceStr), bundlePath); err != nil { return fmt.Errorf("Failed to untar %s.tar: %v", source, err) } // create a place for the io fifo if err := os.Mkdir(filepath.Join(bundlePath, "io"), 0755); err != nil { return fmt.Errorf("Failed to create bundle io directory: %v", err) } // Write the updated spec to the right location config, e := os.Create(filepath.Join(bundlePath, "config.json")) if e != nil { return fmt.Errorf("Failed to create oci spec: %v", e) } defer config.Close() if err := json.NewEncoder(config).Encode(&spec); err != nil { return fmt.Errorf("Failed to encore oci spec: %v", e) } bundleMap[name] = nb return nil } func GetBundle(name string) *Bundle { bundle, ok := bundleMap[name] if !ok { return nil } return &bundle } func CreateBusyboxBundle(name string, args []string) error { return CreateBundleWithFilter("busybox", name, args, nil) 
} func CreateSolarisBundle(name string, args []string) error { return CreateBundleWithFilter("rootfs", name, args, nil) } docker-containerd-tags-docker-1.13.1/integration-test/check_test.go000066400000000000000000000157761304421264600253560ustar00rootroot00000000000000package main import ( "fmt" "io/ioutil" "log" "net" "os" "os/exec" "path/filepath" "strings" "sync" "testing" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/health/grpc_health_v1" "github.com/docker/containerd/api/grpc/types" utils "github.com/docker/containerd/testutils" "github.com/go-check/check" "github.com/golang/protobuf/ptypes/timestamp" ) func Test(t *testing.T) { check.TestingT(t) } func init() { check.Suite(&ContainerdSuite{}) } type ContainerdSuite struct { cwd string outputDir string stateDir string grpcSocket string logFile *os.File cd *exec.Cmd syncChild chan error grpcClient types.APIClient eventFiltersMutex sync.Mutex eventFilters map[string]func(event *types.Event) lastEventTs *timestamp.Timestamp } // getClient returns a connection to the Suite containerd func (cs *ContainerdSuite) getClient(socket string) error { // Parse proto://address form addresses. bindParts := strings.SplitN(socket, "://", 2) if len(bindParts) != 2 { return fmt.Errorf("bad bind address format %s, expected proto://address", socket) } // reset the logger for grpc to log to dev/null so that it does not mess with our stdio grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) dialOpts := []grpc.DialOption{grpc.WithInsecure()} dialOpts = append(dialOpts, grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout(bindParts[0], bindParts[1], timeout) }), grpc.WithBlock(), grpc.WithTimeout(5*time.Second), ) conn, err := grpc.Dial(socket, dialOpts...) 
if err != nil { return err } healthClient := grpc_health_v1.NewHealthClient(conn) if _, err := healthClient.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{}); err != nil { return err } cs.grpcClient = types.NewAPIClient(conn) return nil } // ContainerdEventsHandler will process all events coming from // containerd. If a filter as been register for a given container id // via `SetContainerEventFilter()`, it will be invoked every time an // event for that id is received func (cs *ContainerdSuite) ContainerdEventsHandler(events types.API_EventsClient) { for { e, err := events.Recv() if err != nil { // If daemon died or exited, return if strings.Contains(err.Error(), "transport is closing") { break } time.Sleep(1 * time.Second) events, _ = cs.grpcClient.Events(context.Background(), &types.EventsRequest{Timestamp: cs.lastEventTs}) continue } cs.lastEventTs = e.Timestamp cs.eventFiltersMutex.Lock() if f, ok := cs.eventFilters[e.Id]; ok { f(e) if e.Type == "exit" && e.Pid == "init" { delete(cs.eventFilters, e.Id) } } cs.eventFiltersMutex.Unlock() } } func (cs *ContainerdSuite) StopDaemon(kill bool) { if cs.cd == nil { return } if kill { cs.cd.Process.Kill() <-cs.syncChild cs.cd = nil } else { // Terminate gently if possible cs.cd.Process.Signal(os.Interrupt) done := false for done == false { select { case err := <-cs.syncChild: if err != nil { fmt.Printf("master containerd did not exit cleanly: %v\n", err) } done = true case <-time.After(3 * time.Second): fmt.Println("Timeout while waiting for containerd to exit, killing it!") cs.cd.Process.Kill() } } } } func (cs *ContainerdSuite) RestartDaemon(kill bool) error { cs.StopDaemon(kill) cd := exec.Command("containerd", "--debug", "--state-dir", cs.stateDir, "--listen", cs.grpcSocket, "--metrics-interval", "0m0s", "--runtime-args", fmt.Sprintf("--root=%s", filepath.Join(cs.cwd, cs.outputDir, "runc")), ) cd.Stderr = cs.logFile cd.Stdout = cs.logFile if err := cd.Start(); err != nil { return err } cs.cd = cd if 
err := cs.getClient(cs.grpcSocket); err != nil { // Kill the daemon cs.cd.Process.Kill() return err } // Monitor events events, err := cs.grpcClient.Events(context.Background(), &types.EventsRequest{Timestamp: cs.lastEventTs}) if err != nil { return err } go cs.ContainerdEventsHandler(events) go func() { cs.syncChild <- cd.Wait() }() return nil } func (cs *ContainerdSuite) SetUpSuite(c *check.C) { bundleMap = make(map[string]Bundle) cs.eventFilters = make(map[string]func(event *types.Event)) // Get working directory for tests wd := utils.GetTestOutDir() if err := os.Chdir(wd); err != nil { c.Fatalf("Could not change working directory: %v", err) } cs.cwd = wd // Clean old bundles os.RemoveAll(utils.BundlesRoot) // Ensure the oci bundles directory exists if err := os.MkdirAll(utils.BundlesRoot, 0755); err != nil { c.Fatalf("Failed to create bundles directory: %v", err) } // Generate the reference spec if err := utils.GenerateReferenceSpecs(utils.BundlesRoot); err != nil { c.Fatalf("Unable to generate OCI reference spec: %v", err) } // Create our output directory cs.outputDir = fmt.Sprintf(utils.OutputDirFormat, time.Now().Format("2006-01-02_150405.000000")) cs.stateDir = filepath.Join(cs.outputDir, "containerd-master") if err := os.MkdirAll(cs.stateDir, 0755); err != nil { c.Fatalf("Unable to created output directory '%s': %v", cs.stateDir, err) } cs.grpcSocket = "unix://" + filepath.Join(cs.outputDir, "containerd-master", "containerd.sock") cdLogFile := filepath.Join(cs.outputDir, "containerd-master", "containerd.log") f, err := os.OpenFile(cdLogFile, os.O_CREATE|os.O_TRUNC|os.O_RDWR|os.O_SYNC, 0777) if err != nil { c.Fatalf("Failed to create master containerd log file: %v", err) } cs.logFile = f cs.syncChild = make(chan error) cs.RestartDaemon(false) } func (cs *ContainerdSuite) TearDownSuite(c *check.C) { // tell containerd to stop if cs.cd != nil { cs.cd.Process.Signal(os.Interrupt) done := false for done == false { select { case err := <-cs.syncChild: if err != 
nil { c.Errorf("master containerd did not exit cleanly: %v", err) } done = true case <-time.After(3 * time.Second): fmt.Println("Timeout while waiting for containerd to exit, killing it!") cs.cd.Process.Kill() } } } if cs.logFile != nil { cs.logFile.Close() } } func (cs *ContainerdSuite) SetContainerEventFilter(id string, filter func(event *types.Event)) { cs.eventFiltersMutex.Lock() cs.eventFilters[id] = filter cs.eventFiltersMutex.Unlock() } func (cs *ContainerdSuite) TearDownTest(c *check.C) { ctrs, err := cs.ListRunningContainers() if err != nil { c.Fatalf("Unable to retrieve running containers: %v", err) } // Kill all containers that survived for _, ctr := range ctrs { ch := make(chan interface{}) cs.SetContainerEventFilter(ctr.Id, func(e *types.Event) { if e.Type == "exit" && e.Pid == "init" { ch <- nil } }) if err := cs.KillContainer(ctr.Id); err != nil { fmt.Fprintf(os.Stderr, "Failed to cleanup leftover test containers: %v\n", err) } select { case <-ch: case <-time.After(3 * time.Second): fmt.Fprintf(os.Stderr, "TearDownTest: Containerd %v didn't die after 3 seconds\n", ctr.Id) } } } docker-containerd-tags-docker-1.13.1/integration-test/container_utils_test.go000066400000000000000000000157311304421264600274720ustar00rootroot00000000000000package main import ( "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "sort" "syscall" "time" "github.com/docker/containerd/api/grpc/types" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" "golang.org/x/sys/unix" ) func (cs *ContainerdSuite) GetLogs() string { b, _ := ioutil.ReadFile(cs.logFile.Name()) return string(b) } func (cs *ContainerdSuite) Events(from time.Time, storedOnly bool, id string) (types.API_EventsClient, error) { var ( ftsp *timestamp.Timestamp err error ) if !from.IsZero() { ftsp, err = ptypes.TimestampProto(from) if err != nil { return nil, err } } return cs.grpcClient.Events(context.Background(), &types.EventsRequest{Timestamp: ftsp, 
StoredOnly: storedOnly, Id: id}) } func (cs *ContainerdSuite) ListRunningContainers() ([]*types.Container, error) { resp, err := cs.grpcClient.State(context.Background(), &types.StateRequest{}) if err != nil { return nil, err } return resp.Containers, nil } func (cs *ContainerdSuite) SignalContainerProcess(id string, procID string, sig uint32) error { _, err := cs.grpcClient.Signal(context.Background(), &types.SignalRequest{ Id: id, Pid: procID, Signal: sig, }) return err } func (cs *ContainerdSuite) SignalContainer(id string, sig uint32) error { return cs.SignalContainerProcess(id, "init", sig) } func (cs *ContainerdSuite) KillContainer(id string) error { return cs.SignalContainerProcess(id, "init", uint32(syscall.SIGKILL)) } func (cs *ContainerdSuite) UpdateContainerResource(id string, rs *types.UpdateResource) error { _, err := cs.grpcClient.UpdateContainer(context.Background(), &types.UpdateContainerRequest{ Id: id, Pid: "init", Status: "", Resources: rs, }) return err } func (cs *ContainerdSuite) PauseContainer(id string) error { _, err := cs.grpcClient.UpdateContainer(context.Background(), &types.UpdateContainerRequest{ Id: id, Pid: "init", Status: "paused", }) return err } func (cs *ContainerdSuite) ResumeContainer(id string) error { _, err := cs.grpcClient.UpdateContainer(context.Background(), &types.UpdateContainerRequest{ Id: id, Pid: "init", Status: "running", }) return err } func (cs *ContainerdSuite) GetContainerStats(id string) (*types.StatsResponse, error) { stats, err := cs.grpcClient.Stats(context.Background(), &types.StatsRequest{ Id: id, }) return stats, err } type stdio struct { stdin string stdout string stderr string stdinf *os.File stdoutf *os.File stderrf *os.File stdoutBuffer bytes.Buffer stderrBuffer bytes.Buffer } type ContainerProcess struct { containerID string pid string bundle *Bundle io stdio eventsCh chan *types.Event cs *ContainerdSuite hasExited bool } func (c *ContainerProcess) openIo() (err error) { defer func() { if err != nil 
{ c.Cleanup() } }() c.io.stdinf, err = os.OpenFile(c.io.stdin, os.O_RDWR, 0) if err != nil { return err } c.io.stdoutf, err = os.OpenFile(c.io.stdout, os.O_RDWR, 0) if err != nil { return err } go io.Copy(&c.io.stdoutBuffer, c.io.stdoutf) c.io.stderrf, err = os.OpenFile(c.io.stderr, os.O_RDWR, 0) if err != nil { return err } go io.Copy(&c.io.stderrBuffer, c.io.stderrf) return nil } func (c *ContainerProcess) GetEventsChannel() chan *types.Event { return c.eventsCh } func (c *ContainerProcess) GetNextEvent() *types.Event { if c.hasExited { return nil } e := <-c.eventsCh if e.Type == "exit" && e.Pid == c.pid { c.Cleanup() c.hasExited = true close(c.eventsCh) } return e } func (c *ContainerProcess) CloseStdin() error { _, err := c.cs.grpcClient.UpdateProcess(context.Background(), &types.UpdateProcessRequest{ Id: c.containerID, Pid: c.pid, CloseStdin: true, }) return err } func (c *ContainerProcess) Cleanup() { for _, f := range []*os.File{ c.io.stdinf, c.io.stdoutf, c.io.stderrf, } { if f != nil { f.Close() f = nil } } } func NewContainerProcess(cs *ContainerdSuite, bundle *Bundle, cid, pid string) (c *ContainerProcess, err error) { c = &ContainerProcess{ containerID: cid, pid: "init", bundle: bundle, eventsCh: make(chan *types.Event, 8), cs: cs, hasExited: false, } for name, path := range map[string]*string{ "stdin": &c.io.stdin, "stdout": &c.io.stdout, "stderr": &c.io.stderr, } { *path = filepath.Join(bundle.Path, "io", cid+"-"+pid+"-"+name) if err = unix.Mkfifo(*path, 0755); err != nil && !os.IsExist(err) { return nil, err } } if err = c.openIo(); err != nil { return nil, err } return c, nil } func (cs *ContainerdSuite) StartContainerWithEventFilter(id, bundleName string, filter func(*types.Event)) (c *ContainerProcess, err error) { bundle := GetBundle(bundleName) if bundle == nil { return nil, fmt.Errorf("No such bundle '%s'", bundleName) } c, err = NewContainerProcess(cs, bundle, id, "init") if err != nil { return nil, err } r := &types.CreateContainerRequest{ 
Id: id, BundlePath: filepath.Join(cs.cwd, bundle.Path), Stdin: filepath.Join(cs.cwd, c.io.stdin), Stdout: filepath.Join(cs.cwd, c.io.stdout), Stderr: filepath.Join(cs.cwd, c.io.stderr), } if filter == nil { filter = func(event *types.Event) { c.eventsCh <- event } } cs.SetContainerEventFilter(id, filter) if _, err := cs.grpcClient.CreateContainer(context.Background(), r); err != nil { c.Cleanup() return nil, err } return c, nil } func (cs *ContainerdSuite) StartContainer(id, bundleName string) (c *ContainerProcess, err error) { return cs.StartContainerWithEventFilter(id, bundleName, nil) } func (cs *ContainerdSuite) RunContainer(id, bundleName string) (c *ContainerProcess, err error) { c, err = cs.StartContainer(id, bundleName) if err != nil { return nil, err } for { e := c.GetNextEvent() if e.Type == "exit" && e.Pid == "init" { break } } return c, err } func (cs *ContainerdSuite) AddProcessToContainer(init *ContainerProcess, pid, cwd string, env, args []string, uid, gid uint32) (c *ContainerProcess, err error) { c, err = NewContainerProcess(cs, init.bundle, init.containerID, pid) if err != nil { return nil, err } pr := &types.AddProcessRequest{ Id: init.containerID, Pid: pid, Args: args, Cwd: cwd, Env: env, User: &types.User{ Uid: uid, Gid: gid, }, Stdin: filepath.Join(cs.cwd, c.io.stdin), Stdout: filepath.Join(cs.cwd, c.io.stdout), Stderr: filepath.Join(cs.cwd, c.io.stderr), } _, err = cs.grpcClient.AddProcess(context.Background(), pr) if err != nil { c.Cleanup() return nil, err } return c, nil } type containerSorter struct { c []*types.Container } func (s *containerSorter) Len() int { return len(s.c) } func (s *containerSorter) Swap(i, j int) { s.c[i], s.c[j] = s.c[j], s.c[i] } func (s *containerSorter) Less(i, j int) bool { return s.c[i].Id < s.c[j].Id } func sortContainers(c []*types.Container) { sort.Sort(&containerSorter{c}) } 
docker-containerd-tags-docker-1.13.1/integration-test/events_test.go000066400000000000000000000021111304421264600255600ustar00rootroot00000000000000package main import ( "fmt" "time" "github.com/docker/containerd/api/grpc/types" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (cs *ContainerdSuite) TestEventsId(t *check.C) { if err := CreateBusyboxBundle("busybox-ls", []string{"ls"}); err != nil { t.Fatal(err) } from := time.Now() for i := 0; i < 10; i++ { _, err := cs.RunContainer(fmt.Sprintf("ls-%d", i), "busybox-ls") if err != nil { t.Fatal(err) } } containerID := "ls-4" events, err := cs.Events(from, true, containerID) if err != nil { t.Fatal(err) } evs := []*types.Event{} for { e, err := events.Recv() if err != nil { if err.Error() == "EOF" { break } t.Fatal(err) } evs = append(evs, e) } t.Assert(len(evs), checker.Equals, 2) for idx, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 0, Pid: "init", }, } { evt.Timestamp = evs[idx].Timestamp t.Assert(*evs[idx], checker.Equals, evt) } } docker-containerd-tags-docker-1.13.1/integration-test/exec_test.go000066400000000000000000000153101304421264600252050ustar00rootroot00000000000000// +build !solaris package main import ( "path/filepath" "syscall" "time" "github.com/docker/containerd/api/grpc/types" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ) func (cs *ContainerdSuite) TestBusyboxTopExecEcho(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } var ( err error initp *ContainerProcess echop *ContainerProcess ) containerID := "top" initp, err = cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) echop, err = cs.AddProcessToContainer(initp, "echo", "/", []string{"PATH=/bin"}, []string{"sh", "-c", "echo -n Ay Caramba! 
; exit 1"}, 0, 0) t.Assert(err, checker.Equals, nil) for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "start-process", Id: containerID, Status: 0, Pid: "echo", }, { Type: "exit", Id: containerID, Status: 1, Pid: "echo", }, } { ch := initp.GetEventsChannel() e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) } t.Assert(echop.io.stdoutBuffer.String(), checker.Equals, "Ay Caramba!") } func (cs *ContainerdSuite) TestBusyboxTopExecTop(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } var ( err error initp *ContainerProcess ) containerID := "top" initp, err = cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) execID := "top1" _, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0) t.Assert(err, checker.Equals, nil) for idx, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "start-process", Id: containerID, Status: 0, Pid: execID, }, { Type: "exit", Id: containerID, Status: 137, Pid: execID, }, } { ch := initp.GetEventsChannel() e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) if idx == 1 { // Process Started, kill it cs.SignalContainerProcess(containerID, "top1", uint32(syscall.SIGKILL)) } } // Container should still be running containers, err := cs.ListRunningContainers() if err != nil { t.Fatal(err) } t.Assert(len(containers), checker.Equals, 1) t.Assert(containers[0].Id, checker.Equals, "top") t.Assert(containers[0].Status, checker.Equals, "running") t.Assert(containers[0].BundlePath, check.Equals, filepath.Join(cs.cwd, GetBundle(bundleName).Path)) } func (cs *ContainerdSuite) TestBusyboxTopExecTopKillInit(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } var ( err error initp 
*ContainerProcess ) containerID := "top" initp, err = cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) execID := "top1" _, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/usr/bin"}, []string{"top"}, 0, 0) t.Assert(err, checker.Equals, nil) ch := initp.GetEventsChannel() for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "start-process", Id: containerID, Status: 0, Pid: execID, }, } { e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) } cs.SignalContainerProcess(containerID, "init", uint32(syscall.SIGTERM)) for i := 0; i < 2; i++ { e := <-ch switch e.Pid { case "init": evt := types.Event{ Type: "exit", Id: containerID, Status: 143, Pid: "init", Timestamp: e.Timestamp, } t.Assert(*e, checker.Equals, evt) case execID: evt := types.Event{ Type: "exit", Id: containerID, Status: 137, Pid: execID, Timestamp: e.Timestamp, } t.Assert(*e, checker.Equals, evt) default: t.Fatalf("Unexpected event %v", e) } } } func (cs *ContainerdSuite) TestBusyboxExecCreateDetachedChild(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } var ( err error initp *ContainerProcess ) containerID := "top" initp, err = cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) ch := initp.GetEventsChannel() for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, } { e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) } execID := "sh-sleep" _, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/bin"}, []string{"sh", "-c", "sleep 1000 2>&- 1>&- 0<&- &"}, 0, 0) t.Assert(err, checker.Equals, nil) for _, evt := range []types.Event{ { Type: "start-process", Id: containerID, Status: 0, Pid: execID, }, { Type: "exit", Id: containerID, Status: 0, Pid: execID, }, } { e := <-ch evt.Timestamp = e.Timestamp 
t.Assert(*e, checker.Equals, evt) } // Check that sleep is still running execOutput, err := cs.AddProcessToContainer(initp, "ps", "/", []string{"PATH=/bin"}, []string{"ps", "aux"}, 0, 0) t.Assert(err, checker.Equals, nil) t.Assert(execOutput.io.stdoutBuffer.String(), checker.Contains, "sleep 1000") } func (cs *ContainerdSuite) TestBusyboxExecCreateAttachedChild(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } var ( err error initp *ContainerProcess ) containerID := "top" initp, err = cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) ch := initp.GetEventsChannel() for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, } { e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) } doneCh := make(chan struct{}) go func() { execID := "sh-sleep" _, err = cs.AddProcessToContainer(initp, execID, "/", []string{"PATH=/bin"}, []string{"sh", "-c", "sleep 5 &"}, 0, 0) t.Assert(err, checker.Equals, nil) for _, evt := range []types.Event{ { Type: "start-process", Id: containerID, Status: 0, Pid: execID, }, { Type: "exit", Id: containerID, Status: 0, Pid: execID, }, } { e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) } close(doneCh) }() select { case <-doneCh: break case <-time.After(8 * time.Second): t.Fatal("exec did not exit within 5 seconds") } } docker-containerd-tags-docker-1.13.1/integration-test/start_linux_test.go000066400000000000000000000306721304421264600266450ustar00rootroot00000000000000package main import ( "fmt" "os" "os/exec" "path/filepath" "syscall" "time" "github.com/docker/containerd/api/grpc/types" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" ocs "github.com/opencontainers/runtime-spec/specs-go" "google.golang.org/grpc" ) func (cs *ContainerdSuite) TestStartBusyboxLsSlash(t *check.C) { expectedOutput := `bin dev etc home lib lib64 
linuxrc media mnt opt proc root run sbin sys tmp usr var ` if err := CreateBusyboxBundle("busybox-ls-slash", []string{"ls", "/"}); err != nil { t.Fatal(err) } c, err := cs.RunContainer("myls", "busybox-ls-slash") if err != nil { t.Fatal(err) } t.Assert(c.io.stdoutBuffer.String(), checker.Equals, expectedOutput) } func (cs *ContainerdSuite) TestStartBusyboxNoSuchFile(t *check.C) { expectedOutput := `exec: \"NoSuchFile\": executable file not found in $PATH` if err := CreateBusyboxBundle("busybox-no-such-file", []string{"NoSuchFile"}); err != nil { t.Fatal(err) } _, err := cs.RunContainer("NoSuchFile", "busybox-no-such-file") t.Assert(grpc.ErrorDesc(err), checker.Contains, expectedOutput) } func (cs *ContainerdSuite) TestStartBusyboxTop(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } containerID := "start-busybox-top" _, err := cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) containers, err := cs.ListRunningContainers() if err != nil { t.Fatal(err) } t.Assert(len(containers), checker.Equals, 1) t.Assert(containers[0].Id, checker.Equals, containerID) t.Assert(containers[0].Status, checker.Equals, "running") t.Assert(containers[0].BundlePath, check.Equals, filepath.Join(cs.cwd, GetBundle(bundleName).Path)) } func (cs *ContainerdSuite) TestStartBusyboxLsEvents(t *check.C) { if err := CreateBusyboxBundle("busybox-ls", []string{"ls"}); err != nil { t.Fatal(err) } containerID := "ls-events" c, err := cs.StartContainer(containerID, "busybox-ls") if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 0, Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } } func (cs 
*ContainerdSuite) TestStartBusyboxSleep(t *check.C) { if err := CreateBusyboxBundle("busybox-sleep-5", []string{"sleep", "5"}); err != nil { t.Fatal(err) } ch := make(chan interface{}) filter := func(e *types.Event) { if e.Type == "exit" && e.Pid == "init" { ch <- nil } } start := time.Now() _, err := cs.StartContainerWithEventFilter("sleep5", "busybox-sleep-5", filter) if err != nil { t.Fatal(err) } // We add a generous 20% marge of error select { case <-ch: t.Assert(uint64(time.Now().Sub(start)), checker.LessOrEqualThan, uint64(6*time.Second)) case <-time.After(6 * time.Second): t.Fatal("Container took more than 6 seconds to exit") } } func (cs *ContainerdSuite) TestStartBusyboxTopKill(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } containerID := "top-kill" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } <-time.After(1 * time.Second) err = cs.KillContainer(containerID) if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 128 + uint32(syscall.SIGKILL), Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } } func (cs *ContainerdSuite) TestStartBusyboxTopSignalSigterm(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } containerID := "top-sigterm" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } <-time.After(1 * time.Second) err = cs.SignalContainer(containerID, uint32(syscall.SIGTERM)) if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: 
containerID, Status: 128 + uint32(syscall.SIGTERM), Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } } func (cs *ContainerdSuite) TestStartBusyboxTrapUSR1(t *check.C) { if err := CreateBusyboxBundle("busybox-trap-usr1", []string{"sh", "-c", "trap 'echo -n booh!' SIGUSR1 ; sleep 60 & wait"}); err != nil { t.Fatal(err) } containerID := "trap-usr1" c, err := cs.StartContainer(containerID, "busybox-trap-usr1") if err != nil { t.Fatal(err) } <-time.After(1 * time.Second) if err := cs.SignalContainer(containerID, uint32(syscall.SIGUSR1)); err != nil { t.Fatal(err) } for { e := c.GetNextEvent() if e.Type == "exit" && e.Pid == "init" { break } } t.Assert(c.io.stdoutBuffer.String(), checker.Equals, "booh!") } func (cs *ContainerdSuite) TestStartBusyboxTopPauseResume(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } containerID := "top-pause-resume" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } if err := cs.PauseContainer(containerID); err != nil { t.Fatal(err) } if err := cs.ResumeContainer(containerID); err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "pause", Id: containerID, Status: 0, Pid: "", }, { Type: "resume", Id: containerID, Status: 0, Pid: "", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } // check that status is running containers, err := cs.ListRunningContainers() if err != nil { t.Fatal(err) } t.Assert(len(containers), checker.Equals, 1) t.Assert(containers[0].Id, checker.Equals, containerID) 
t.Assert(containers[0].Status, checker.Equals, "running") } func (cs *ContainerdSuite) TestOOM(t *check.C) { bundleName := "busybox-sh-512k-memlimit" if err := CreateBundleWithFilter("busybox", bundleName, []string{"sh", "-c", "x=oom-party-time; while true; do x=$x$x$x$x$x$x$x$x$x$x; done"}, func(spec *ocs.Spec) { // Limit to 512k for quick oom var limit uint64 = 8 * 1024 * 1024 spec.Linux.Resources.Memory = &ocs.Memory{ Limit: &limit, } if swapEnabled() { spec.Linux.Resources.Memory.Swap = &limit } }); err != nil { t.Fatal(err) } containerID := "sh-oom" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "oom", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 137, Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) case <-time.After(60 * time.Second): t.Fatalf("Container took more than 60 seconds to %s", evt.Type) } } } func (cs *ContainerdSuite) TestRestart(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } totalCtr := 10 ctrs := make([]*ContainerProcess, totalCtr) for i := 0; i < totalCtr; i++ { containerID := fmt.Sprintf("top%d", i) c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } e := c.GetNextEvent() t.Assert(*e, checker.Equals, types.Event{ Type: "start-container", Id: containerID, Status: 0, Pid: "", Timestamp: e.Timestamp, }) ctrs[i] = c } // restart daemon gracefully (SIGINT) cs.RestartDaemon(false) // check that status is running containers, err := cs.ListRunningContainers() if err != nil { t.Fatal(err) } sortContainers(containers) t.Assert(len(containers), checker.Equals, totalCtr) for i := 0; i < totalCtr; i++ { t.Assert(containers[i].Id, checker.Equals, fmt.Sprintf("top%d", i)) 
t.Assert(containers[i].Status, checker.Equals, "running") } // Check that we can exec see docker/docker# execTarget := "top1" echop, err := cs.AddProcessToContainer(ctrs[1], "echo", "/", []string{"PATH=/bin"}, []string{"sh", "-c", "echo -n Success!"}, 0, 0) t.Assert(err, checker.Equals, nil) for _, evt := range []types.Event{ { Type: "start-process", Id: execTarget, Status: 0, Pid: "echo", }, { Type: "exit", Id: execTarget, Status: 0, Pid: "echo", }, } { ch := ctrs[1].GetEventsChannel() e := <-ch evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) } t.Assert(echop.io.stdoutBuffer.String(), checker.Equals, "Success!") // Now kill daemon (SIGKILL) cs.StopDaemon(true) // Sleep a second to allow the timestamp to change since // it's second based <-time.After(3 * time.Second) // Kill a couple of containers killedCtr := map[int]bool{4: true, 2: true} var f func(*types.Event) deathChans := make([]chan error, len(killedCtr)) deathChansIdx := 0 for i := range killedCtr { ch := make(chan error, 1) deathChans[deathChansIdx] = ch deathChansIdx++ syscall.Kill(int(containers[i].Pids[0]), syscall.SIGKILL) // Filter to be notified of their death containerID := fmt.Sprintf("top%d", i) f = func(event *types.Event) { expectedEvent := types.Event{ Type: "exit", Id: containerID, Status: 137, Pid: "init", } expectedEvent.Timestamp = event.Timestamp if ok := t.Check(*event, checker.Equals, expectedEvent); !ok { ch <- fmt.Errorf("Unexpected event: %#v", *event) } else { ch <- nil } } cs.SetContainerEventFilter(containerID, f) } cs.RestartDaemon(true) // Ensure we got our events for i := range deathChans { done := false for done == false { select { case err := <-deathChans[i]: t.Assert(err, checker.Equals, nil) done = true case <-time.After(3 * time.Second): t.Fatal("Exit event for container not received after 3 seconds") } } } // check that status is running containers, err = cs.ListRunningContainers() if err != nil { t.Fatal(err) } sortContainers(containers) 
t.Assert(len(containers), checker.Equals, totalCtr-len(killedCtr)) idShift := 0 for i := 0; i < totalCtr-len(killedCtr); i++ { if _, ok := killedCtr[i+idShift]; ok { idShift++ } t.Assert(containers[i].Id, checker.Equals, fmt.Sprintf("top%d", i+idShift)) t.Assert(containers[i].Status, checker.Equals, "running") } } func swapEnabled() bool { _, err := os.Stat("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") return err == nil } func (cs *ContainerdSuite) TestSigkillShimReuseName(t *check.C) { bundleName := "busybox-top" if err := CreateBusyboxBundle(bundleName, []string{"top"}); err != nil { t.Fatal(err) } containerID := "top" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } // Sigkill the shim exec.Command("pkill", "-9", "containerd-shim").Run() // Wait for it to be reaped for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 128 + 9, Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } // Start a new continer with the same name c, err = cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } } docker-containerd-tags-docker-1.13.1/integration-test/start_solaris_test.go000066400000000000000000000116351304421264600271600ustar00rootroot00000000000000package main import ( "path/filepath" "syscall" "time" "github.com/docker/containerd/api/grpc/types" "github.com/docker/docker/pkg/integration/checker" "github.com/go-check/check" "google.golang.org/grpc" ) func (cs *ContainerdSuite) TestStartBusyboxLsSlash(t *check.C) { expectedOutput := `bin dev etc export home lib mnt opt proc root sbin system tmp usr var ` if err := CreateSolarisBundle("busybox-ls-slash", []string{"ls", "/"}); err != nil { t.Fatal(err) } c, err := cs.RunContainer("myls", 
"busybox-ls-slash") if err != nil { t.Fatal(err) } t.Assert(c.io.stdoutBuffer.String(), checker.Equals, expectedOutput) } func (cs *ContainerdSuite) TestStartBusyboxNoSuchFile(t *check.C) { expectedOutput := `NoSuchFile: No such file or directory` if err := CreateSolarisBundle("busybox-no-such-file", []string{"NoSuchFile"}); err != nil { t.Fatal(err) } _, err := cs.RunContainer("NoSuchFile", "busybox-no-such-file") t.Assert(grpc.ErrorDesc(err), checker.Contains, expectedOutput) } func (cs *ContainerdSuite) TestStartBusyboxTop(t *check.C) { bundleName := "busybox-top" if err := CreateSolarisBundle(bundleName, []string{"sleep", "10"}); err != nil { t.Fatal(err) } containerID := "start-busybox-top" _, err := cs.StartContainer(containerID, bundleName) t.Assert(err, checker.Equals, nil) containers, err := cs.ListRunningContainers() if err != nil { t.Fatal(err) } t.Assert(len(containers), checker.Equals, 1) t.Assert(containers[0].Id, checker.Equals, containerID) t.Assert(containers[0].Status, checker.Equals, "running") t.Assert(containers[0].BundlePath, check.Equals, filepath.Join(cs.cwd, GetBundle(bundleName).Path)) } func (cs *ContainerdSuite) TestStartBusyboxLsEvents(t *check.C) { if err := CreateSolarisBundle("busybox-ls", []string{"ls"}); err != nil { t.Fatal(err) } containerID := "ls-events" c, err := cs.StartContainer(containerID, "busybox-ls") if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 0, Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp evt.Status = e.Status t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } } func (cs *ContainerdSuite) TestStartBusyboxSleep(t *check.C) { if err := CreateSolarisBundle("busybox-sleep-5", []string{"sleep", "5"}); err != nil { t.Fatal(err) } ch := make(chan interface{}) 
filter := func(e *types.Event) { if e.Type == "exit" && e.Pid == "init" { ch <- nil } } start := time.Now() _, err := cs.StartContainerWithEventFilter("sleep5", "busybox-sleep-5", filter) if err != nil { t.Fatal(err) } // We add a generous 20% marge of error select { case <-ch: t.Assert(uint64(time.Now().Sub(start)), checker.LessOrEqualThan, uint64(15*time.Second)) case <-time.After(15 * time.Second): t.Fatal("Container took more than 15 seconds to exit") } } func (cs *ContainerdSuite) TestStartBusyboxTopKill(t *check.C) { bundleName := "busybox-top" if err := CreateSolarisBundle(bundleName, []string{"sleep", "10"}); err != nil { t.Fatal(err) } containerID := "top-kill" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } <-time.After(5 * time.Second) err = cs.KillContainer(containerID) if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 128 + uint32(syscall.SIGKILL), Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp evt.Status = e.Status t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } } func (cs *ContainerdSuite) TestStartBusyboxTopSignalSigterm(t *check.C) { bundleName := "busybox-top" if err := CreateSolarisBundle(bundleName, []string{"sleep", "10"}); err != nil { t.Fatal(err) } containerID := "top-sigterm" c, err := cs.StartContainer(containerID, bundleName) if err != nil { t.Fatal(err) } <-time.After(5 * time.Second) err = cs.SignalContainer(containerID, uint32(syscall.SIGTERM)) if err != nil { t.Fatal(err) } for _, evt := range []types.Event{ { Type: "start-container", Id: containerID, Status: 0, Pid: "", }, { Type: "exit", Id: containerID, Status: 128 + uint32(syscall.SIGTERM), Pid: "init", }, } { ch := c.GetEventsChannel() select { case e := <-ch: evt.Timestamp = e.Timestamp 
evt.Status = e.Status t.Assert(*e, checker.Equals, evt) case <-time.After(2 * time.Second): t.Fatal("Container took more than 2 seconds to terminate") } } } docker-containerd-tags-docker-1.13.1/osutils/000077500000000000000000000000001304421264600210755ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/osutils/fds.go000066400000000000000000000005431304421264600222020ustar00rootroot00000000000000// +build !windows,!darwin package osutils import ( "io/ioutil" "path/filepath" "strconv" ) // GetOpenFds returns the number of open fds for the process provided by pid func GetOpenFds(pid int) (int, error) { dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) if err != nil { return -1, err } return len(dirs), nil } docker-containerd-tags-docker-1.13.1/osutils/prctl.go000066400000000000000000000032461304421264600225550ustar00rootroot00000000000000// +build linux // Package osutils provide access to the Get Child and Set Child prctl // flags. // See http://man7.org/linux/man-pages/man2/prctl.2.html package osutils import ( "syscall" "unsafe" ) // PR_SET_CHILD_SUBREAPER allows setting the child subreaper. // If arg2 is nonzero, set the "child subreaper" attribute of the // calling process; if arg2 is zero, unset the attribute. When a // process is marked as a child subreaper, all of the children // that it creates, and their descendants, will be marked as // having a subreaper. In effect, a subreaper fulfills the role // of init(1) for its descendant processes. Upon termination of // a process that is orphaned (i.e., its immediate parent has // already terminated) and marked as having a subreaper, the // nearest still living ancestor subreaper will receive a SIGCHLD // signal and be able to wait(2) on the process to discover its // termination status. const prSetChildSubreaper = 36 // PR_GET_CHILD_SUBREAPER allows retrieving the current child // subreaper. 
// Return the "child subreaper" setting of the caller, in the // location pointed to by (int *) arg2. const prGetChildSubreaper = 37 // GetSubreaper returns the subreaper setting for the calling process func GetSubreaper() (int, error) { var i uintptr if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prGetChildSubreaper, uintptr(unsafe.Pointer(&i)), 0); err != 0 { return -1, err } return int(i), nil } // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prSetChildSubreaper, uintptr(i), 0); err != 0 { return err } return nil } docker-containerd-tags-docker-1.13.1/osutils/prctl_solaris.go000066400000000000000000000006001304421264600243000ustar00rootroot00000000000000// +build solaris package osutils import ( "errors" ) //Solaris TODO // GetSubreaper returns the subreaper setting for the calling process func GetSubreaper() (int, error) { return 0, errors.New("osutils GetSubreaper not implemented on Solaris") } // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { return nil } docker-containerd-tags-docker-1.13.1/osutils/reaper.go000066400000000000000000000017271304421264600227110ustar00rootroot00000000000000// +build !windows package osutils import "syscall" // Exit is the wait4 information from an exited process type Exit struct { Pid int Status int } // Reap reaps all child processes for the calling process and returns their // exit information func Reap(wait bool) (exits []Exit, err error) { var ( ws syscall.WaitStatus rus syscall.Rusage ) flag := syscall.WNOHANG if wait { flag = 0 } for { pid, err := syscall.Wait4(-1, &ws, flag, &rus) if err != nil { if err == syscall.ECHILD { return exits, nil } return exits, err } if pid <= 0 { return exits, nil } exits = append(exits, Exit{ Pid: pid, Status: exitStatus(ws), }) } } const exitSignalOffset = 128 // exitStatus returns the correct exit 
status for a process based on if it // was signaled or exited cleanly func exitStatus(status syscall.WaitStatus) int { if status.Signaled() { return exitSignalOffset + int(status.Signal()) } return status.ExitStatus() } docker-containerd-tags-docker-1.13.1/runtime/000077500000000000000000000000001304421264600210565ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/runtime/container.go000066400000000000000000000416511304421264600233760ustar00rootroot00000000000000package runtime import ( "encoding/json" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strings" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/specs" ocs "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/net/context" "golang.org/x/sys/unix" ) // Container defines the operations allowed on a container type Container interface { // ID returns the container ID ID() string // Path returns the path to the bundle Path() string // Start starts the init process of the container Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error) // Exec starts another process in an existing container Exec(context.Context, string, specs.ProcessSpec, Stdio) (Process, error) // Delete removes the container's state and any resources Delete() error // Processes returns all the containers processes that have been added Processes() ([]Process, error) // State returns the containers runtime state State() State // Resume resumes a paused container Resume() error // Pause pauses a running container Pause() error // RemoveProcess removes the specified process from the container RemoveProcess(string) error // Checkpoints returns all the checkpoints for a container Checkpoints(checkpointDir string) ([]Checkpoint, error) // Checkpoint creates a new checkpoint Checkpoint(checkpoint Checkpoint, checkpointDir string) error // DeleteCheckpoint deletes the checkpoint for the provided name DeleteCheckpoint(name string, checkpointDir string) error // Labels are 
user provided labels for the container Labels() []string // Pids returns all pids inside the container Pids() ([]int, error) // Stats returns realtime container stats and resource information Stats() (*Stat, error) // Name or path of the OCI compliant runtime used to execute the container Runtime() string // OOM signals the channel if the container received an OOM notification OOM() (OOM, error) // UpdateResource updates the containers resources to new values UpdateResources(*Resource) error // Status return the current status of the container. Status() (State, error) } // OOM wraps a container OOM. type OOM interface { io.Closer FD() int ContainerID() string Flush() Removed() bool } // Stdio holds the path to the 3 pipes used for the standard ios. type Stdio struct { Stdin string Stdout string Stderr string } // NewStdio wraps the given standard io path into an Stdio struct. // If a given parameter is the empty string, it is replaced by "/dev/null" func NewStdio(stdin, stdout, stderr string) Stdio { for _, s := range []*string{ &stdin, &stdout, &stderr, } { if *s == "" { *s = "/dev/null" } } return Stdio{ Stdin: stdin, Stdout: stdout, Stderr: stderr, } } // ContainerOpts keeps the options passed at container creation type ContainerOpts struct { Root string ID string Bundle string Runtime string RuntimeArgs []string Shim string Labels []string NoPivotRoot bool Timeout time.Duration } // New returns a new container func New(opts ContainerOpts) (Container, error) { c := &container{ root: opts.Root, id: opts.ID, bundle: opts.Bundle, labels: opts.Labels, processes: make(map[string]*process), runtime: opts.Runtime, runtimeArgs: opts.RuntimeArgs, shim: opts.Shim, noPivotRoot: opts.NoPivotRoot, timeout: opts.Timeout, } if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil { return nil, err } f, err := os.Create(filepath.Join(c.root, c.id, StateFile)) if err != nil { return nil, err } defer f.Close() if err := json.NewEncoder(f).Encode(state{ Bundle: c.bundle, 
Labels: c.labels, Runtime: c.runtime, RuntimeArgs: c.runtimeArgs, Shim: c.shim, NoPivotRoot: opts.NoPivotRoot, }); err != nil { return nil, err } return c, nil } // Load return a new container from the matchin state file on disk. func Load(root, id, shimName string, timeout time.Duration) (Container, error) { var s state f, err := os.Open(filepath.Join(root, id, StateFile)) if err != nil { return nil, err } defer f.Close() if err := json.NewDecoder(f).Decode(&s); err != nil { return nil, err } c := &container{ root: root, id: id, bundle: s.Bundle, labels: s.Labels, runtime: s.Runtime, runtimeArgs: s.RuntimeArgs, shim: s.Shim, noPivotRoot: s.NoPivotRoot, processes: make(map[string]*process), timeout: timeout, } if c.shim == "" { c.shim = shimName } dirs, err := ioutil.ReadDir(filepath.Join(root, id)) if err != nil { return nil, err } for _, d := range dirs { if !d.IsDir() { continue } pid := d.Name() s, err := readProcessState(filepath.Join(root, id, pid)) if err != nil { return nil, err } p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s) if err != nil { logrus.WithField("id", id).WithField("pid", pid).Debugf("containerd: error loading process %s", err) continue } c.processes[pid] = p } return c, nil } func readProcessState(dir string) (*ProcessState, error) { f, err := os.Open(filepath.Join(dir, "process.json")) if err != nil { return nil, err } defer f.Close() var s ProcessState if err := json.NewDecoder(f).Decode(&s); err != nil { return nil, err } return &s, nil } type container struct { // path to store runtime state information root string id string bundle string runtime string runtimeArgs []string shim string processes map[string]*process labels []string oomFds []int noPivotRoot bool timeout time.Duration } func (c *container) ID() string { return c.id } func (c *container) Path() string { return c.bundle } func (c *container) Labels() []string { return c.labels } func (c *container) readSpec() (*specs.Spec, error) { var spec specs.Spec f, err := 
os.Open(filepath.Join(c.bundle, "config.json")) if err != nil { return nil, err } defer f.Close() if err := json.NewDecoder(f).Decode(&spec); err != nil { return nil, err } return &spec, nil } func (c *container) Delete() error { var err error args := append(c.runtimeArgs, "delete", c.id) if b, derr := exec.Command(c.runtime, args...).CombinedOutput(); derr != nil && !strings.Contains(string(b), "does not exist") { err = fmt.Errorf("%s: %q", derr, string(b)) } if rerr := os.RemoveAll(filepath.Join(c.root, c.id)); rerr != nil { if err != nil { err = fmt.Errorf("%s; failed to remove %s: %s", err, filepath.Join(c.root, c.id), rerr) } else { err = rerr } } return err } func (c *container) Processes() ([]Process, error) { out := []Process{} for _, p := range c.processes { out = append(out, p) } return out, nil } func (c *container) RemoveProcess(pid string) error { delete(c.processes, pid) return os.RemoveAll(filepath.Join(c.root, c.id, pid)) } func (c *container) State() State { proc := c.processes[InitProcessID] if proc == nil { return Stopped } return proc.State() } func (c *container) Runtime() string { return c.runtime } func (c *container) Pause() error { args := c.runtimeArgs args = append(args, "pause", c.id) b, err := exec.Command(c.runtime, args...).CombinedOutput() if err != nil { return fmt.Errorf("%s: %q", err.Error(), string(b)) } return nil } func (c *container) Resume() error { args := c.runtimeArgs args = append(args, "resume", c.id) b, err := exec.Command(c.runtime, args...).CombinedOutput() if err != nil { return fmt.Errorf("%s: %q", err.Error(), string(b)) } return nil } func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) { if checkpointDir == "" { checkpointDir = filepath.Join(c.bundle, "checkpoints") } dirs, err := ioutil.ReadDir(checkpointDir) if err != nil { return nil, err } var out []Checkpoint for _, d := range dirs { if !d.IsDir() { continue } path := filepath.Join(checkpointDir, d.Name(), "config.json") data, err := 
ioutil.ReadFile(path) if err != nil { return nil, err } var cpt Checkpoint if err := json.Unmarshal(data, &cpt); err != nil { return nil, err } out = append(out, cpt) } return out, nil } func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error { if checkpointDir == "" { checkpointDir = filepath.Join(c.bundle, "checkpoints") } if err := os.MkdirAll(checkpointDir, 0755); err != nil { return err } path := filepath.Join(checkpointDir, cpt.Name) if err := os.Mkdir(path, 0755); err != nil { return err } f, err := os.Create(filepath.Join(path, "config.json")) if err != nil { return err } cpt.Created = time.Now() err = json.NewEncoder(f).Encode(cpt) f.Close() if err != nil { return err } args := []string{ "checkpoint", "--image-path", path, "--work-path", filepath.Join(path, "criu.work"), } add := func(flags ...string) { args = append(args, flags...) } add(c.runtimeArgs...) if !cpt.Exit { add("--leave-running") } if cpt.Shell { add("--shell-job") } if cpt.TCP { add("--tcp-established") } if cpt.UnixSockets { add("--ext-unix-sk") } for _, ns := range cpt.EmptyNS { add("--empty-ns", ns) } add(c.id) out, err := exec.Command(c.runtime, args...).CombinedOutput() if err != nil { return fmt.Errorf("%s: %q", err.Error(), string(out)) } return err } func (c *container) DeleteCheckpoint(name string, checkpointDir string) error { if checkpointDir == "" { checkpointDir = filepath.Join(c.bundle, "checkpoints") } return os.RemoveAll(filepath.Join(checkpointDir, name)) } func (c *container) Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error) { processRoot := filepath.Join(c.root, c.id, InitProcessID) if err := os.Mkdir(processRoot, 0755); err != nil { return nil, err } cmd := exec.Command(c.shim, c.id, c.bundle, c.runtime, ) cmd.Dir = processRoot cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, } spec, err := c.readSpec() if err != nil { return nil, err } config := &processConfig{ checkpoint: checkpointPath, root: processRoot, id: 
InitProcessID, c: c, stdio: s, spec: spec, processSpec: specs.ProcessSpec(spec.Process), } p, err := newProcess(config) if err != nil { return nil, err } if err := c.createCmd(ctx, InitProcessID, cmd, p); err != nil { return nil, err } return p, nil } func (c *container) Exec(ctx context.Context, pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) { processRoot := filepath.Join(c.root, c.id, pid) if err := os.Mkdir(processRoot, 0755); err != nil { return nil, err } defer func() { if err != nil { c.RemoveProcess(pid) } }() cmd := exec.Command(c.shim, c.id, c.bundle, c.runtime, ) cmd.Dir = processRoot cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, } spec, err := c.readSpec() if err != nil { return nil, err } config := &processConfig{ exec: true, id: pid, root: processRoot, c: c, processSpec: pspec, spec: spec, stdio: s, } p, err := newProcess(config) if err != nil { return nil, err } if err := c.createCmd(ctx, pid, cmd, p); err != nil { return nil, err } return p, nil } func (c *container) createCmd(ctx context.Context, pid string, cmd *exec.Cmd, p *process) error { p.cmd = cmd if err := cmd.Start(); err != nil { close(p.cmdDoneCh) if exErr, ok := err.(*exec.Error); ok { if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist { return fmt.Errorf("%s not installed on system", c.shim) } } return err } // We need the pid file to have been written to run defer func() { go func() { err := p.cmd.Wait() if err == nil { p.cmdSuccess = true } if same, err := p.isSameProcess(); same && p.pid > 0 { // The process changed its PR_SET_PDEATHSIG, so force // kill it logrus.Infof("containerd: %s:%s (pid %v) has become an orphan, killing it", p.container.id, p.id, p.pid) err = unix.Kill(p.pid, syscall.SIGKILL) if err != nil && err != syscall.ESRCH { logrus.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err) } else { for { err = unix.Kill(p.pid, 0) if err != nil { break } time.Sleep(5 * time.Millisecond) } } } 
close(p.cmdDoneCh) }() }() ch := make(chan error) go func() { if err := c.waitForCreate(p, cmd); err != nil { ch <- err return } c.processes[pid] = p ch <- nil }() select { case <-ctx.Done(): cmd.Process.Kill() cmd.Wait() <-ch return ctx.Err() case err := <-ch: return err } return nil } func hostIDFromMap(id uint32, mp []ocs.IDMapping) int { for _, m := range mp { if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) { return int(m.HostID + (id - m.ContainerID)) } } return 0 } func (c *container) Stats() (*Stat, error) { now := time.Now() args := c.runtimeArgs args = append(args, "events", "--stats", c.id) out, err := exec.Command(c.runtime, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("%s: %q", err.Error(), out) } s := struct { Data *Stat `json:"data"` }{} if err := json.Unmarshal(out, &s); err != nil { return nil, err } s.Data.Timestamp = now return s.Data, nil } // Status implements the runtime Container interface. func (c *container) Status() (State, error) { args := c.runtimeArgs args = append(args, "state", c.id) out, err := exec.Command(c.runtime, args...).CombinedOutput() if err != nil { return "", fmt.Errorf("%s: %q", err.Error(), out) } // We only require the runtime json output to have a top level Status field. 
var s struct { Status State `json:"status"` } if err := json.Unmarshal(out, &s); err != nil { return "", err } return s.Status, nil } func (c *container) writeEventFD(root string, cfd, efd int) error { f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) if err != nil { return err } defer f.Close() _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) return err } type waitArgs struct { pid int err error } func (c *container) waitForCreate(p *process, cmd *exec.Cmd) error { wc := make(chan error, 1) go func() { for { if _, err := p.getPidFromFile(); err != nil { if os.IsNotExist(err) || err == errInvalidPidInt || err == errContainerNotFound { alive, err := isAlive(cmd) if err != nil { wc <- err return } if !alive { // runc could have failed to run the container so lets get the error // out of the logs or the shim could have encountered an error messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json")) if err != nil { wc <- err return } for _, m := range messages { if m.Level == "error" { wc <- fmt.Errorf("shim error: %v", m.Msg) return } } // no errors reported back from shim, check for runc/runtime errors messages, err = readLogMessages(filepath.Join(p.root, "log.json")) if err != nil { if os.IsNotExist(err) { err = ErrContainerNotStarted } wc <- err return } for _, m := range messages { if m.Level == "error" { wc <- fmt.Errorf("oci runtime error: %v", m.Msg) return } } wc <- ErrContainerNotStarted return } time.Sleep(15 * time.Millisecond) continue } wc <- err return } // the pid file was read successfully wc <- nil return } }() select { case err := <-wc: if err != nil { return err } err = p.saveStartTime() if err != nil && !os.IsNotExist(err) { logrus.Warnf("containerd: unable to save %s:%s starttime: %v", p.container.id, p.id, err) } return nil case <-time.After(c.timeout): cmd.Process.Kill() cmd.Wait() return ErrContainerStartTimeout } } // isAlive checks if the shim that launched the container is still alive func 
isAlive(cmd *exec.Cmd) (bool, error) { if _, err := syscall.Wait4(cmd.Process.Pid, nil, syscall.WNOHANG, nil); err == nil { return true, nil } if err := syscall.Kill(cmd.Process.Pid, 0); err != nil { if err == syscall.ESRCH { return false, nil } return false, err } return true, nil } type oom struct { id string root string eventfd int } func (o *oom) ContainerID() string { return o.id } func (o *oom) FD() int { return o.eventfd } func (o *oom) Flush() { buf := make([]byte, 8) syscall.Read(o.eventfd, buf) } func (o *oom) Removed() bool { _, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control")) return os.IsNotExist(err) } func (o *oom) Close() error { return syscall.Close(o.eventfd) } type message struct { Level string `json:"level"` Msg string `json:"msg"` } func readLogMessages(path string) ([]message, error) { var out []message f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() dec := json.NewDecoder(f) for { var m message if err := dec.Decode(&m); err != nil { if err == io.EOF { break } return nil, err } out = append(out, m) } return out, nil } docker-containerd-tags-docker-1.13.1/runtime/container_linux.go000066400000000000000000000100751304421264600246110ustar00rootroot00000000000000package runtime import ( "bufio" "bytes" "encoding/json" "fmt" "os" "os/exec" "path/filepath" "strings" "syscall" "github.com/docker/containerd/specs" ocs "github.com/opencontainers/runtime-spec/specs-go" ) func findCgroupMountpointAndRoot(pid int, subsystem string) (string, string, error) { f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { return "", "", err } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { txt := scanner.Text() fields := strings.Split(txt, " ") for _, opt := range strings.Split(fields[len(fields)-1], ",") { if opt == subsystem { return fields[4], fields[3], nil } } } if err := scanner.Err(); err != nil { return "", "", err } return "", "", fmt.Errorf("cgroup path for %s not found", 
subsystem) } func parseCgroupFile(path string) (map[string]string, error) { f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() s := bufio.NewScanner(f) cgroups := make(map[string]string) for s.Scan() { if err := s.Err(); err != nil { return nil, err } text := s.Text() parts := strings.Split(text, ":") for _, subs := range strings.Split(parts[1], ",") { cgroups[subs] = parts[2] } } return cgroups, nil } func (c *container) OOM() (OOM, error) { p := c.processes[InitProcessID] if p == nil { return nil, fmt.Errorf("no init process found") } mountpoint, hostRoot, err := findCgroupMountpointAndRoot(os.Getpid(), "memory") if err != nil { return nil, err } cgroups, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", p.pid)) if err != nil { return nil, err } root, ok := cgroups["memory"] if !ok { return nil, fmt.Errorf("no memory cgroup for container %s", c.ID()) } // Take care of the case were we're running inside a container // ourself root = strings.TrimPrefix(root, hostRoot) return c.getMemoryEventFD(filepath.Join(mountpoint, root)) } func (c *container) Pids() ([]int, error) { var pids []int args := c.runtimeArgs args = append(args, "ps", "--format=json", c.id) out, err := exec.Command(c.runtime, args...).CombinedOutput() if err != nil { return nil, fmt.Errorf("%s: %q", err.Error(), out) } if err := json.Unmarshal(out, &pids); err != nil { return nil, err } return pids, nil } func u64Ptr(i uint64) *uint64 { return &i } func (c *container) UpdateResources(r *Resource) error { sr := ocs.Resources{ Memory: &ocs.Memory{ Limit: u64Ptr(uint64(r.Memory)), Reservation: u64Ptr(uint64(r.MemoryReservation)), Swap: u64Ptr(uint64(r.MemorySwap)), Kernel: u64Ptr(uint64(r.KernelMemory)), KernelTCP: u64Ptr(uint64(r.KernelTCPMemory)), }, CPU: &ocs.CPU{ Shares: u64Ptr(uint64(r.CPUShares)), Quota: u64Ptr(uint64(r.CPUQuota)), Period: u64Ptr(uint64(r.CPUPeriod)), Cpus: &r.CpusetCpus, Mems: &r.CpusetMems, }, BlockIO: &ocs.BlockIO{ Weight: &r.BlkioWeight, }, } srStr 
:= bytes.NewBuffer(nil) if err := json.NewEncoder(srStr).Encode(&sr); err != nil { return err } args := c.runtimeArgs args = append(args, "update", "-r", "-", c.id) cmd := exec.Command(c.runtime, args...) cmd.Stdin = srStr b, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf(string(b)) } return nil } func getRootIDs(s *specs.Spec) (int, int, error) { if s == nil { return 0, 0, nil } var hasUserns bool for _, ns := range s.Linux.Namespaces { if ns.Type == ocs.UserNamespace { hasUserns = true break } } if !hasUserns { return 0, 0, nil } uid := hostIDFromMap(0, s.Linux.UIDMappings) gid := hostIDFromMap(0, s.Linux.GIDMappings) return uid, gid, nil } func (c *container) getMemoryEventFD(root string) (*oom, error) { f, err := os.Open(filepath.Join(root, "memory.oom_control")) if err != nil { return nil, err } defer f.Close() fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0) if serr != 0 { return nil, serr } if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil { syscall.Close(int(fd)) return nil, err } return &oom{ root: root, id: c.id, eventfd: int(fd), }, nil } docker-containerd-tags-docker-1.13.1/runtime/container_solaris.go000066400000000000000000000021361304421264600251250ustar00rootroot00000000000000package runtime import ( "bytes" "encoding/json" "fmt" "os/exec" "strings" "github.com/docker/containerd/specs" ocs "github.com/opencontainers/runtime-spec/specs-go" ) func getRootIDs(s *specs.Spec) (int, int, error) { return 0, 0, nil } func (c *container) OOM() (OOM, error) { return nil, nil } func (c *container) Pids() ([]int, error) { var pids []int // TODO: This could be racy. Needs more investigation. 
//we get this information from runz state cmd := exec.Command(c.runtime, "state", c.id) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err := cmd.Run(); err != nil { if strings.Contains(errBuf.String(), "Container not found") { return nil, errContainerNotFound } return nil, fmt.Errorf("Error is: %+v\n", err) } response := ocs.State{} decoder := json.NewDecoder(outBuf) if err := decoder.Decode(&response); err != nil { return nil, fmt.Errorf("unable to decode json response: %+v", err) } pids = append(pids, response.Pid) return pids, nil } func (c *container) UpdateResources(r *Resource) error { return nil } docker-containerd-tags-docker-1.13.1/runtime/process.go000066400000000000000000000260151304421264600230670ustar00rootroot00000000000000package runtime import ( "encoding/json" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "strconv" "strings" "sync" "syscall" "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/specs" "golang.org/x/sys/unix" ) // Process holds the operation allowed on a container's process type Process interface { io.Closer // ID of the process. // This is either "init" when it is the container's init process or // it is a user provided id for the process similar to the container id ID() string // Start unblocks the associated container init process. 
// This should only be called on the process with ID "init" Start() error CloseStdin() error Resize(int, int) error // ExitFD returns the fd the provides an event when the process exits ExitFD() int // ExitStatus returns the exit status of the process or an error if it // has not exited ExitStatus() (uint32, error) // Spec returns the process spec that created the process Spec() specs.ProcessSpec // Signal sends the provided signal to the process Signal(os.Signal) error // Container returns the container that the process belongs to Container() Container // Stdio of the container Stdio() Stdio // SystemPid is the pid on the system SystemPid() int // State returns if the process is running or not State() State // Wait reaps the shim process if avaliable Wait() } type processConfig struct { id string root string processSpec specs.ProcessSpec spec *specs.Spec c *container stdio Stdio exec bool checkpoint string } func newProcess(config *processConfig) (*process, error) { p := &process{ root: config.root, id: config.id, container: config.c, spec: config.processSpec, stdio: config.stdio, cmdDoneCh: make(chan struct{}), state: Running, } uid, gid, err := getRootIDs(config.spec) if err != nil { return nil, err } f, err := os.Create(filepath.Join(config.root, "process.json")) if err != nil { return nil, err } defer f.Close() ps := ProcessState{ ProcessSpec: config.processSpec, Exec: config.exec, PlatformProcessState: PlatformProcessState{ Checkpoint: config.checkpoint, RootUID: uid, RootGID: gid, }, Stdin: config.stdio.Stdin, Stdout: config.stdio.Stdout, Stderr: config.stdio.Stderr, RuntimeArgs: config.c.runtimeArgs, NoPivotRoot: config.c.noPivotRoot, } if err := json.NewEncoder(f).Encode(ps); err != nil { return nil, err } exit, err := getExitPipe(filepath.Join(config.root, ExitFile)) if err != nil { return nil, err } control, err := getControlPipe(filepath.Join(config.root, ControlFile)) if err != nil { return nil, err } p.exitPipe = exit p.controlPipe = control return p, 
nil } func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) { p := &process{ root: root, id: id, container: c, spec: s.ProcessSpec, stdio: Stdio{ Stdin: s.Stdin, Stdout: s.Stdout, Stderr: s.Stderr, }, state: Stopped, } startTime, err := ioutil.ReadFile(filepath.Join(p.root, StartTimeFile)) if err != nil && !os.IsNotExist(err) { return nil, err } p.startTime = string(startTime) if _, err := p.getPidFromFile(); err != nil { return nil, err } if _, err := p.ExitStatus(); err != nil { if err == ErrProcessNotExited { exit, err := getExitPipe(filepath.Join(root, ExitFile)) if err != nil { return nil, err } p.exitPipe = exit control, err := getControlPipe(filepath.Join(root, ControlFile)) if err != nil { return nil, err } p.controlPipe = control p.state = Running return p, nil } return nil, err } return p, nil } func readProcStatField(pid int, field int) (string, error) { data, err := ioutil.ReadFile(filepath.Join(string(filepath.Separator), "proc", strconv.Itoa(pid), "stat")) if err != nil { return "", err } if field > 2 { // First, split out the name since he could contains spaces. 
parts := strings.Split(string(data), ") ") // Now split out the rest, we end up with 2 fields less parts = strings.Split(parts[1], " ") return parts[field-2-1], nil // field count start at 1 in manual } parts := strings.Split(string(data), " (") if field == 1 { return parts[0], nil } parts = strings.Split(parts[1], ") ") return parts[0], nil } type process struct { root string id string pid int exitPipe *os.File controlPipe *os.File container *container spec specs.ProcessSpec stdio Stdio cmd *exec.Cmd cmdSuccess bool cmdDoneCh chan struct{} state State stateLock sync.Mutex startTime string } func (p *process) ID() string { return p.id } func (p *process) Container() Container { return p.container } func (p *process) SystemPid() int { return p.pid } // ExitFD returns the fd of the exit pipe func (p *process) ExitFD() int { return int(p.exitPipe.Fd()) } func (p *process) CloseStdin() error { _, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0) return err } func (p *process) Resize(w, h int) error { _, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h) return err } func (p *process) updateExitStatusFile(status uint32) (uint32, error) { p.stateLock.Lock() p.state = Stopped p.stateLock.Unlock() err := ioutil.WriteFile(filepath.Join(p.root, ExitStatusFile), []byte(fmt.Sprintf("%u", status)), 0644) return status, err } func (p *process) handleSigkilledShim(rst uint32, rerr error) (uint32, error) { if p.cmd == nil || p.cmd.Process == nil { e := unix.Kill(p.pid, 0) if e == syscall.ESRCH { logrus.Warnf("containerd: %s:%s (pid %d) does not exist", p.container.id, p.id, p.pid) // The process died while containerd was down (probably of // SIGKILL, but no way to be sure) return p.updateExitStatusFile(UnknownStatus) } // If it's not the same process, just mark it stopped and set // the status to the UnknownStatus value (i.e. 
255) if same, err := p.isSameProcess(); !same { logrus.Warnf("containerd: %s:%s (pid %d) is not the same process anymore (%v)", p.container.id, p.id, p.pid, err) // Create the file so we get the exit event generated once monitor kicks in // without having to go through all this process again return p.updateExitStatusFile(UnknownStatus) } ppid, err := readProcStatField(p.pid, 4) if err != nil { return rst, fmt.Errorf("could not check process ppid: %v (%v)", err, rerr) } if ppid == "1" { logrus.Warnf("containerd: %s:%s shim died, killing associated process", p.container.id, p.id) unix.Kill(p.pid, syscall.SIGKILL) if err != nil && err != syscall.ESRCH { return UnknownStatus, fmt.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err) } // wait for the process to die for { e := unix.Kill(p.pid, 0) if e == syscall.ESRCH { break } time.Sleep(5 * time.Millisecond) } // Create the file so we get the exit event generated once monitor kicks in // without having to go through all this process again return p.updateExitStatusFile(128 + uint32(syscall.SIGKILL)) } return rst, rerr } // Possible that the shim was SIGKILLED e := unix.Kill(p.cmd.Process.Pid, 0) if e != syscall.ESRCH { return rst, rerr } // Ensure we got the shim ProcessState <-p.cmdDoneCh shimStatus := p.cmd.ProcessState.Sys().(syscall.WaitStatus) if shimStatus.Signaled() && shimStatus.Signal() == syscall.SIGKILL { logrus.Debugf("containerd: ExitStatus(container: %s, process: %s): shim was SIGKILL'ed reaping its child with pid %d", p.container.id, p.id, p.pid) rerr = nil rst = 128 + uint32(shimStatus.Signal()) p.stateLock.Lock() p.state = Stopped p.stateLock.Unlock() } return rst, rerr } func (p *process) ExitStatus() (rst uint32, rerr error) { data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile)) defer func() { if rerr != nil { rst, rerr = p.handleSigkilledShim(rst, rerr) } }() if err != nil { if os.IsNotExist(err) { return UnknownStatus, ErrProcessNotExited } 
return UnknownStatus, err } if len(data) == 0 { return UnknownStatus, ErrProcessNotExited } p.stateLock.Lock() p.state = Stopped p.stateLock.Unlock() i, err := strconv.ParseUint(string(data), 10, 32) return uint32(i), err } func (p *process) Spec() specs.ProcessSpec { return p.spec } func (p *process) Stdio() Stdio { return p.stdio } // Close closes any open files and/or resouces on the process func (p *process) Close() error { err := p.exitPipe.Close() if cerr := p.controlPipe.Close(); err == nil { err = cerr } return err } func (p *process) State() State { p.stateLock.Lock() defer p.stateLock.Unlock() return p.state } func (p *process) readStartTime() (string, error) { return readProcStatField(p.pid, 22) } func (p *process) saveStartTime() error { startTime, err := p.readStartTime() if err != nil { return err } p.startTime = startTime return ioutil.WriteFile(filepath.Join(p.root, StartTimeFile), []byte(startTime), 0644) } func (p *process) isSameProcess() (bool, error) { if p.pid == 0 { _, err := p.getPidFromFile() if err != nil { return false, err } } // for backward compat assume it's the same if startTime wasn't set if p.startTime == "" { // Sometimes the process dies before we can get the starttime, // check that the process actually exists if err := unix.Kill(p.pid, 0); err != syscall.ESRCH { return true, nil } return false, nil } startTime, err := p.readStartTime() if err != nil { return false, err } return startTime == p.startTime, nil } // Wait will reap the shim process func (p *process) Wait() { if p.cmdDoneCh != nil { <-p.cmdDoneCh } } func getExitPipe(path string) (*os.File, error) { if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) { return nil, err } // add NONBLOCK in case the other side has already closed or else // this function would never return return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) } func getControlPipe(path string) (*os.File, error) { if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) 
{ return nil, err } return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0) } // Signal sends the provided signal to the process func (p *process) Signal(s os.Signal) error { return syscall.Kill(p.pid, s.(syscall.Signal)) } // Start unblocks the associated container init process. // This should only be called on the process with ID "init" func (p *process) Start() error { if p.ID() == InitProcessID { var ( errC = make(chan error, 1) args = append(p.container.runtimeArgs, "start", p.container.id) cmd = exec.Command(p.container.runtime, args...) ) go func() { out, err := cmd.CombinedOutput() if err != nil { errC <- fmt.Errorf("%s: %q", err.Error(), out) } errC <- nil }() select { case err := <-errC: if err != nil { return err } case <-p.cmdDoneCh: if !p.cmdSuccess { if cmd.Process != nil { cmd.Process.Kill() } cmd.Wait() return ErrShimExited } err := <-errC if err != nil { return err } } } return nil } docker-containerd-tags-docker-1.13.1/runtime/process_linux.go000066400000000000000000000005361304421264600243060ustar00rootroot00000000000000// +build linux package runtime import ( "io/ioutil" "path/filepath" "strconv" ) func (p *process) getPidFromFile() (int, error) { data, err := ioutil.ReadFile(filepath.Join(p.root, "pid")) if err != nil { return -1, err } i, err := strconv.Atoi(string(data)) if err != nil { return -1, errInvalidPidInt } p.pid = i return i, nil } docker-containerd-tags-docker-1.13.1/runtime/process_solaris.go000066400000000000000000000016021304421264600246160ustar00rootroot00000000000000// +build solaris package runtime import ( "bytes" "encoding/json" "fmt" "os/exec" runtimespec "github.com/opencontainers/runtime-spec/specs-go" ) // On Solaris we already have a state file maintained by the framework. // This is read by runz state. We just call that instead of maintaining // a separate file. 
func (p *process) getPidFromFile() (int, error) { //we get this information from runz state cmd := exec.Command("runc", "state", p.container.ID()) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err := cmd.Run(); err != nil { // TODO: Improve logic return -1, errContainerNotFound } response := runtimespec.State{} decoder := json.NewDecoder(outBuf) if err := decoder.Decode(&response); err != nil { return -1, fmt.Errorf("unable to decode json response: %+v", err) } p.pid = response.Pid return p.pid, nil } docker-containerd-tags-docker-1.13.1/runtime/runtime.go000066400000000000000000000103521304421264600230710ustar00rootroot00000000000000package runtime import ( "errors" "time" "github.com/docker/containerd/specs" ) var ( // ErrContainerExited is returned when access to an exited // container is attempted ErrContainerExited = errors.New("containerd: container has exited") // ErrProcessNotExited is returned when trying to retrieve the exit // status of an alive process ErrProcessNotExited = errors.New("containerd: process has not exited") // ErrContainerNotStarted is returned when a container fails to // start without error from the shim or the OCI runtime ErrContainerNotStarted = errors.New("containerd: container not started") // ErrContainerStartTimeout is returned if a container takes too // long to start ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout") // ErrShimExited is returned if the shim or the contianer's init process // exits before completing ErrShimExited = errors.New("containerd: shim exited before container process was started") errNoPidFile = errors.New("containerd: no process pid file found") errInvalidPidInt = errors.New("containerd: process pid is invalid") errContainerNotFound = errors.New("containerd: container not found") errNotImplemented = errors.New("containerd: not implemented") ) const ( // ExitFile holds the name of the pipe used to 
monitor process // exit ExitFile = "exit" // ExitStatusFile holds the name of the file where the container // exit code is to be written ExitStatusFile = "exitStatus" // StateFile holds the name of the file where the container state // is written StateFile = "state.json" // ControlFile holds the name of the pipe used to control the shim ControlFile = "control" // InitProcessID holds the special ID used for the very first // container's process InitProcessID = "init" // StartTimeFile holds the name of the file in which the process // start time is saved StartTimeFile = "starttime" // UnknownStatus is the value returned when a process exit // status cannot be determined UnknownStatus = 255 ) // Checkpoint holds information regarding a container checkpoint type Checkpoint struct { // Timestamp is the time that checkpoint happened Created time.Time `json:"created"` // Name is the name of the checkpoint Name string `json:"name"` // TCP checkpoints open tcp connections TCP bool `json:"tcp"` // UnixSockets persists unix sockets in the checkpoint UnixSockets bool `json:"unixSockets"` // Shell persists tty sessions in the checkpoint Shell bool `json:"shell"` // Exit exits the container after the checkpoint is finished Exit bool `json:"exit"` // EmptyNS tells CRIU to omit a specified namespace EmptyNS []string `json:"emptyNS,omitempty"` } // PlatformProcessState container platform-specific fields in the ProcessState structure type PlatformProcessState struct { Checkpoint string `json:"checkpoint"` RootUID int `json:"rootUID"` RootGID int `json:"rootGID"` } // State represents a container state type State string // Resource regroups the various container limits that can be updated type Resource struct { CPUShares int64 BlkioWeight uint16 CPUPeriod int64 CPUQuota int64 CpusetCpus string CpusetMems string KernelMemory int64 KernelTCPMemory int64 Memory int64 MemoryReservation int64 MemorySwap int64 } // Possible container states const ( Paused = State("paused") Stopped = 
State("stopped") Running = State("running") ) type state struct { Bundle string `json:"bundle"` Labels []string `json:"labels"` Stdin string `json:"stdin"` Stdout string `json:"stdout"` Stderr string `json:"stderr"` Runtime string `json:"runtime"` RuntimeArgs []string `json:"runtimeArgs"` Shim string `json:"shim"` NoPivotRoot bool `json:"noPivotRoot"` } // ProcessState holds the process OCI specs along with various fields // required by containerd type ProcessState struct { specs.ProcessSpec Exec bool `json:"exec"` Stdin string `json:"containerdStdin"` Stdout string `json:"containerdStdout"` Stderr string `json:"containerdStderr"` RuntimeArgs []string `json:"runtimeArgs"` NoPivotRoot bool `json:"noPivotRoot"` PlatformProcessState } docker-containerd-tags-docker-1.13.1/runtime/runtime_test.go000066400000000000000000000076711304421264600241420ustar00rootroot00000000000000package runtime import ( "context" "flag" "fmt" "io" "os" "os/exec" "path/filepath" "syscall" "testing" "time" utils "github.com/docker/containerd/testutils" ) var ( devNull = "/dev/null" stdin io.WriteCloser runtimeTool = flag.String("runtime", "runc", "Runtime to use for this test") ) // Create containerd state and oci bundles directory func setup() error { if err := os.MkdirAll(utils.StateDir, 0755); err != nil { return err } if err := os.MkdirAll(utils.BundlesRoot, 0755); err != nil { return err } return nil } // Creates the bundleDir with rootfs, io fifo dir and a default spec. 
// On success, returns the bundlePath func setupBundle(bundleName string) (string, error) { bundlePath := filepath.Join(utils.BundlesRoot, bundleName) if err := os.MkdirAll(bundlePath, 0755); err != nil { fmt.Println("Unable to create bundlePath due to ", err) return "", err } io := filepath.Join(bundlePath, "io") if err := os.MkdirAll(io, 0755); err != nil { fmt.Println("Unable to create io dir due to ", err) return "", err } if err := utils.GenerateReferenceSpecs(bundlePath); err != nil { fmt.Println("Unable to generate OCI reference spec: ", err) return "", err } if err := utils.CreateBusyboxBundle(bundleName); err != nil { fmt.Println("CreateBusyboxBundle error: ", err) return "", err } return bundlePath, nil } func setupStdio(cwd string, bundlePath string, bundleName string) (Stdio, error) { s := NewStdio(devNull, devNull, devNull) pid := "init" for stdName, stdPath := range map[string]*string{ "stdin": &s.Stdin, "stdout": &s.Stdout, "stderr": &s.Stderr, } { *stdPath = filepath.Join(cwd, bundlePath, "io", bundleName+"-"+pid+"-"+stdName) if err := syscall.Mkfifo(*stdPath, 0755); err != nil && !os.IsExist(err) { fmt.Println("Mkfifo error: ", err) return s, err } } err := attachStdio(s) if err != nil { fmt.Println("attachStdio error: ", err) return s, err } return s, nil } func attachStdio(s Stdio) error { stdinf, err := os.OpenFile(s.Stdin, syscall.O_RDWR, 0) if err != nil { return err } stdin = stdinf stdoutf, err := os.OpenFile(s.Stdout, syscall.O_RDWR, 0) if err != nil { return err } go io.Copy(os.Stdout, stdoutf) stderrf, err := os.OpenFile(s.Stderr, syscall.O_RDWR, 0) if err != nil { return err } go io.Copy(os.Stderr, stderrf) return nil } func teardownBundle(bundleName string) { containerRoot := filepath.Join(utils.StateDir, bundleName) os.RemoveAll(containerRoot) bundlePath := filepath.Join(utils.BundlesRoot, bundleName) os.RemoveAll(bundlePath) return } // Remove containerd state and oci bundles directory func teardown() { os.RemoveAll(utils.StateDir) 
os.RemoveAll(utils.BundlesRoot) } func BenchmarkBusyboxSh(b *testing.B) { bundleName := "busybox-sh" wd := utils.GetTestOutDir() if err := os.Chdir(wd); err != nil { b.Fatalf("Could not change working directory: %v", err) } if err := setup(); err != nil { b.Fatalf("Error setting up test: %v", err) } defer teardown() for n := 0; n < b.N; n++ { bundlePath, err := setupBundle(bundleName) if err != nil { return } s, err := setupStdio(wd, bundlePath, bundleName) if err != nil { return } c, err := New(ContainerOpts{ Root: utils.StateDir, ID: bundleName, Bundle: filepath.Join(wd, bundlePath), Runtime: *runtimeTool, Shim: "containerd-shim", Timeout: 15 * time.Second, }) if err != nil { b.Fatalf("Error creating a New container: ", err) } benchmarkStartContainer(b, c, s, bundleName) teardownBundle(bundleName) } } func benchmarkStartContainer(b *testing.B, c Container, s Stdio, bundleName string) { p, err := c.Start(context.Background(), "", s) if err != nil { b.Fatalf("Error starting container %v", err) } kill := exec.Command(c.Runtime(), "kill", bundleName, "KILL") kill.Run() p.Wait() c.Delete() // wait for kill to finish. 
selected wait time is arbitrary time.Sleep(500 * time.Millisecond) } docker-containerd-tags-docker-1.13.1/runtime/stats.go000066400000000000000000000057221304421264600225510ustar00rootroot00000000000000package runtime import "time" // Stat holds a container statistics type Stat struct { // Timestamp is the time that the statistics where collected Timestamp time.Time CPU CPU `json:"cpu"` Memory Memory `json:"memory"` Pids Pids `json:"pids"` Blkio Blkio `json:"blkio"` Hugetlb map[string]Hugetlb `json:"hugetlb"` } // Hugetlb holds information regarding a container huge tlb usage type Hugetlb struct { Usage uint64 `json:"usage,omitempty"` Max uint64 `json:"max,omitempty"` Failcnt uint64 `json:"failcnt"` } // BlkioEntry represents a single record for a Blkio stat type BlkioEntry struct { Major uint64 `json:"major,omitempty"` Minor uint64 `json:"minor,omitempty"` Op string `json:"op,omitempty"` Value uint64 `json:"value,omitempty"` } // Blkio regroups all the Blkio related stats type Blkio struct { IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"` IoServicedRecursive []BlkioEntry `json:"ioServicedRecursive,omitempty"` IoQueuedRecursive []BlkioEntry `json:"ioQueueRecursive,omitempty"` IoServiceTimeRecursive []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"` IoWaitTimeRecursive []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"` IoMergedRecursive []BlkioEntry `json:"ioMergedRecursive,omitempty"` IoTimeRecursive []BlkioEntry `json:"ioTimeRecursive,omitempty"` SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"` } // Pids holds the stat of the pid usage of the machine type Pids struct { Current uint64 `json:"current,omitempty"` Limit uint64 `json:"limit,omitempty"` } // Throttling holds a cpu throttling information type Throttling struct { Periods uint64 `json:"periods,omitempty"` ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"` ThrottledTime uint64 `json:"throttledTime,omitempty"` } // CPUUsage holds information 
regarding cpu usage type CPUUsage struct { // Units: nanoseconds. Total uint64 `json:"total,omitempty"` Percpu []uint64 `json:"percpu,omitempty"` Kernel uint64 `json:"kernel"` User uint64 `json:"user"` } // CPU regroups both a CPU usage and throttling information type CPU struct { Usage CPUUsage `json:"usage,omitempty"` Throttling Throttling `json:"throttling,omitempty"` } // MemoryEntry regroups statistic about a given type of memory type MemoryEntry struct { Limit uint64 `json:"limit"` Usage uint64 `json:"usage,omitempty"` Max uint64 `json:"max,omitempty"` Failcnt uint64 `json:"failcnt"` } // Memory holds information regarding the different type of memories available type Memory struct { Cache uint64 `json:"cache,omitempty"` Usage MemoryEntry `json:"usage,omitempty"` Swap MemoryEntry `json:"swap,omitempty"` Kernel MemoryEntry `json:"kernel,omitempty"` KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` Raw map[string]uint64 `json:"raw,omitempty"` } docker-containerd-tags-docker-1.13.1/specs/000077500000000000000000000000001304421264600205105ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/specs/spec_linux.go000066400000000000000000000004321304421264600232070ustar00rootroot00000000000000package specs import oci "github.com/opencontainers/runtime-spec/specs-go" type ( // ProcessSpec aliases the platform process specs ProcessSpec oci.Process // Spec aliases the platform oci spec Spec oci.Spec // Rlimit aliases the platform resource limit Rlimit oci.Rlimit ) docker-containerd-tags-docker-1.13.1/specs/spec_solaris.go000066400000000000000000000003301304421264600235210ustar00rootroot00000000000000package specs import ocs "github.com/opencontainers/runtime-spec/specs-go" type ( // ProcessSpec aliases the platform process specs ProcessSpec ocs.Process // Spec aliases the platform oci spec Spec ocs.Spec ) 
docker-containerd-tags-docker-1.13.1/supervisor/000077500000000000000000000000001304421264600216145ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/supervisor/add_process.go000066400000000000000000000023361304421264600244350ustar00rootroot00000000000000package supervisor import ( "os" "time" "github.com/docker/containerd/runtime" "github.com/docker/containerd/specs" "golang.org/x/net/context" ) // AddProcessTask holds everything necessary to add a process to a // container type AddProcessTask struct { baseTask ID string PID string Stdout string Stderr string Stdin string ProcessSpec *specs.ProcessSpec StartResponse chan StartResponse Ctx context.Context } func (s *Supervisor) addProcess(t *AddProcessTask) error { start := time.Now() ci, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } process, err := ci.container.Exec(t.Ctx, t.PID, *t.ProcessSpec, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) if err != nil { return err } s.newExecSyncChannel(t.ID, t.PID) if err := s.monitorProcess(process); err != nil { s.deleteExecSyncChannel(t.ID, t.PID) // Kill process process.Signal(os.Kill) ci.container.RemoveProcess(t.PID) return err } ExecProcessTimer.UpdateSince(start) t.StartResponse <- StartResponse{ExecPid: process.SystemPid()} s.notifySubscribers(Event{ Timestamp: time.Now(), Type: StateStartProcess, PID: t.PID, ID: t.ID, }) return nil } docker-containerd-tags-docker-1.13.1/supervisor/checkpoint.go000066400000000000000000000016221304421264600242730ustar00rootroot00000000000000// +build !windows package supervisor import "github.com/docker/containerd/runtime" // CreateCheckpointTask holds needed parameters to create a new checkpoint type CreateCheckpointTask struct { baseTask ID string CheckpointDir string Checkpoint *runtime.Checkpoint } func (s *Supervisor) createCheckpoint(t *CreateCheckpointTask) error { i, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } return i.container.Checkpoint(*t.Checkpoint, t.CheckpointDir) } 
// DeleteCheckpointTask holds needed parameters to delete a checkpoint type DeleteCheckpointTask struct { baseTask ID string CheckpointDir string Checkpoint *runtime.Checkpoint } func (s *Supervisor) deleteCheckpoint(t *DeleteCheckpointTask) error { i, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } return i.container.DeleteCheckpoint(t.Checkpoint.Name, t.CheckpointDir) } docker-containerd-tags-docker-1.13.1/supervisor/create.go000066400000000000000000000030151304421264600234050ustar00rootroot00000000000000package supervisor import ( "path/filepath" "time" "github.com/docker/containerd/runtime" "golang.org/x/net/context" ) // StartTask holds needed parameters to create a new container type StartTask struct { baseTask ID string BundlePath string Stdout string Stderr string Stdin string StartResponse chan StartResponse Labels []string NoPivotRoot bool Checkpoint *runtime.Checkpoint CheckpointDir string Runtime string RuntimeArgs []string Ctx context.Context } func (s *Supervisor) start(t *StartTask) error { start := time.Now() rt := s.runtime rtArgs := s.runtimeArgs if t.Runtime != "" { rt = t.Runtime rtArgs = t.RuntimeArgs } container, err := runtime.New(runtime.ContainerOpts{ Root: s.stateDir, ID: t.ID, Bundle: t.BundlePath, Runtime: rt, RuntimeArgs: rtArgs, Shim: s.shim, Labels: t.Labels, NoPivotRoot: t.NoPivotRoot, Timeout: s.timeout, }) if err != nil { return err } s.containers[t.ID] = &containerInfo{ container: container, } ContainersCounter.Inc(1) task := &startTask{ Err: t.ErrorCh(), Container: container, StartResponse: t.StartResponse, Stdin: t.Stdin, Stdout: t.Stdout, Stderr: t.Stderr, Ctx: t.Ctx, } if t.Checkpoint != nil { task.CheckpointPath = filepath.Join(t.CheckpointDir, t.Checkpoint.Name) } s.startTasks <- task ContainerCreateTimer.UpdateSince(start) return errDeferredResponse } docker-containerd-tags-docker-1.13.1/supervisor/create_solaris.go000066400000000000000000000002311304421264600251360ustar00rootroot00000000000000package 
supervisor type platformStartTask struct { } // Checkpoint not supported on Solaris func (task *startTask) setTaskCheckpoint(t *StartTask) { } docker-containerd-tags-docker-1.13.1/supervisor/delete.go000066400000000000000000000023221304421264600234040ustar00rootroot00000000000000package supervisor import ( "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/runtime" ) // DeleteTask holds needed parameters to remove a container type DeleteTask struct { baseTask ID string Status uint32 PID string NoEvent bool Process runtime.Process } func (s *Supervisor) delete(t *DeleteTask) error { if i, ok := s.containers[t.ID]; ok { start := time.Now() if err := s.deleteContainer(i.container); err != nil { logrus.WithField("error", err).Error("containerd: deleting container") } if t.Process != nil { t.Process.Wait() } if !t.NoEvent { execMap := s.getDeleteExecSyncMap(t.ID) go func() { // Wait for all exec processe events to be sent (we seem // to sometimes receive them after the init event) for _, ch := range execMap { <-ch } s.notifySubscribers(Event{ Type: StateExit, Timestamp: time.Now(), ID: t.ID, Status: t.Status, PID: t.PID, }) }() } ContainersCounter.Dec(1) ContainerDeleteTimer.UpdateSince(start) } return nil } func (s *Supervisor) deleteContainer(container runtime.Container) error { delete(s.containers, container.ID()) return container.Delete() } docker-containerd-tags-docker-1.13.1/supervisor/errors.go000066400000000000000000000023561304421264600234650ustar00rootroot00000000000000package supervisor import "errors" var ( // ErrContainerNotFound is returned when the container ID passed // for a given operation is invalid ErrContainerNotFound = errors.New("containerd: container not found") // ErrProcessNotFound is returned when the process ID passed for // a given operation is invalid ErrProcessNotFound = errors.New("containerd: process not found for container") // ErrUnknownContainerStatus is returned when the container status // cannot be determined 
ErrUnknownContainerStatus = errors.New("containerd: unknown container status ") // ErrUnknownTask is returned when an unknown Task type is // scheduled (should never happen). ErrUnknownTask = errors.New("containerd: unknown task type") // Internal errors errShutdown = errors.New("containerd: supervisor is shutdown") errRootNotAbs = errors.New("containerd: rootfs path is not an absolute path") errNoContainerForPid = errors.New("containerd: pid not registered for any container") // internal error where the handler will defer to another for the final response // // TODO: we could probably do a typed error with another error channel for this to make it // less like magic errDeferredResponse = errors.New("containerd: deferred response") ) docker-containerd-tags-docker-1.13.1/supervisor/exit.go000066400000000000000000000042761304421264600231250ustar00rootroot00000000000000package supervisor import ( "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/runtime" ) // ExitTask holds needed parameters to execute the exit task type ExitTask struct { baseTask Process runtime.Process } func (s *Supervisor) exit(t *ExitTask) error { start := time.Now() proc := t.Process status, err := proc.ExitStatus() if err != nil { logrus.WithFields(logrus.Fields{ "error": err, "pid": proc.ID(), "id": proc.Container().ID(), "systemPid": proc.SystemPid(), }).Error("containerd: get exit status") } logrus.WithFields(logrus.Fields{ "pid": proc.ID(), "status": status, "id": proc.Container().ID(), "systemPid": proc.SystemPid(), }).Debug("containerd: process exited") // if the process is the the init process of the container then // fire a separate event for this process if proc.ID() != runtime.InitProcessID { ne := &ExecExitTask{ ID: proc.Container().ID(), PID: proc.ID(), Status: status, Process: proc, } s.execExit(ne) return nil } container := proc.Container() ne := &DeleteTask{ ID: container.ID(), Status: status, PID: proc.ID(), Process: proc, } s.delete(ne) 
ExitProcessTimer.UpdateSince(start) return nil } // ExecExitTask holds needed parameters to execute the exec exit task type ExecExitTask struct { baseTask ID string PID string Status uint32 Process runtime.Process } func (s *Supervisor) execExit(t *ExecExitTask) error { container := t.Process.Container() // exec process: we remove this process without notifying the main event loop if err := container.RemoveProcess(t.PID); err != nil { logrus.WithField("error", err).Error("containerd: find container for pid") } synCh := s.getExecSyncChannel(t.ID, t.PID) // If the exec spawned children which are still using its IO // waiting here will block until they die or close their IO // descriptors. // Hence, we use a go routine to avoid blocking all other operations go func() { t.Process.Wait() s.notifySubscribers(Event{ Timestamp: time.Now(), ID: t.ID, Type: StateExit, PID: t.PID, Status: t.Status, }) close(synCh) }() return nil } docker-containerd-tags-docker-1.13.1/supervisor/get_containers.go000066400000000000000000000016611304421264600251530ustar00rootroot00000000000000package supervisor import "github.com/docker/containerd/runtime" // GetContainersTask holds needed parameters to retrieve a list of // containers type GetContainersTask struct { baseTask ID string GetState func(c runtime.Container) (interface{}, error) Containers []runtime.Container States []interface{} } func (s *Supervisor) getContainers(t *GetContainersTask) error { if t.ID != "" { ci, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } t.Containers = append(t.Containers, ci.container) if t.GetState != nil { st, err := t.GetState(ci.container) if err != nil { return err } t.States = append(t.States, st) } return nil } for _, ci := range s.containers { t.Containers = append(t.Containers, ci.container) if t.GetState != nil { st, err := t.GetState(ci.container) if err != nil { return err } t.States = append(t.States, st) } } return nil } 
docker-containerd-tags-docker-1.13.1/supervisor/machine.go000066400000000000000000000011401304421264600235430ustar00rootroot00000000000000// +build !solaris package supervisor import "github.com/cloudfoundry/gosigar" // Machine holds the current machine cpu count and ram size type Machine struct { Cpus int Memory int64 } // CollectMachineInformation returns information regarding the current // machine (e.g. CPU count, RAM amount) func CollectMachineInformation() (Machine, error) { m := Machine{} cpu := sigar.CpuList{} if err := cpu.Get(); err != nil { return m, err } m.Cpus = len(cpu.List) mem := sigar.Mem{} if err := mem.Get(); err != nil { return m, err } m.Memory = int64(mem.Total / 1024 / 1024) return m, nil } docker-containerd-tags-docker-1.13.1/supervisor/machine_solaris.go000066400000000000000000000015511304421264600253050ustar00rootroot00000000000000package supervisor /* #include */ import "C" import ( "errors" ) // Machine holds the current machine cpu count and ram size type Machine struct { Cpus int Memory int64 } // CollectMachineInformation returns information regarding the current // machine (e.g. 
CPU count, RAM amount) func CollectMachineInformation() (Machine, error) { m := Machine{} ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN) if ncpus <= 0 { return m, errors.New("Unable to get number of cpus") } m.Cpus = int(ncpus) memTotal := getTotalMem() if memTotal < 0 { return m, errors.New("Unable to get total memory") } m.Memory = int64(memTotal / 1024 / 1024) return m, nil } // Get the system memory info using sysconf same as prtconf func getTotalMem() int64 { pagesize := C.sysconf(C._SC_PAGESIZE) npages := C.sysconf(C._SC_PHYS_PAGES) return int64(pagesize * npages) } docker-containerd-tags-docker-1.13.1/supervisor/metrics.go000066400000000000000000000035341304421264600236160ustar00rootroot00000000000000package supervisor import "github.com/rcrowley/go-metrics" var ( // ContainerCreateTimer holds the metrics timer associated with container creation ContainerCreateTimer = metrics.NewTimer() // ContainerDeleteTimer holds the metrics timer associated with container deletion ContainerDeleteTimer = metrics.NewTimer() // ContainerStartTimer holds the metrics timer associated with container start duration ContainerStartTimer = metrics.NewTimer() // ContainerStatsTimer holds the metrics timer associated with container stats generation ContainerStatsTimer = metrics.NewTimer() // ContainersCounter keeps track of the number of active containers ContainersCounter = metrics.NewCounter() // EventSubscriberCounter keeps track of the number of active event subscribers EventSubscriberCounter = metrics.NewCounter() // TasksCounter keeps track of the number of active supervisor tasks TasksCounter = metrics.NewCounter() // ExecProcessTimer holds the metrics timer associated with container exec ExecProcessTimer = metrics.NewTimer() // ExitProcessTimer holds the metrics timer associated with reporting container exit status ExitProcessTimer = metrics.NewTimer() // EpollFdCounter keeps trac of how many process are being monitored EpollFdCounter = metrics.NewCounter() ) // Metrics return 
the list of all available metrics func Metrics() map[string]interface{} { return map[string]interface{}{ "container-create-time": ContainerCreateTimer, "container-delete-time": ContainerDeleteTimer, "container-start-time": ContainerStartTimer, "container-stats-time": ContainerStatsTimer, "containers": ContainersCounter, "event-subscribers": EventSubscriberCounter, "tasks": TasksCounter, "exec-process-time": ExecProcessTimer, "exit-process-time": ExitProcessTimer, "epoll-fds": EpollFdCounter, } } docker-containerd-tags-docker-1.13.1/supervisor/monitor_linux.go000066400000000000000000000066251304421264600250620ustar00rootroot00000000000000package supervisor import ( "sync" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/containerd/archutils" "github.com/docker/containerd/runtime" ) // NewMonitor starts a new process monitor and returns it func NewMonitor() (*Monitor, error) { m := &Monitor{ receivers: make(map[int]interface{}), exits: make(chan runtime.Process, 1024), ooms: make(chan string, 1024), } fd, err := archutils.EpollCreate1(0) if err != nil { return nil, err } m.epollFd = fd go m.start() return m, nil } // Monitor represents a runtime.Process monitor type Monitor struct { m sync.Mutex receivers map[int]interface{} exits chan runtime.Process ooms chan string epollFd int } // Exits returns the channel used to notify of a process exit func (m *Monitor) Exits() chan runtime.Process { return m.exits } // OOMs returns the channel used to notify of a container exit due to OOM func (m *Monitor) OOMs() chan string { return m.ooms } // Monitor adds a process to the list of the one being monitored func (m *Monitor) Monitor(p runtime.Process) error { m.m.Lock() defer m.m.Unlock() fd := p.ExitFD() event := syscall.EpollEvent{ Fd: int32(fd), Events: syscall.EPOLLHUP, } if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { return err } EpollFdCounter.Inc(1) m.receivers[fd] = p return nil } // MonitorOOM adds a container to the 
list of the ones monitored for OOM func (m *Monitor) MonitorOOM(c runtime.Container) error { m.m.Lock() defer m.m.Unlock() o, err := c.OOM() if err != nil { return err } fd := o.FD() event := syscall.EpollEvent{ Fd: int32(fd), Events: syscall.EPOLLHUP | syscall.EPOLLIN, } if err := archutils.EpollCtl(m.epollFd, syscall.EPOLL_CTL_ADD, fd, &event); err != nil { return err } EpollFdCounter.Inc(1) m.receivers[fd] = o return nil } // Close cleans up resources allocated by NewMonitor() func (m *Monitor) Close() error { return syscall.Close(m.epollFd) } func (m *Monitor) processEvent(fd int, event uint32) { m.m.Lock() r := m.receivers[fd] switch t := r.(type) { case runtime.Process: if event == syscall.EPOLLHUP { delete(m.receivers, fd) if err := syscall.EpollCtl(m.epollFd, syscall.EPOLL_CTL_DEL, fd, &syscall.EpollEvent{ Events: syscall.EPOLLHUP, Fd: int32(fd), }); err != nil { logrus.WithField("error", err).Error("containerd: epoll remove fd") } if err := t.Close(); err != nil { logrus.WithField("error", err).Error("containerd: close process IO") } EpollFdCounter.Dec(1) // defer until lock is released defer func() { m.exits <- t }() } case runtime.OOM: // always flush the event fd t.Flush() if t.Removed() { delete(m.receivers, fd) // epoll will remove the fd from its set after it has been closed t.Close() EpollFdCounter.Dec(1) } else { // defer until lock is released defer func() { m.ooms <- t.ContainerID() }() } } // This cannot be a defer to avoid a deadlock in case the channels // above get full m.m.Unlock() } func (m *Monitor) start() { var events [128]syscall.EpollEvent for { n, err := archutils.EpollWait(m.epollFd, events[:], -1) if err != nil { if err == syscall.EINTR { continue } logrus.WithField("error", err).Fatal("containerd: epoll wait") } // process events for i := 0; i < n; i++ { m.processEvent(int(events[i].Fd), events[i].Events) } } } 
docker-containerd-tags-docker-1.13.1/supervisor/monitor_solaris.go000066400000000000000000000057441304421264600254000ustar00rootroot00000000000000// +build solaris,cgo package supervisor /* #include #include #include #include #include int portAssociate(int port, int fd) { if (port_associate(port, PORT_SOURCE_FD, fd, POLLIN | POLLHUP, NULL) < 0) { return 1; } } port_event_t* getEvent(int e_fd) { port_event_t *ev; ev = (port_event_t *)malloc(sizeof(port_event_t)); if (port_get(e_fd, ev, NULL) < 0) { return NULL; } return ev; } int getFd(uintptr_t x) { return *(int *)x; } void freeEvent( port_event_t *ev){ free(ev); } */ import "C" import ( "sync" "unsafe" "github.com/Sirupsen/logrus" "github.com/docker/containerd/runtime" ) // NewMonitor starts a new process monitor and returns it func NewMonitor() (*Monitor, error) { m := &Monitor{ receivers: make(map[int]interface{}), exits: make(chan runtime.Process, 1024), ooms: make(chan string, 1024), } fd, err := C.port_create() if err != nil { return nil, err } m.epollFd = int(fd) go m.start() return m, nil } // Monitor represents a runtime.Process monitor type Monitor struct { m sync.Mutex receivers map[int]interface{} exits chan runtime.Process ooms chan string epollFd int } // Exits returns the channel used to notify of a process exit func (m *Monitor) Exits() chan runtime.Process { return m.exits } // OOMs returns the channel used to notify of a container exit due to OOM func (m *Monitor) OOMs() chan string { return m.ooms } // Monitor adds a process to the list of the one being monitored func (m *Monitor) Monitor(p runtime.Process) error { m.m.Lock() defer m.m.Unlock() fd := p.ExitFD() if _, err := C.port_associate(C.int(m.epollFd), C.PORT_SOURCE_FD, C.uintptr_t(fd), C.POLLIN|C.POLLHUP, unsafe.Pointer(&fd)); err != nil { return err } EpollFdCounter.Inc(1) m.receivers[fd] = p return nil } // MonitorOOM adds a container to the list of the ones monitored for OOM // There is no OOM-Killer on Solaris therefore nothing to 
setup func (m *Monitor) MonitorOOM(c runtime.Container) error { return nil } // Close cleans up resources allocated by NewMonitor() func (m *Monitor) Close() error { _, err := C.close(C.int(m.epollFd)) return err } func (m *Monitor) start() { for { ev := C.getEvent(C.int(m.epollFd)) if ev == nil { continue } fd := int(C.getFd(C.uintptr_t(uintptr((ev.portev_user))))) if fd < 0 { logrus.Warnf("containerd: epoll wait") } m.m.Lock() r := m.receivers[fd] switch t := r.(type) { case runtime.Process: if ev.portev_events == C.POLLHUP { delete(m.receivers, fd) if err := t.Close(); err != nil { logrus.Warnf("containerd: close process IO") } EpollFdCounter.Dec(1) m.exits <- t } case runtime.OOM: // always flush the event fd t.Flush() if t.Removed() { delete(m.receivers, fd) // epoll will remove the fd from its set after it has been closed t.Close() EpollFdCounter.Dec(1) } else { m.ooms <- t.ContainerID() } } m.m.Unlock() C.freeEvent(ev) } } docker-containerd-tags-docker-1.13.1/supervisor/oom.go000066400000000000000000000006211304421264600227340ustar00rootroot00000000000000package supervisor import ( "time" "github.com/Sirupsen/logrus" ) // OOMTask holds needed parameters to report a container OOM type OOMTask struct { baseTask ID string } func (s *Supervisor) oom(t *OOMTask) error { logrus.WithField("id", t.ID).Debug("containerd: container oom") s.notifySubscribers(Event{ Timestamp: time.Now(), ID: t.ID, Type: StateOOM, }) return nil } docker-containerd-tags-docker-1.13.1/supervisor/signal.go000066400000000000000000000007661304421264600234310ustar00rootroot00000000000000package supervisor import ( "os" ) // SignalTask holds needed parameters to signal a container type SignalTask struct { baseTask ID string PID string Signal os.Signal } func (s *Supervisor) signal(t *SignalTask) error { i, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } processes, err := i.container.Processes() if err != nil { return err } for _, p := range processes { if p.ID() == t.PID { 
return p.Signal(t.Signal) } } return ErrProcessNotFound } docker-containerd-tags-docker-1.13.1/supervisor/sort.go000066400000000000000000000007521304421264600231360ustar00rootroot00000000000000package supervisor import ( "sort" "github.com/docker/containerd/runtime" ) func sortProcesses(p []runtime.Process) { sort.Sort(&processSorter{p}) } type processSorter struct { processes []runtime.Process } func (s *processSorter) Len() int { return len(s.processes) } func (s *processSorter) Swap(i, j int) { s.processes[i], s.processes[j] = s.processes[j], s.processes[i] } func (s *processSorter) Less(i, j int) bool { return s.processes[j].ID() == runtime.InitProcessID } docker-containerd-tags-docker-1.13.1/supervisor/sort_test.go000066400000000000000000000026341304421264600241760ustar00rootroot00000000000000package supervisor import ( "flag" "os" "sort" "testing" "github.com/docker/containerd/runtime" "github.com/docker/containerd/specs" ) var ( runtimeTool = flag.String("runtime", "runc", "Runtime to use for this test") ) type testProcess struct { id string } func (p *testProcess) ID() string { return p.id } func (p *testProcess) Start() error { return nil } func (p *testProcess) CloseStdin() error { return nil } func (p *testProcess) Resize(w, h int) error { return nil } func (p *testProcess) Stdio() runtime.Stdio { return runtime.Stdio{} } func (p *testProcess) SystemPid() int { return -1 } func (p *testProcess) ExitFD() int { return -1 } func (p *testProcess) ExitStatus() (uint32, error) { return runtime.UnknownStatus, nil } func (p *testProcess) Container() runtime.Container { return nil } func (p *testProcess) Spec() specs.ProcessSpec { return specs.ProcessSpec{} } func (p *testProcess) Signal(os.Signal) error { return nil } func (p *testProcess) Close() error { return nil } func (p *testProcess) State() runtime.State { return runtime.Running } func (p *testProcess) Wait() { } func TestSortProcesses(t *testing.T) { p := []runtime.Process{ &testProcess{"ls"}, 
&testProcess{"other"}, &testProcess{"init"}, &testProcess{"other2"}, } s := &processSorter{p} sort.Sort(s) if id := p[len(p)-1].ID(); id != "init" { t.Fatalf("expected init but received %q", id) } } docker-containerd-tags-docker-1.13.1/supervisor/stats.go000066400000000000000000000011431304421264600233000ustar00rootroot00000000000000package supervisor import ( "time" "github.com/docker/containerd/runtime" ) // StatsTask holds needed parameters to retrieve a container statistics type StatsTask struct { baseTask ID string Stat chan *runtime.Stat } func (s *Supervisor) stats(t *StatsTask) error { start := time.Now() i, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } // TODO: use workers for this go func() { s, err := i.container.Stats() if err != nil { t.ErrorCh() <- err return } t.ErrorCh() <- nil t.Stat <- s ContainerStatsTimer.UpdateSince(start) }() return errDeferredResponse } docker-containerd-tags-docker-1.13.1/supervisor/supervisor.go000066400000000000000000000265261304421264600243770ustar00rootroot00000000000000package supervisor import ( "encoding/json" "io" "io/ioutil" "os" "path/filepath" "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/runtime" ) const ( defaultBufferSize = 2048 // size of queue in eventloop ) // New returns an initialized Process supervisor. 
func New(stateDir string, runtimeName, shimName string, runtimeArgs []string, timeout time.Duration, retainCount int) (*Supervisor, error) { startTasks := make(chan *startTask, 10) if err := os.MkdirAll(stateDir, 0755); err != nil { return nil, err } machine, err := CollectMachineInformation() if err != nil { return nil, err } monitor, err := NewMonitor() if err != nil { return nil, err } s := &Supervisor{ stateDir: stateDir, containers: make(map[string]*containerInfo), startTasks: startTasks, machine: machine, subscribers: make(map[chan Event]struct{}), tasks: make(chan Task, defaultBufferSize), monitor: monitor, runtime: runtimeName, runtimeArgs: runtimeArgs, shim: shimName, timeout: timeout, containerExecSync: make(map[string]map[string]chan struct{}), } if err := setupEventLog(s, retainCount); err != nil { return nil, err } go s.exitHandler() go s.oomHandler() if err := s.restore(); err != nil { return nil, err } return s, nil } type containerInfo struct { container runtime.Container } func setupEventLog(s *Supervisor, retainCount int) error { if err := readEventLog(s); err != nil { return err } logrus.WithField("count", len(s.eventLog)).Debug("containerd: read past events") events := s.Events(time.Time{}, false, "") return eventLogger(s, filepath.Join(s.stateDir, "events.log"), events, retainCount) } func eventLogger(s *Supervisor, path string, events chan Event, retainCount int) error { f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755) if err != nil { return err } go func() { var ( count = len(s.eventLog) enc = json.NewEncoder(f) ) for e := range events { // if we have a specified retain count make sure the truncate the event // log if it grows past the specified number of events to keep. 
if retainCount > 0 { if count > retainCount { logrus.Debug("truncating event log") // close the log file if f != nil { f.Close() } slice := retainCount - 1 l := len(s.eventLog) if slice >= l { slice = l } s.eventLock.Lock() s.eventLog = s.eventLog[len(s.eventLog)-slice:] s.eventLock.Unlock() if f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755); err != nil { logrus.WithField("error", err).Error("containerd: open event to journal") continue } enc = json.NewEncoder(f) count = 0 for _, le := range s.eventLog { if err := enc.Encode(le); err != nil { logrus.WithField("error", err).Error("containerd: write event to journal") } } } } s.eventLock.Lock() s.eventLog = append(s.eventLog, e) s.eventLock.Unlock() count++ if err := enc.Encode(e); err != nil { logrus.WithField("error", err).Error("containerd: write event to journal") } } }() return nil } func readEventLog(s *Supervisor) error { f, err := os.Open(filepath.Join(s.stateDir, "events.log")) if err != nil { if os.IsNotExist(err) { return nil } return err } defer f.Close() dec := json.NewDecoder(f) for { var e eventV1 if err := dec.Decode(&e); err != nil { if err == io.EOF { break } return err } // We need to take care of -1 Status for backward compatibility ev := e.Event ev.Status = uint32(e.Status) if ev.Status > runtime.UnknownStatus { ev.Status = runtime.UnknownStatus } s.eventLog = append(s.eventLog, ev) } return nil } // Supervisor represents a container supervisor type Supervisor struct { // stateDir is the directory on the system to store container runtime state information. 
stateDir string // name of the OCI compatible runtime used to execute containers runtime string runtimeArgs []string shim string containers map[string]*containerInfo startTasks chan *startTask // we need a lock around the subscribers map only because additions and deletions from // the map are via the API so we cannot really control the concurrency subscriberLock sync.RWMutex subscribers map[chan Event]struct{} machine Machine tasks chan Task monitor *Monitor eventLog []Event eventLock sync.Mutex timeout time.Duration // This is used to ensure that exec process death events are sent // before the init process death containerExecSyncLock sync.Mutex containerExecSync map[string]map[string]chan struct{} } // Stop closes all startTasks and sends a SIGTERM to each container's pid1 then waits for they to // terminate. After it has handled all the SIGCHILD events it will close the signals chan // and exit. Stop is a non-blocking call and will return after the containers have been signaled func (s *Supervisor) Stop() { // Close the startTasks channel so that no new containers get started close(s.startTasks) } // Close closes any open files in the supervisor but expects that Stop has been // callsed so that no more containers are started. 
func (s *Supervisor) Close() error { return nil } // Event represents a container event type Event struct { ID string `json:"id"` Type string `json:"type"` Timestamp time.Time `json:"timestamp"` PID string `json:"pid,omitempty"` Status uint32 `json:"status,omitempty"` } type eventV1 struct { Event Status int `json:"status,omitempty"` } // Events returns an event channel that external consumers can use to receive updates // on container events func (s *Supervisor) Events(from time.Time, storedOnly bool, id string) chan Event { c := make(chan Event, defaultBufferSize) if storedOnly { defer s.Unsubscribe(c) } s.subscriberLock.Lock() defer s.subscriberLock.Unlock() if !from.IsZero() { // replay old event s.eventLock.Lock() past := s.eventLog[:] s.eventLock.Unlock() for _, e := range past { if e.Timestamp.After(from) { if id == "" || e.ID == id { c <- e } } } } if storedOnly { close(c) } else { EventSubscriberCounter.Inc(1) s.subscribers[c] = struct{}{} } return c } // Unsubscribe removes the provided channel from receiving any more events func (s *Supervisor) Unsubscribe(sub chan Event) { s.subscriberLock.Lock() defer s.subscriberLock.Unlock() if _, ok := s.subscribers[sub]; ok { delete(s.subscribers, sub) close(sub) EventSubscriberCounter.Dec(1) } } // notifySubscribers will send the provided event to the external subscribers // of the events channel func (s *Supervisor) notifySubscribers(e Event) { s.subscriberLock.RLock() defer s.subscriberLock.RUnlock() for sub := range s.subscribers { // do a non-blocking send for the channel select { case sub <- e: default: logrus.WithField("event", e.Type).Warn("containerd: event not sent to subscriber") } } } // Start is a non-blocking call that runs the supervisor for monitoring contianer processes and // executing new containers. 
// // This event loop is the only thing that is allowed to modify state of containers and processes // therefore it is save to do operations in the handlers that modify state of the system or // state of the Supervisor func (s *Supervisor) Start() error { logrus.WithFields(logrus.Fields{ "stateDir": s.stateDir, "runtime": s.runtime, "runtimeArgs": s.runtimeArgs, "memory": s.machine.Memory, "cpus": s.machine.Cpus, }).Debug("containerd: supervisor running") go func() { for i := range s.tasks { s.handleTask(i) } }() return nil } // Machine returns the machine information for which the // supervisor is executing on. func (s *Supervisor) Machine() Machine { return s.machine } // SendTask sends the provided event the the supervisors main event loop func (s *Supervisor) SendTask(evt Task) { TasksCounter.Inc(1) s.tasks <- evt } func (s *Supervisor) exitHandler() { for p := range s.monitor.Exits() { e := &ExitTask{ Process: p, } s.SendTask(e) } } func (s *Supervisor) oomHandler() { for id := range s.monitor.OOMs() { e := &OOMTask{ ID: id, } s.SendTask(e) } } func (s *Supervisor) monitorProcess(p runtime.Process) error { return s.monitor.Monitor(p) } func (s *Supervisor) restore() error { dirs, err := ioutil.ReadDir(s.stateDir) if err != nil { return err } for _, d := range dirs { if !d.IsDir() { continue } id := d.Name() container, err := runtime.Load(s.stateDir, id, s.shim, s.timeout) if err != nil { return err } processes, err := container.Processes() if err != nil { return err } ContainersCounter.Inc(1) s.containers[id] = &containerInfo{ container: container, } if err := s.monitor.MonitorOOM(container); err != nil && err != runtime.ErrContainerExited { logrus.WithField("error", err).Error("containerd: notify OOM events") } s.newExecSyncMap(container.ID()) logrus.WithField("id", id).Debug("containerd: container restored") var exitedProcesses []runtime.Process for _, p := range processes { if p.State() == runtime.Running { if err := s.monitorProcess(p); err != nil { return 
err } } else { exitedProcesses = append(exitedProcesses, p) } } if len(exitedProcesses) > 0 { // sort processes so that init is fired last because that is how the kernel sends the // exit events sortProcesses(exitedProcesses) for _, p := range exitedProcesses { e := &ExitTask{ Process: p, } s.SendTask(e) } } } return nil } func (s *Supervisor) handleTask(i Task) { var err error switch t := i.(type) { case *AddProcessTask: err = s.addProcess(t) case *CreateCheckpointTask: err = s.createCheckpoint(t) case *DeleteCheckpointTask: err = s.deleteCheckpoint(t) case *StartTask: err = s.start(t) case *DeleteTask: err = s.delete(t) case *ExitTask: err = s.exit(t) case *GetContainersTask: err = s.getContainers(t) case *SignalTask: err = s.signal(t) case *StatsTask: err = s.stats(t) case *UpdateTask: err = s.updateContainer(t) case *UpdateProcessTask: err = s.updateProcess(t) case *OOMTask: err = s.oom(t) default: err = ErrUnknownTask } if err != errDeferredResponse { i.ErrorCh() <- err close(i.ErrorCh()) } } func (s *Supervisor) newExecSyncMap(containerID string) { s.containerExecSyncLock.Lock() s.containerExecSync[containerID] = make(map[string]chan struct{}) s.containerExecSyncLock.Unlock() } func (s *Supervisor) newExecSyncChannel(containerID, pid string) { s.containerExecSyncLock.Lock() s.containerExecSync[containerID][pid] = make(chan struct{}) s.containerExecSyncLock.Unlock() } func (s *Supervisor) deleteExecSyncChannel(containerID, pid string) { s.containerExecSyncLock.Lock() delete(s.containerExecSync[containerID], pid) s.containerExecSyncLock.Unlock() } func (s *Supervisor) getExecSyncChannel(containerID, pid string) chan struct{} { s.containerExecSyncLock.Lock() ch := s.containerExecSync[containerID][pid] s.containerExecSyncLock.Unlock() return ch } func (s *Supervisor) getDeleteExecSyncMap(containerID string) map[string]chan struct{} { s.containerExecSyncLock.Lock() chs := s.containerExecSync[containerID] delete(s.containerExecSync, containerID) 
s.containerExecSyncLock.Unlock() return chs } docker-containerd-tags-docker-1.13.1/supervisor/supervisor_test.go000066400000000000000000000023221304421264600254220ustar00rootroot00000000000000package supervisor import ( "encoding/json" "io/ioutil" "os" "path/filepath" "testing" "time" "github.com/docker/containerd/runtime" ) func TestEventLogCompat(t *testing.T) { tmpDir, err := ioutil.TempDir("", "") if err != nil { t.Errorf("Failed to create temp dir: %v", err) } path := filepath.Join(tmpDir, "events.log") eventf, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND|os.O_TRUNC, 0755) if err != nil { t.Errorf("Failed to create event logs: %v", err) } s := &Supervisor{stateDir: tmpDir} enc := json.NewEncoder(eventf) for _, ev := range []eventV1{ { Event: Event{ ID: "abc", Type: "event", Timestamp: time.Now(), PID: "42", }, Status: -1, }, { Event: Event{ ID: "abc", Type: "event", Timestamp: time.Now(), PID: "42", }, Status: 42, }, } { enc.Encode(ev) } eventf.Close() err = readEventLog(s) if err != nil { t.Errorf("Failed to read event logs: %v", err) } if s.eventLog[0].Status != runtime.UnknownStatus { t.Errorf("Improper event status: %v", s.eventLog[0].Status) } if s.eventLog[1].Status != 42 { t.Errorf("Improper event status: %v", s.eventLog[1].Status) } } docker-containerd-tags-docker-1.13.1/supervisor/task.go000066400000000000000000000012241304421264600231040ustar00rootroot00000000000000package supervisor import ( "sync" "github.com/docker/containerd/runtime" ) // StartResponse is the response containing a started container type StartResponse struct { ExecPid int Container runtime.Container } // Task executes an action returning an error chan with either nil or // the error from executing the task type Task interface { // ErrorCh returns a channel used to report and error from an async task ErrorCh() chan error } type baseTask struct { errCh chan error mu sync.Mutex } func (t *baseTask) ErrorCh() chan error { t.mu.Lock() defer t.mu.Unlock() if t.errCh == 
nil { t.errCh = make(chan error, 1) } return t.errCh } docker-containerd-tags-docker-1.13.1/supervisor/types.go000066400000000000000000000004371304421264600233130ustar00rootroot00000000000000package supervisor // State constants used in Event types const ( StateStart = "start-container" StatePause = "pause" StateResume = "resume" StateExit = "exit" StateStartProcess = "start-process" StateOOM = "oom" StateLive = "live" ) docker-containerd-tags-docker-1.13.1/supervisor/update.go000066400000000000000000000035121304421264600234260ustar00rootroot00000000000000package supervisor import ( "time" "github.com/docker/containerd/runtime" ) // UpdateTask holds needed parameters to update a container resource constraints type UpdateTask struct { baseTask ID string State runtime.State Resources *runtime.Resource } func (s *Supervisor) updateContainer(t *UpdateTask) error { i, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } container := i.container if t.State != "" { switch t.State { case runtime.Running: if err := container.Resume(); err != nil { return err } s.notifySubscribers(Event{ ID: t.ID, Type: StateResume, Timestamp: time.Now(), }) case runtime.Paused: if err := container.Pause(); err != nil { return err } s.notifySubscribers(Event{ ID: t.ID, Type: StatePause, Timestamp: time.Now(), }) default: return ErrUnknownContainerStatus } return nil } if t.Resources != nil { return container.UpdateResources(t.Resources) } return nil } // UpdateProcessTask holds needed parameters to update a container // process terminal size or close its stdin type UpdateProcessTask struct { baseTask ID string PID string CloseStdin bool Width int Height int } func (s *Supervisor) updateProcess(t *UpdateProcessTask) error { i, ok := s.containers[t.ID] if !ok { return ErrContainerNotFound } processes, err := i.container.Processes() if err != nil { return err } var process runtime.Process for _, p := range processes { if p.ID() == t.PID { process = p break } } if process == nil { 
return ErrProcessNotFound } if t.CloseStdin { if err := process.CloseStdin(); err != nil { return err } } if t.Width > 0 || t.Height > 0 { if err := process.Resize(t.Width, t.Height); err != nil { return err } } return nil } docker-containerd-tags-docker-1.13.1/supervisor/worker.go000066400000000000000000000046661304421264600234700ustar00rootroot00000000000000package supervisor import ( "sync" "time" "github.com/Sirupsen/logrus" "github.com/docker/containerd/runtime" "golang.org/x/net/context" ) // Worker interface type Worker interface { Start() } type startTask struct { Container runtime.Container CheckpointPath string Stdin string Stdout string Stderr string Err chan error StartResponse chan StartResponse Ctx context.Context } // NewWorker return a new initialized worker func NewWorker(s *Supervisor, wg *sync.WaitGroup) Worker { return &worker{ s: s, wg: wg, } } type worker struct { wg *sync.WaitGroup s *Supervisor } // Start runs a loop in charge of starting new containers func (w *worker) Start() { defer w.wg.Done() for t := range w.s.startTasks { started := time.Now() process, err := t.Container.Start(t.Ctx, t.CheckpointPath, runtime.NewStdio(t.Stdin, t.Stdout, t.Stderr)) if err != nil { logrus.WithFields(logrus.Fields{ "error": err, "id": t.Container.ID(), }).Error("containerd: start container") t.Err <- err evt := &DeleteTask{ ID: t.Container.ID(), NoEvent: true, Process: process, } w.s.SendTask(evt) continue } if err := w.s.monitor.MonitorOOM(t.Container); err != nil && err != runtime.ErrContainerExited { if process.State() != runtime.Stopped { logrus.WithField("error", err).Error("containerd: notify OOM events") } } if err := w.s.monitorProcess(process); err != nil { logrus.WithField("error", err).Error("containerd: add process to monitor") t.Err <- err evt := &DeleteTask{ ID: t.Container.ID(), NoEvent: true, Process: process, } w.s.SendTask(evt) continue } // only call process start if we aren't restoring from a checkpoint // if we have restored from a 
checkpoint then the process is already started if t.CheckpointPath == "" { if err := process.Start(); err != nil { logrus.WithField("error", err).Error("containerd: start init process") t.Err <- err evt := &DeleteTask{ ID: t.Container.ID(), NoEvent: true, Process: process, } w.s.SendTask(evt) continue } } ContainerStartTimer.UpdateSince(started) w.s.newExecSyncMap(t.Container.ID()) t.Err <- nil t.StartResponse <- StartResponse{ Container: t.Container, } w.s.notifySubscribers(Event{ Timestamp: time.Now(), ID: t.Container.ID(), Type: StateStart, }) } } docker-containerd-tags-docker-1.13.1/testutils/000077500000000000000000000000001304421264600214335ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/testutils/testutils.go000066400000000000000000000043061304421264600240250ustar00rootroot00000000000000package testutils import ( "fmt" "os" "os/exec" "path/filepath" "strings" ) // GetTestOutDir returns the output directory for testing and benchmark artifacts func GetTestOutDir() string { out, _ := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() repoRoot := string(out) prefix := filepath.Join(strings.TrimSpace(repoRoot), "output") return prefix } var ( // ArchivesDir holds the location of the available rootfs ArchivesDir = filepath.Join("test-artifacts", "archives") // BundlesRoot holds the location where OCI Bundles are stored BundlesRoot = filepath.Join("test-artifacts", "oci-bundles") // OutputDirFormat holds the standard format used when creating a // new test output directory OutputDirFormat = filepath.Join("test-artifacts", "runs", "%s") // RefOciSpecsPath holds the path to the generic OCI config RefOciSpecsPath = filepath.Join(BundlesRoot, "config.json") // StateDir holds the path to the directory used by the containerd // started by tests StateDir = "/run/containerd-bench-test" ) // untarRootfs untars the given `source` tarPath into `destination/rootfs` func untarRootfs(source string, destination string) error { rootfs := 
filepath.Join(destination, "rootfs") if err := os.MkdirAll(rootfs, 0755); err != nil { fmt.Println("untarRootfs os.MkdirAll failed with err %v", err) return nil } tar := exec.Command("tar", "-C", rootfs, "-xf", source) return tar.Run() } // GenerateReferenceSpecs generates a default OCI specs via `runc spec` func GenerateReferenceSpecs(destination string) error { if _, err := os.Stat(filepath.Join(destination, "config.json")); err == nil { return nil } specs := exec.Command("runc", "spec") specs.Dir = destination return specs.Run() } // CreateBundle generates a valid OCI bundle from the given rootfs func CreateBundle(source, name string) error { bundlePath := filepath.Join(BundlesRoot, name) if err := untarRootfs(filepath.Join(ArchivesDir, source+".tar"), bundlePath); err != nil { return fmt.Errorf("Failed to untar %s.tar: %v", source, err) } return nil } // CreateBusyboxBundle generates a bundle based on the busybox rootfs func CreateBusyboxBundle(name string) error { return CreateBundle("busybox", name) } docker-containerd-tags-docker-1.13.1/vendor/000077500000000000000000000000001304421264600206705ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/000077500000000000000000000000001304421264600214575ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/000077500000000000000000000000001304421264600235165ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/000077500000000000000000000000001304421264600247655ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/docker/000077500000000000000000000000001304421264600262345ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/docker/LICENSE000066400000000000000000000250151304421264600272440ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2013-2016 Docker, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/docker/pkg/000077500000000000000000000000001304421264600270155ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/docker/pkg/listeners/000077500000000000000000000000001304421264600310255ustar00rootroot00000000000000listeners_unix.go000066400000000000000000000046441304421264600343600ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/docker/pkg/listeners// +build !windows package listeners import ( "crypto/tls" "fmt" "net" "strconv" "github.com/Sirupsen/logrus" "github.com/coreos/go-systemd/activation" "github.com/docker/go-connections/sockets" ) // Init creates new listeners for the server. // TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { ls := []net.Listener{} switch proto { case "fd": fds, err := listenFD(addr, tlsConfig) if err != nil { return nil, err } ls = append(ls, fds...) 
case "tcp": l, err := sockets.NewTCPSocket(addr, tlsConfig) if err != nil { return nil, err } ls = append(ls, l) case "unix": l, err := sockets.NewUnixSocket(addr, socketGroup) if err != nil { return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) } ls = append(ls, l) default: return nil, fmt.Errorf("invalid protocol format: %q", proto) } return ls, nil } // listenFD returns the specified socket activated files as a slice of // net.Listeners or all of the activated files if "*" is given. func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { var ( err error listeners []net.Listener ) // socket activation if tlsConfig != nil { listeners, err = activation.TLSListeners(false, tlsConfig) } else { listeners, err = activation.Listeners(false) } if err != nil { return nil, err } if len(listeners) == 0 { return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") } // default to all fds just like unix:// and tcp:// if addr == "" || addr == "*" { return listeners, nil } fdNum, err := strconv.Atoi(addr) if err != nil { return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) } fdOffset := fdNum - 3 if len(listeners) < int(fdOffset)+1 { return nil, fmt.Errorf("too few socket activated files passed in by systemd") } if listeners[fdOffset] == nil { return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) } for i, ls := range listeners { if i == fdOffset || ls == nil { continue } if err := ls.Close(); err != nil { // TODO: We shouldn't log inside a library. Remove this or error out. 
logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) } } return []net.Listener{listeners[fdOffset]}, nil } listeners_windows.go000066400000000000000000000024161304421264600350620ustar00rootroot00000000000000docker-containerd-tags-docker-1.13.1/vendor/src/github.com/docker/docker/pkg/listenerspackage listeners import ( "crypto/tls" "fmt" "net" "strings" "github.com/Microsoft/go-winio" "github.com/docker/go-connections/sockets" ) // Init creates new listeners for the server. func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { ls := []net.Listener{} switch proto { case "tcp": l, err := sockets.NewTCPSocket(addr, tlsConfig) if err != nil { return nil, err } ls = append(ls, l) case "npipe": // allow Administrators and SYSTEM, plus whatever additional users or groups were specified sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" if socketGroup != "" { for _, g := range strings.Split(socketGroup, ",") { sid, err := winio.LookupSidByName(g) if err != nil { return nil, err } sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) } } c := winio.PipeConfig{ SecurityDescriptor: sddl, MessageMode: true, // Use message mode so that CloseWrite() is supported InputBufferSize: 65536, // Use 64KB buffers to improve performance OutputBufferSize: 65536, } l, err := winio.ListenPipe(addr, &c) if err != nil { return nil, err } ls = append(ls, l) default: return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") } return ls, nil } docker-containerd-tags-docker-1.13.1/version.go000066400000000000000000000010401304421264600214020ustar00rootroot00000000000000package containerd import "fmt" // VersionMajor holds the release major number const VersionMajor = 0 // VersionMinor holds the release minor number const VersionMinor = 2 // VersionPatch holds the release patch number const VersionPatch = 3 // Version holds the combination of major minor and patch as a string // of format Major.Minor.Patch var Version = 
fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch) // GitCommit is filled with the Git revision being used to build the // program at linking time var GitCommit = ""