==> docker-2.5.1/MANIFEST.in <==
include test-requirements.txt
include requirements.txt
include README.md
include README.rst
include LICENSE
recursive-include tests *.py
recursive-include tests/unit/testdata *
recursive-include tests/integration/testdata *
==> docker-2.5.1/docker.egg-info/top_level.txt <==
docker
==> docker-2.5.1/docker.egg-info/requires.txt <==
requests!=2.11.0,!=2.12.2,!=2.18.0,>=2.5.2
six>=1.4.0
websocket-client>=0.32.0
docker-pycreds>=0.2.1
[:python_version < "3.3"]
ipaddress>=1.0.16
[:python_version < "3.5"]
backports.ssl_match_hostname>=3.5
[tls]
pyOpenSSL>=0.14
cryptography>=1.3.4
idna>=2.0.0
==> docker-2.5.1/docker.egg-info/dependency_links.txt <==
==> docker-2.5.1/docker.egg-info/PKG-INFO <==
Metadata-Version: 1.1
Name: docker
Version: 2.5.1
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
Author: Joffrey F
Author-email: joffrey@docker.com
License: Apache License 2.0
Description: Docker SDK for Python
=====================
|Build Status|
A Python library for the Docker Engine API. It lets you do anything the
``docker`` command does, but from within Python apps – run containers,
manage containers, manage Swarms, etc.
Installation
------------
The latest stable version `is available on
PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
your ``requirements.txt`` file or install with pip:
::
pip install docker
If you intend to connect to a Docker host via TLS, add
``docker[tls]`` to your requirements instead, or install with pip:
::
pip install docker[tls]
Usage
-----
Connect to Docker using the default socket or the configuration in your
environment:
.. code:: python
import docker
client = docker.from_env()
You can run containers:
.. code:: python
>>> client.containers.run("ubuntu:latest", "echo hello world")
'hello world\n'
You can run containers in the background:
.. code:: python
>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
You can manage containers:
.. code:: python
>>> client.containers.list()
[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
>>> container = client.containers.get('45e6d2de7c54')
>>> container.attrs['Config']['Image']
"bfirsh/reticulate-splines"
>>> container.logs()
"Reticulating spline 1...\n"
>>> container.stop()
You can stream logs:
.. code:: python
>>> for line in container.logs(stream=True):
... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
You can manage images:
.. code:: python
>>> client.images.pull('nginx')
<Image 'nginx'>
>>> client.images.list()
[<Image 'ubuntu'>, <Image 'nginx'>, ...]
`Read the full documentation <https://docker-py.readthedocs.io>`__ to
see everything you can do.
.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
==> docker-2.5.1/docker.egg-info/not-zip-safe <==
==> docker-2.5.1/docker.egg-info/SOURCES.txt <==
LICENSE
MANIFEST.in
README.md
README.rst
requirements.txt
setup.cfg
setup.py
test-requirements.txt
docker/__init__.py
docker/auth.py
docker/client.py
docker/constants.py
docker/errors.py
docker/tls.py
docker/version.py
docker.egg-info/PKG-INFO
docker.egg-info/SOURCES.txt
docker.egg-info/dependency_links.txt
docker.egg-info/not-zip-safe
docker.egg-info/requires.txt
docker.egg-info/top_level.txt
docker/api/__init__.py
docker/api/build.py
docker/api/client.py
docker/api/container.py
docker/api/daemon.py
docker/api/exec_api.py
docker/api/image.py
docker/api/network.py
docker/api/plugin.py
docker/api/secret.py
docker/api/service.py
docker/api/swarm.py
docker/api/volume.py
docker/models/__init__.py
docker/models/containers.py
docker/models/images.py
docker/models/networks.py
docker/models/nodes.py
docker/models/plugins.py
docker/models/resource.py
docker/models/secrets.py
docker/models/services.py
docker/models/swarm.py
docker/models/volumes.py
docker/transport/__init__.py
docker/transport/npipeconn.py
docker/transport/npipesocket.py
docker/transport/ssladapter.py
docker/transport/unixconn.py
docker/types/__init__.py
docker/types/base.py
docker/types/containers.py
docker/types/healthcheck.py
docker/types/networks.py
docker/types/services.py
docker/types/swarm.py
docker/utils/__init__.py
docker/utils/build.py
docker/utils/decorators.py
docker/utils/fnmatch.py
docker/utils/json_stream.py
docker/utils/ports.py
docker/utils/socket.py
docker/utils/utils.py
tests/__init__.py
tests/helpers.py
tests/integration/__init__.py
tests/integration/api_build_test.py
tests/integration/api_client_test.py
tests/integration/api_container_test.py
tests/integration/api_exec_test.py
tests/integration/api_healthcheck_test.py
tests/integration/api_image_test.py
tests/integration/api_network_test.py
tests/integration/api_plugin_test.py
tests/integration/api_secret_test.py
tests/integration/api_service_test.py
tests/integration/api_swarm_test.py
tests/integration/api_volume_test.py
tests/integration/base.py
tests/integration/client_test.py
tests/integration/conftest.py
tests/integration/errors_test.py
tests/integration/models_containers_test.py
tests/integration/models_images_test.py
tests/integration/models_networks_test.py
tests/integration/models_nodes_test.py
tests/integration/models_resources_test.py
tests/integration/models_services_test.py
tests/integration/models_swarm_test.py
tests/integration/models_volumes_test.py
tests/integration/regression_test.py
tests/integration/testdata/dummy-plugin/config.json
tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt
tests/unit/__init__.py
tests/unit/api_build_test.py
tests/unit/api_container_test.py
tests/unit/api_exec_test.py
tests/unit/api_image_test.py
tests/unit/api_network_test.py
tests/unit/api_test.py
tests/unit/api_volume_test.py
tests/unit/auth_test.py
tests/unit/client_test.py
tests/unit/dockertypes_test.py
tests/unit/errors_test.py
tests/unit/fake_api.py
tests/unit/fake_api_client.py
tests/unit/fake_stat.py
tests/unit/models_containers_test.py
tests/unit/models_images_test.py
tests/unit/models_networks_test.py
tests/unit/models_resources_test.py
tests/unit/models_services_test.py
tests/unit/ssladapter_test.py
tests/unit/swarm_test.py
tests/unit/utils_json_stream_test.py
tests/unit/utils_test.py
tests/unit/testdata/certs/ca.pem
tests/unit/testdata/certs/cert.pem
tests/unit/testdata/certs/key.pem

==> docker-2.5.1/test-requirements.txt <==
mock==1.0.1
pytest==2.9.1
coverage==3.7.1
pytest-cov==2.1.0
flake8==2.4.1
==> docker-2.5.1/README.md <==
# Docker SDK for Python
[![Build Status](https://travis-ci.org/docker/docker-py.svg?branch=master)](https://travis-ci.org/docker/docker-py)
A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc.
## Installation
The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). Either add `docker` to your `requirements.txt` file or install with pip:
pip install docker
If you intend to connect to a Docker host via TLS, add `docker[tls]` to your requirements instead, or install with pip:
pip install docker[tls]
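For example, connecting to a TLS-protected daemon might look like this (a minimal sketch; the daemon address and certificate paths below are placeholders, substitute your own):

```python
import docker

# Placeholder address and certificate paths -- replace with your own values.
tls_config = docker.tls.TLSConfig(
    client_cert=('/path/to/cert.pem', '/path/to/key.pem'),
    ca_cert='/path/to/ca.pem',
    verify=True
)
client = docker.DockerClient(base_url='tcp://example.com:2376', tls=tls_config)
```

Alternatively, `docker.from_env()` picks up equivalent settings from the `DOCKER_HOST`, `DOCKER_TLS_VERIFY` and `DOCKER_CERT_PATH` environment variables.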
## Usage
Connect to Docker using the default socket or the configuration in your environment:
```python
import docker
client = docker.from_env()
```
You can run containers:
```python
>>> client.containers.run("ubuntu:latest", "echo hello world")
'hello world\n'
```
You can run containers in the background:
```python
>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
```
You can manage containers:
```python
>>> client.containers.list()
[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
>>> container = client.containers.get('45e6d2de7c54')
>>> container.attrs['Config']['Image']
"bfirsh/reticulate-splines"
>>> container.logs()
"Reticulating spline 1...\n"
>>> container.stop()
```
You can stream logs:
```python
>>> for line in container.logs(stream=True):
... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
```
You can manage images:
```python
>>> client.images.pull('nginx')
<Image 'nginx'>
>>> client.images.list()
[<Image 'ubuntu'>, <Image 'nginx'>, ...]
```
[Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do.
==> docker-2.5.1/README.rst <==
Docker SDK for Python
=====================
|Build Status|
A Python library for the Docker Engine API. It lets you do anything the
``docker`` command does, but from within Python apps – run containers,
manage containers, manage Swarms, etc.
Installation
------------
The latest stable version `is available on
PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
your ``requirements.txt`` file or install with pip:
::
pip install docker
If you intend to connect to a Docker host via TLS, add
``docker[tls]`` to your requirements instead, or install with pip:
::
pip install docker[tls]
Usage
-----
Connect to Docker using the default socket or the configuration in your
environment:
.. code:: python
import docker
client = docker.from_env()
You can run containers:
.. code:: python
>>> client.containers.run("ubuntu:latest", "echo hello world")
'hello world\n'
You can run containers in the background:
.. code:: python
>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
You can manage containers:
.. code:: python
>>> client.containers.list()
[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
>>> container = client.containers.get('45e6d2de7c54')
>>> container.attrs['Config']['Image']
"bfirsh/reticulate-splines"
>>> container.logs()
"Reticulating spline 1...\n"
>>> container.stop()
You can stream logs:
.. code:: python
>>> for line in container.logs(stream=True):
... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
You can manage images:
.. code:: python
>>> client.images.pull('nginx')
<Image 'nginx'>
>>> client.images.list()
[<Image 'ubuntu'>, <Image 'nginx'>, ...]
`Read the full documentation <https://docker-py.readthedocs.io>`__ to
see everything you can do.
.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
==> docker-2.5.1/setup.py <==
#!/usr/bin/env python
from __future__ import print_function
import codecs
import os
import sys
import pip
from setuptools import setup, find_packages
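# Refuse to install alongside the legacy "docker-py" distribution, which
# provides the same `docker` import name. Note that
# pip.get_installed_distributions() is a pip-internal API (removed in pip 10),
# so this check assumes a pip release contemporary with this package.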
if 'docker-py' in [x.project_name for x in pip.get_installed_distributions()]:
print(
'ERROR: "docker-py" needs to be uninstalled before installing this'
' package:\npip uninstall docker-py', file=sys.stderr
)
sys.exit(1)
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'requests >= 2.5.2, != 2.11.0, != 2.12.2, != 2.18.0',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.2.1'
]
if sys.platform == 'win32':
requirements.append('pypiwin32 >= 219')
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
# While not imported explicitly, the ipaddress module is required for
# ssl_match_hostname to verify hosts match with certificates via
# ServerAltname: https://pypi.python.org/pypi/backports.ssl_match_hostname
':python_version < "3.3"': 'ipaddress >= 1.0.16',
# If using docker-py over TLS, we highly recommend that this option is
# pip-installed or pinned.
# TODO: if pip installing both "requests" and "requests[security]", the
# extra packages from the "security" option are not installed (see
# https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
# installing the extra dependencies, install the following instead:
# 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
'tls': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
}
version = None
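# Execute docker/version.py to populate the `version` variable declared above,
# without importing the docker package itself.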
exec(open('docker/version.py').read())
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
long_description = ''
try:
with codecs.open('./README.rst', encoding='utf-8') as readme_rst:
long_description = readme_rst.read()
except IOError:
# README.rst is only generated on release. Its absence should not prevent
# setup.py from working properly.
pass
setup(
name="docker",
version=version,
description="A Python library for the Docker Engine API.",
long_description=long_description,
url='https://github.com/docker/docker-py',
packages=find_packages(exclude=["tests.*", "tests"]),
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
maintainer='Joffrey F',
maintainer_email='joffrey@docker.com',
)
==> docker-2.5.1/PKG-INFO <==
Metadata-Version: 1.1
Name: docker
Version: 2.5.1
Summary: A Python library for the Docker Engine API.
Home-page: https://github.com/docker/docker-py
Author: Joffrey F
Author-email: joffrey@docker.com
License: Apache License 2.0
Description: Docker SDK for Python
=====================
|Build Status|
A Python library for the Docker Engine API. It lets you do anything the
``docker`` command does, but from within Python apps – run containers,
manage containers, manage Swarms, etc.
Installation
------------
The latest stable version `is available on
PyPI <https://pypi.python.org/pypi/docker/>`__. Either add ``docker`` to
your ``requirements.txt`` file or install with pip:
::
pip install docker
If you intend to connect to a Docker host via TLS, add
``docker[tls]`` to your requirements instead, or install with pip:
::
pip install docker[tls]
Usage
-----
Connect to Docker using the default socket or the configuration in your
environment:
.. code:: python
import docker
client = docker.from_env()
You can run containers:
.. code:: python
>>> client.containers.run("ubuntu:latest", "echo hello world")
'hello world\n'
You can run containers in the background:
.. code:: python
>>> client.containers.run("bfirsh/reticulate-splines", detach=True)
You can manage containers:
.. code:: python
>>> client.containers.list()
[<Container '45e6d2de7c54'>, <Container 'db18e4f20eaa'>, ...]
>>> container = client.containers.get('45e6d2de7c54')
>>> container.attrs['Config']['Image']
"bfirsh/reticulate-splines"
>>> container.logs()
"Reticulating spline 1...\n"
>>> container.stop()
You can stream logs:
.. code:: python
>>> for line in container.logs(stream=True):
... print(line.strip())
Reticulating spline 2...
Reticulating spline 3...
...
You can manage images:
.. code:: python
>>> client.images.pull('nginx')
<Image 'nginx'>
>>> client.images.list()
[<Image 'ubuntu'>, <Image 'nginx'>, ...]
`Read the full documentation <https://docker-py.readthedocs.io>`__ to
see everything you can do.
.. |Build Status| image:: https://travis-ci.org/docker/docker-py.svg?branch=master
:target: https://travis-ci.org/docker/docker-py
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Other Environment
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Topic :: Utilities
Classifier: License :: OSI Approved :: Apache Software License
==> docker-2.5.1/setup.cfg <==
[bdist_wheel]
universal = 1
[metadata]
description_file = README.rst
license = Apache License 2.0
[egg_info]
tag_build =
tag_date = 0
==> docker-2.5.1/requirements.txt <==
appdirs==1.4.3
asn1crypto==0.22.0
backports.ssl-match-hostname==3.5.0.1
cffi==1.10.0
cryptography==1.9
docker-pycreds==0.2.1
enum34==1.1.6
idna==2.5
ipaddress==1.0.18
packaging==16.8
pycparser==2.17
pyOpenSSL==17.0.0
pyparsing==2.2.0
requests==2.14.2
six==1.10.0
websocket-client==0.40.0
==> docker-2.5.1/LICENSE <==
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==> docker-2.5.1/tests/integration/regression_test.py <==
import io
import random
import docker
import six
from .base import BaseAPIIntegrationTest, BUSYBOX
class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
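# Build from an empty context; the daemon replies with a
# non-chunked 500 error that the streaming client must surface.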
dfile = io.BytesIO()
with self.assertRaises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
self.assertEqual(exc.exception.response.status_code, 500)
dfile.close()
def test_542_truncate_ids_client_side(self):
self.client.start(
self.client.create_container(BUSYBOX, ['true'])
)
result = self.client.containers(all=True, trunc=True)
self.assertEqual(len(result[0]['Id']), 12)
def test_647_support_doubleslash_in_image_names(self):
with self.assertRaises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649_handle_timeout_value_none(self):
self.client.timeout = None
ctnr = self.client.create_container(BUSYBOX, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715_handle_user_param_as_int_value(self):
ctnr = self.client.create_container(BUSYBOX, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
BUSYBOX, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
self.assertEqual(
self.client.port(ctnr, 2000)[0]['HostPort'],
six.text_type(tcp_port)
)
self.assertEqual(
self.client.port(ctnr, '2000/tcp')[0]['HostPort'],
six.text_type(tcp_port)
)
self.assertEqual(
self.client.port(ctnr, '2000/udp')[0]['HostPort'],
six.text_type(udp_port)
)
==> docker-2.5.1/tests/integration/client_test.py <==
import unittest
import docker
from ..helpers import requires_api_version
from .base import TEST_API_VERSION
class ClientTest(unittest.TestCase):
client = docker.from_env(version=TEST_API_VERSION)
def test_info(self):
info = self.client.info()
assert 'ID' in info
assert 'Name' in info
def test_ping(self):
assert self.client.ping() is True
def test_version(self):
assert 'Version' in self.client.version()
@requires_api_version('1.25')
def test_df(self):
data = self.client.df()
assert 'LayersSize' in data
assert 'Containers' in data
assert 'Volumes' in data
assert 'Images' in data
==> docker-2.5.1/tests/integration/api_plugin_test.py <==
import os
import docker
import pytest
from .base import BaseAPIIntegrationTest, TEST_API_VERSION
from ..helpers import requires_api_version
SSHFS = 'vieux/sshfs:latest'
@requires_api_version('1.25')
class PluginTest(BaseAPIIntegrationTest):
@classmethod
def teardown_class(cls):
c = docker.APIClient(
version=TEST_API_VERSION, timeout=60,
**docker.utils.kwargs_from_env()
)
try:
c.remove_plugin(SSHFS, force=True)
except docker.errors.APIError:
pass
def teardown_method(self, method):
try:
self.client.disable_plugin(SSHFS)
except docker.errors.APIError:
pass
for p in self.tmp_plugins:
try:
self.client.remove_plugin(p, force=True)
except docker.errors.APIError:
pass
def ensure_plugin_installed(self, plugin_name):
try:
return self.client.inspect_plugin(plugin_name)
except docker.errors.NotFound:
prv = self.client.plugin_privileges(plugin_name)
for d in self.client.pull_plugin(plugin_name, prv):
pass
return self.client.inspect_plugin(plugin_name)
def test_enable_plugin(self):
pl_data = self.ensure_plugin_installed(SSHFS)
assert pl_data['Enabled'] is False
assert self.client.enable_plugin(SSHFS)
pl_data = self.client.inspect_plugin(SSHFS)
assert pl_data['Enabled'] is True
with pytest.raises(docker.errors.APIError):
self.client.enable_plugin(SSHFS)
def test_disable_plugin(self):
pl_data = self.ensure_plugin_installed(SSHFS)
assert pl_data['Enabled'] is False
assert self.client.enable_plugin(SSHFS)
pl_data = self.client.inspect_plugin(SSHFS)
assert pl_data['Enabled'] is True
self.client.disable_plugin(SSHFS)
pl_data = self.client.inspect_plugin(SSHFS)
assert pl_data['Enabled'] is False
with pytest.raises(docker.errors.APIError):
self.client.disable_plugin(SSHFS)
def test_inspect_plugin(self):
self.ensure_plugin_installed(SSHFS)
data = self.client.inspect_plugin(SSHFS)
assert 'Config' in data
assert 'Name' in data
assert data['Name'] == SSHFS
def test_plugin_privileges(self):
prv = self.client.plugin_privileges(SSHFS)
assert isinstance(prv, list)
for item in prv:
assert 'Name' in item
assert 'Value' in item
assert 'Description' in item
def test_list_plugins(self):
self.ensure_plugin_installed(SSHFS)
data = self.client.plugins()
assert len(data) > 0
plugin = [p for p in data if p['Name'] == SSHFS][0]
assert 'Config' in plugin
def test_configure_plugin(self):
pl_data = self.ensure_plugin_installed(SSHFS)
assert pl_data['Enabled'] is False
self.client.configure_plugin(SSHFS, {
'DEBUG': '1'
})
pl_data = self.client.inspect_plugin(SSHFS)
assert 'Env' in pl_data['Settings']
assert 'DEBUG=1' in pl_data['Settings']['Env']
self.client.configure_plugin(SSHFS, ['DEBUG=0'])
pl_data = self.client.inspect_plugin(SSHFS)
assert 'DEBUG=0' in pl_data['Settings']['Env']
def test_remove_plugin(self):
pl_data = self.ensure_plugin_installed(SSHFS)
assert pl_data['Enabled'] is False
assert self.client.remove_plugin(SSHFS) is True
def test_force_remove_plugin(self):
self.ensure_plugin_installed(SSHFS)
self.client.enable_plugin(SSHFS)
assert self.client.inspect_plugin(SSHFS)['Enabled'] is True
assert self.client.remove_plugin(SSHFS, force=True) is True
def test_install_plugin(self):
try:
self.client.remove_plugin(SSHFS, force=True)
except docker.errors.APIError:
pass
prv = self.client.plugin_privileges(SSHFS)
logs = [d for d in self.client.pull_plugin(SSHFS, prv)]
# filter() is lazy (always truthy) on Python 3; check the logs explicitly.
assert any(d.get('status') == 'Download complete' for d in logs)
assert self.client.inspect_plugin(SSHFS)
assert self.client.enable_plugin(SSHFS)
@requires_api_version('1.26')
def test_upgrade_plugin(self):
pl_data = self.ensure_plugin_installed(SSHFS)
assert pl_data['Enabled'] is False
prv = self.client.plugin_privileges(SSHFS)
logs = [d for d in self.client.upgrade_plugin(SSHFS, SSHFS, prv)]
# filter() is lazy (always truthy) on Python 3; check the logs explicitly.
assert any(d.get('status') == 'Download complete' for d in logs)
assert self.client.inspect_plugin(SSHFS)
assert self.client.enable_plugin(SSHFS)
def test_create_plugin(self):
plugin_data_dir = os.path.join(
os.path.dirname(__file__), 'testdata/dummy-plugin'
)
assert self.client.create_plugin(
'docker-sdk-py/dummy', plugin_data_dir
)
self.tmp_plugins.append('docker-sdk-py/dummy')
data = self.client.inspect_plugin('docker-sdk-py/dummy')
assert data['Config']['Entrypoint'] == ['/dummy']
==> docker-2.5.1/tests/integration/models_services_test.py <==
import unittest
import docker
import pytest
from .. import helpers
from .base import TEST_API_VERSION
class ServiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
client = docker.from_env(version=TEST_API_VERSION)
helpers.force_leave_swarm(client)
client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
@classmethod
def tearDownClass(cls):
helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_create(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
# create arguments
name=name,
labels={'foo': 'bar'},
# ContainerSpec arguments
image="alpine",
command="sleep 300",
container_labels={'container': 'label'}
)
assert service.name == name
assert service.attrs['Spec']['Labels']['foo'] == 'bar'
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert "alpine" in container_spec['Image']
assert container_spec['Labels'] == {'container': 'label'}
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
service = client.services.create(
name=name,
image="alpine",
command="sleep 300"
)
service = client.services.get(service.id)
assert service.name == name
def test_list_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
name=helpers.random_name(),
image="alpine",
command="sleep 300"
)
assert service in client.services.list()
service.remove()
assert service not in client.services.list()
def test_tasks(self):
client = docker.from_env(version=TEST_API_VERSION)
service1 = client.services.create(
name=helpers.random_name(),
image="alpine",
command="sleep 300"
)
service2 = client.services.create(
name=helpers.random_name(),
image="alpine",
command="sleep 300"
)
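# Poll until the scheduler has created a task for each service.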
tasks = []
while len(tasks) == 0:
tasks = service1.tasks()
assert len(tasks) == 1
assert tasks[0]['ServiceID'] == service1.id
tasks = []
while len(tasks) == 0:
tasks = service2.tasks()
assert len(tasks) == 1
assert tasks[0]['ServiceID'] == service2.id
@pytest.mark.skip(reason="Makes Swarm unstable?")
def test_update(self):
client = docker.from_env(version=TEST_API_VERSION)
service = client.services.create(
# create arguments
name=helpers.random_name(),
# ContainerSpec arguments
image="alpine",
command="sleep 300"
)
service.update(
# create argument
name=service.name,
# ContainerSpec argument
command="sleep 600"
)
service.reload()
container_spec = service.attrs['Spec']['TaskTemplate']['ContainerSpec']
assert container_spec['Command'] == ["sleep", "600"]
==> docker-2.5.1/tests/integration/testdata/dummy-plugin/rootfs/dummy/file.txt <==

==> docker-2.5.1/tests/integration/testdata/dummy-plugin/config.json <==
{
"description": "Dummy test plugin for docker python SDK",
"documentation": "https://github.com/docker/docker-py",
"entrypoint": ["/dummy"],
"network": {
"type": "host"
},
"interface" : {
"types": ["docker.volumedriver/1.0"],
"socket": "dummy.sock"
},
"env": [
{
"name":"DEBUG",
"settable":["value"],
"value":"0"
}
]
}
==> docker-2.5.1/tests/integration/api_secret_test.py <==
# -*- coding: utf-8 -*-
import docker
import pytest
from ..helpers import force_leave_swarm, requires_api_version
from .base import BaseAPIIntegrationTest
@requires_api_version('1.25')
class SecretAPITest(BaseAPIIntegrationTest):
def setUp(self):
super(SecretAPITest, self).setUp()
self.init_swarm()
def tearDown(self):
super(SecretAPITest, self).tearDown()
force_leave_swarm(self.client)
def test_create_secret(self):
secret_id = self.client.create_secret(
'favorite_character', 'sakuya izayoi'
)
self.tmp_secrets.append(secret_id)
assert 'ID' in secret_id
data = self.client.inspect_secret(secret_id)
assert data['Spec']['Name'] == 'favorite_character'
def test_create_secret_unicode_data(self):
secret_id = self.client.create_secret(
'favorite_character', u'いざよいさくや'
)
self.tmp_secrets.append(secret_id)
assert 'ID' in secret_id
data = self.client.inspect_secret(secret_id)
assert data['Spec']['Name'] == 'favorite_character'
def test_inspect_secret(self):
secret_name = 'favorite_character'
secret_id = self.client.create_secret(
secret_name, 'sakuya izayoi'
)
self.tmp_secrets.append(secret_id)
data = self.client.inspect_secret(secret_id)
assert data['Spec']['Name'] == secret_name
assert 'ID' in data
assert 'Version' in data
def test_remove_secret(self):
secret_name = 'favorite_character'
secret_id = self.client.create_secret(
secret_name, 'sakuya izayoi'
)
self.tmp_secrets.append(secret_id)
assert self.client.remove_secret(secret_id)
with pytest.raises(docker.errors.NotFound):
self.client.inspect_secret(secret_id)
def test_list_secrets(self):
secret_name = 'favorite_character'
secret_id = self.client.create_secret(
secret_name, 'sakuya izayoi'
)
self.tmp_secrets.append(secret_id)
data = self.client.secrets(filters={'names': ['favorite_character']})
assert len(data) == 1
assert data[0]['ID'] == secret_id['ID']
==> docker-2.5.1/tests/integration/api_build_test.py <==
import io
import os
import shutil
import tempfile
from docker import errors
import pytest
import six
from .base import BaseAPIIntegrationTest
from ..helpers import requires_api_version, requires_experimental
class BuildTest(BaseAPIIntegrationTest):
def test_build_streaming(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
stream = self.client.build(fileobj=script, stream=True, decode=True)
logs = []
for chunk in stream:
logs.append(chunk)
assert len(logs) > 0
def test_build_from_stringio(self):
if six.PY3:
return
script = io.StringIO(six.text_type('\n').join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]))
stream = self.client.build(fileobj=script, stream=True)
logs = ''
for chunk in stream:
if six.PY3:
chunk = chunk.decode('utf-8')
logs += chunk
self.assertNotEqual(logs, '')
@requires_api_version('1.8')
def test_build_with_dockerignore(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'ADD . /test',
]))
with open(os.path.join(base_dir, '.dockerignore'), 'w') as f:
f.write("\n".join([
'ignored',
'Dockerfile',
'.dockerignore',
'!ignored/subdir/excepted-file',
'', # empty line
]))
with open(os.path.join(base_dir, 'not-ignored'), 'w') as f:
f.write("this file should not be ignored")
subdir = os.path.join(base_dir, 'ignored', 'subdir')
os.makedirs(subdir)
with open(os.path.join(subdir, 'file'), 'w') as f:
f.write("this file should be ignored")
with open(os.path.join(subdir, 'excepted-file'), 'w') as f:
f.write("this file should not be ignored")
tag = 'docker-py-test-build-with-dockerignore'
stream = self.client.build(
path=base_dir,
tag=tag,
)
for chunk in stream:
pass
c = self.client.create_container(tag, ['find', '/test', '-type', 'f'])
self.client.start(c)
self.client.wait(c)
logs = self.client.logs(c)
if six.PY3:
logs = logs.decode('utf-8')
self.assertEqual(
sorted(list(filter(None, logs.split('\n')))),
sorted(['/test/ignored/subdir/excepted-file',
'/test/not-ignored']),
)
@requires_api_version('1.21')
def test_build_with_buildargs(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'ARG test',
'USER $test'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, tag='buildargs', buildargs={'test': 'OK'}
)
self.tmp_imgs.append('buildargs')
for chunk in stream:
pass
info = self.client.inspect_image('buildargs')
self.assertEqual(info['Config']['User'], 'OK')
@requires_api_version('1.22')
def test_build_shmsize(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
'CMD sh -c "echo \'Hello, World!\'"',
]).encode('ascii'))
tag = 'shmsize'
shmsize = 134217728
stream = self.client.build(
fileobj=script, tag=tag, shmsize=shmsize
)
self.tmp_imgs.append(tag)
for chunk in stream:
pass
# There is currently no way to get the shmsize
# that was used to build the image
@requires_api_version('1.23')
def test_build_labels(self):
script = io.BytesIO('\n'.join([
'FROM scratch',
]).encode('ascii'))
labels = {'test': 'OK'}
stream = self.client.build(
fileobj=script, tag='labels', labels=labels
)
self.tmp_imgs.append('labels')
for chunk in stream:
pass
info = self.client.inspect_image('labels')
self.assertEqual(info['Config']['Labels'], labels)
@requires_api_version('1.25')
def test_build_with_cache_from(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'ENV FOO=bar',
'RUN touch baz',
'RUN touch bax',
]).encode('ascii'))
stream = self.client.build(fileobj=script, tag='build1')
self.tmp_imgs.append('build1')
for chunk in stream:
pass
stream = self.client.build(
fileobj=script, tag='build2', cache_from=['build1'],
decode=True
)
self.tmp_imgs.append('build2')
counter = 0
for chunk in stream:
if 'Using cache' in chunk.get('stream', ''):
counter += 1
assert counter == 3
self.client.remove_image('build2')
counter = 0
stream = self.client.build(
fileobj=script, tag='build2', cache_from=['nosuchtag'],
decode=True
)
for chunk in stream:
if 'Using cache' in chunk.get('stream', ''):
counter += 1
assert counter == 0
@requires_api_version('1.29')
def test_build_container_with_target(self):
script = io.BytesIO('\n'.join([
'FROM busybox as first',
'RUN mkdir -p /tmp/test',
'RUN touch /tmp/silence.tar.gz',
'FROM alpine:latest',
'WORKDIR /root/',
'COPY --from=first /tmp/silence.tar.gz .',
'ONBUILD RUN echo "This should not be in the final image"'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, target='first', tag='build1'
)
self.tmp_imgs.append('build1')
for chunk in stream:
pass
info = self.client.inspect_image('build1')
self.assertEqual(info['Config']['OnBuild'], [])
@requires_api_version('1.25')
def test_build_with_network_mode(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN wget http://google.com'
]).encode('ascii'))
stream = self.client.build(
fileobj=script, network_mode='bridge',
tag='dockerpytest_bridgebuild'
)
self.tmp_imgs.append('dockerpytest_bridgebuild')
for chunk in stream:
pass
assert self.client.inspect_image('dockerpytest_bridgebuild')
script.seek(0)
stream = self.client.build(
fileobj=script, network_mode='none',
tag='dockerpytest_nonebuild', nocache=True, decode=True
)
self.tmp_imgs.append('dockerpytest_nonebuild')
logs = [chunk for chunk in stream]
assert 'errorDetail' in logs[-1]
assert logs[-1]['errorDetail']['code'] == 1
with pytest.raises(errors.NotFound):
self.client.inspect_image('dockerpytest_nonebuild')
@requires_experimental(until=None)
@requires_api_version('1.25')
def test_build_squash(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN echo blah > /file_1',
'RUN echo blahblah > /file_2',
'RUN echo blahblahblah > /file_3'
]).encode('ascii'))
def build_squashed(squash):
tag = 'squash' if squash else 'nosquash'
stream = self.client.build(
fileobj=script, tag=tag, squash=squash
)
self.tmp_imgs.append(tag)
for chunk in stream:
pass
return self.client.inspect_image(tag)
non_squashed = build_squashed(False)
squashed = build_squashed(True)
self.assertEqual(len(non_squashed['RootFS']['Layers']), 4)
self.assertEqual(len(squashed['RootFS']['Layers']), 2)
def test_build_stderr_data(self):
control_chars = ['\x1b[91m', '\x1b[0m']
snippet = 'Ancient Temple (Mystic Oriental Dream ~ Ancient Temple)'
script = io.BytesIO(b'\n'.join([
b'FROM busybox',
'RUN sh -c ">&2 echo \'{0}\'"'.format(snippet).encode('utf-8')
]))
stream = self.client.build(
fileobj=script, stream=True, decode=True, nocache=True
)
lines = []
for chunk in stream:
lines.append(chunk.get('stream'))
expected = '{0}{2}\n{1}'.format(
control_chars[0], control_chars[1], snippet
)
self.assertTrue(any([line == expected for line in lines]))
def test_build_gzip_encoding(self):
base_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base_dir)
with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
f.write("\n".join([
'FROM busybox',
'ADD . /test',
]))
stream = self.client.build(
path=base_dir, stream=True, decode=True, nocache=True,
gzip=True
)
lines = []
for chunk in stream:
lines.append(chunk)
assert 'Successfully built' in lines[-1]['stream']
def test_build_gzip_custom_encoding(self):
with self.assertRaises(errors.DockerException):
self.client.build(path='.', gzip=True, encoding='text/html')
==> docker-2.5.1/tests/integration/api_swarm_test.py <==
import copy
import docker
import pytest
from ..helpers import force_leave_swarm, requires_api_version
from .base import BaseAPIIntegrationTest
class SwarmTest(BaseAPIIntegrationTest):
def setUp(self):
super(SwarmTest, self).setUp()
force_leave_swarm(self.client)
def tearDown(self):
super(SwarmTest, self).tearDown()
force_leave_swarm(self.client)
@requires_api_version('1.24')
def test_init_swarm_simple(self):
assert self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_force_new_cluster(self):
pytest.skip('Test stalls the engine on 1.12.0')
assert self.init_swarm()
version_1 = self.client.inspect_swarm()['Version']['Index']
assert self.client.init_swarm(force_new_cluster=True)
version_2 = self.client.inspect_swarm()['Version']['Index']
assert version_2 != version_1
@requires_api_version('1.24')
def test_init_already_in_cluster(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError):
self.init_swarm()
@requires_api_version('1.24')
def test_init_swarm_custom_raft_spec(self):
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200
)
assert self.init_swarm(swarm_spec=spec)
swarm_info = self.client.inspect_swarm()
assert swarm_info['Spec']['Raft']['SnapshotInterval'] == 5000
assert swarm_info['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
@requires_api_version('1.24')
def test_leave_swarm(self):
assert self.init_swarm()
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.leave_swarm()
assert exc_info.value.response.status_code == 500
assert self.client.leave_swarm(force=True)
with pytest.raises(docker.errors.APIError) as exc_info:
self.client.inspect_swarm()
assert exc_info.value.response.status_code == 406
assert self.client.leave_swarm(force=True)
@requires_api_version('1.24')
def test_update_swarm(self):
assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
snapshot_interval=5000, log_entries_for_slow_followers=1200,
node_cert_expiry=7776000000000000
)
assert self.client.update_swarm(
version=swarm_info_1['Version']['Index'],
swarm_spec=spec, rotate_worker_token=True
)
swarm_info_2 = self.client.inspect_swarm()
assert (
swarm_info_1['Version']['Index'] !=
swarm_info_2['Version']['Index']
)
assert swarm_info_2['Spec']['Raft']['SnapshotInterval'] == 5000
assert (
swarm_info_2['Spec']['Raft']['LogEntriesForSlowFollowers'] == 1200
)
assert (
swarm_info_1['JoinTokens']['Manager'] ==
swarm_info_2['JoinTokens']['Manager']
)
assert (
swarm_info_1['JoinTokens']['Worker'] !=
swarm_info_2['JoinTokens']['Worker']
)
@requires_api_version('1.24')
def test_update_swarm_name(self):
assert self.init_swarm()
swarm_info_1 = self.client.inspect_swarm()
spec = self.client.create_swarm_spec(
node_cert_expiry=7776000000000000, name='reimuhakurei'
)
assert self.client.update_swarm(
version=swarm_info_1['Version']['Index'], swarm_spec=spec
)
swarm_info_2 = self.client.inspect_swarm()
assert (
swarm_info_1['Version']['Index'] !=
swarm_info_2['Version']['Index']
)
assert swarm_info_2['Spec']['Name'] == 'reimuhakurei'
@requires_api_version('1.24')
def test_list_nodes(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
assert 'ID' in node
assert 'Spec' in node
assert node['Spec']['Role'] == 'manager'
filtered_list = self.client.nodes(filters={
'id': node['ID']
})
assert len(filtered_list) == 1
filtered_list = self.client.nodes(filters={
'role': 'worker'
})
assert len(filtered_list) == 0
@requires_api_version('1.24')
def test_inspect_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
assert len(nodes_list) == 1
node = nodes_list[0]
node_data = self.client.inspect_node(node['ID'])
assert node['ID'] == node_data['ID']
assert node['Version'] == node_data['Version']
@requires_api_version('1.24')
def test_update_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node = nodes_list[0]
orig_spec = node['Spec']
# add a new label
new_spec = copy.deepcopy(orig_spec)
new_spec['Labels'] = {'new.label': 'new value'}
self.client.update_node(node_id=node['ID'],
version=node['Version']['Index'],
node_spec=new_spec)
updated_node = self.client.inspect_node(node['ID'])
assert new_spec == updated_node['Spec']
# Revert the changes
self.client.update_node(node_id=node['ID'],
version=updated_node['Version']['Index'],
node_spec=orig_spec)
reverted_node = self.client.inspect_node(node['ID'])
assert orig_spec == reverted_node['Spec']
@requires_api_version('1.24')
def test_remove_main_node(self):
assert self.init_swarm()
nodes_list = self.client.nodes()
node_id = nodes_list[0]['ID']
with pytest.raises(docker.errors.NotFound):
self.client.remove_node('foobar01')
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id)
assert e.value.response.status_code >= 400
with pytest.raises(docker.errors.APIError) as e:
self.client.remove_node(node_id, True)
assert e.value.response.status_code >= 400
==> docker-2.5.1/tests/integration/api_client_test.py <==
import base64
import os
import tempfile
import time
import unittest
import warnings
import docker
from docker.utils import kwargs_from_env
from .base import BaseAPIIntegrationTest
class InformationTest(BaseAPIIntegrationTest):
def test_version(self):
res = self.client.version()
self.assertIn('GoVersion', res)
self.assertIn('Version', res)
self.assertEqual(len(res['Version'].split('.')), 3)
def test_info(self):
res = self.client.info()
self.assertIn('Containers', res)
self.assertIn('Images', res)
self.assertIn('Debug', res)
class LoadConfigTest(BaseAPIIntegrationTest):
def test_load_legacy_config(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(cfg_path, 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('Auth'), None)
def test_load_json_config(self):
folder = tempfile.mkdtemp()
self.tmp_folders.append(folder)
cfg_path = os.path.join(folder, '.dockercfg')
f = open(cfg_path, 'w')
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
email_ = 'sakuya@scarlet.net'
f.write('{{"{0}": {{"auth": "{1}", "email": "{2}"}}}}\n'.format(
docker.auth.INDEX_URL, auth_, email_))
f.close()
cfg = docker.auth.load_config(cfg_path)
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
cfg = cfg[docker.auth.INDEX_URL]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('Auth'), None)
class AutoDetectVersionTest(unittest.TestCase):
def test_client_init(self):
client = docker.APIClient(version='auto', **kwargs_from_env())
client_version = client._version
api_version = client.version(api_version=False)['ApiVersion']
self.assertEqual(client_version, api_version)
api_version_2 = client.version()['ApiVersion']
self.assertEqual(client_version, api_version_2)
client.close()
class ConnectionTimeoutTest(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
self.client = docker.api.APIClient(
version=docker.constants.MINIMUM_DOCKER_API_VERSION,
base_url='http://192.168.10.2:4243',
timeout=self.timeout
)
def test_timeout(self):
start = time.time()
res = None
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
except Exception:
pass
end = time.time()
self.assertTrue(res is None)
self.assertTrue(end - start < 2 * self.timeout)
class UnixconnTest(unittest.TestCase):
"""
Test UNIX socket connection adapter.
"""
def test_resource_warnings(self):
"""
Test no warnings are produced when using the client.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
client = docker.APIClient(version='auto', **kwargs_from_env())
client.images()
client.close()
del client
assert len(w) == 0, \
"Warnings produced: {0}".format(w[0].message)
==> docker-2.5.1/tests/integration/models_volumes_test.py <==
import docker
from .base import BaseIntegrationTest, TEST_API_VERSION
class VolumesTest(BaseIntegrationTest):
def test_create_get(self):
client = docker.from_env(version=TEST_API_VERSION)
volume = client.volumes.create(
'dockerpytest_1',
driver='local',
labels={'labelkey': 'labelvalue'}
)
self.tmp_volumes.append(volume.id)
assert volume.id
assert volume.name == 'dockerpytest_1'
assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'}
volume = client.volumes.get(volume.id)
assert volume.name == 'dockerpytest_1'
def test_list_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
volume = client.volumes.create('dockerpytest_1')
self.tmp_volumes.append(volume.id)
assert volume in client.volumes.list()
assert volume in client.volumes.list(filters={'name': 'dockerpytest_'})
assert volume not in client.volumes.list(filters={'name': 'foobar'})
volume.remove()
assert volume not in client.volumes.list()
docker-2.5.1/tests/integration/models_containers_test.py 0000664 0001750 0001750 00000025044 13145377337 024752 0 ustar joffrey joffrey 0000000 0000000 import docker
import tempfile
from .base import BaseIntegrationTest, TEST_API_VERSION
from ..helpers import random_name
class ContainerCollectionTest(BaseIntegrationTest):
def test_run(self):
client = docker.from_env(version=TEST_API_VERSION)
self.assertEqual(
client.containers.run("alpine", "echo hello world", remove=True),
b'hello world\n'
)
def test_run_detach(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
assert container.attrs['Config']['Image'] == "alpine"
assert container.attrs['Config']['Cmd'] == ['sleep', '300']
def test_run_with_error(self):
client = docker.from_env(version=TEST_API_VERSION)
with self.assertRaises(docker.errors.ContainerError) as cm:
client.containers.run("alpine", "cat /test", remove=True)
assert cm.exception.exit_status == 1
assert "cat /test" in str(cm.exception)
assert "alpine" in str(cm.exception)
assert "No such file or directory" in str(cm.exception)
def test_run_with_image_that_does_not_exist(self):
client = docker.from_env(version=TEST_API_VERSION)
with self.assertRaises(docker.errors.ImageNotFound):
client.containers.run("dockerpytest_does_not_exist")
def test_run_with_volume(self):
client = docker.from_env(version=TEST_API_VERSION)
path = tempfile.mkdtemp()
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
volumes=["%s:/insidecontainer" % path],
detach=True
)
self.tmp_containers.append(container.id)
container.wait()
out = client.containers.run(
"alpine", "cat /insidecontainer/test",
volumes=["%s:/insidecontainer" % path]
)
self.assertEqual(out, b'hello\n')
def test_run_with_named_volume(self):
client = docker.from_env(version=TEST_API_VERSION)
client.volumes.create(name="somevolume")
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /insidecontainer/test'",
volumes=["somevolume:/insidecontainer"],
detach=True
)
self.tmp_containers.append(container.id)
container.wait()
out = client.containers.run(
"alpine", "cat /insidecontainer/test",
volumes=["somevolume:/insidecontainer"]
)
self.assertEqual(out, b'hello\n')
def test_run_with_network(self):
net_name = random_name()
client = docker.from_env(version=TEST_API_VERSION)
client.networks.create(net_name)
self.tmp_networks.append(net_name)
container = client.containers.run(
'alpine', 'echo hello world', network=net_name,
detach=True
)
self.tmp_containers.append(container.id)
attrs = container.attrs
assert 'NetworkSettings' in attrs
assert 'Networks' in attrs['NetworkSettings']
assert list(attrs['NetworkSettings']['Networks'].keys()) == [net_name]
def test_run_with_none_driver(self):
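        # With the 'none' log driver the daemon stores no output, so a
        # non-detached run() has nothing to return.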
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
"alpine", "echo hello",
log_config=dict(type='none')
)
self.assertEqual(out, None)
def test_run_with_json_file_driver(self):
client = docker.from_env(version=TEST_API_VERSION)
out = client.containers.run(
"alpine", "echo hello",
log_config=dict(type='json-file')
)
self.assertEqual(out, b'hello\n')
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
assert client.containers.get(container.id).attrs[
'Config']['Image'] == "alpine"
def test_list(self):
client = docker.from_env(version=TEST_API_VERSION)
container_id = client.containers.run(
"alpine", "sleep 300", detach=True).id
self.tmp_containers.append(container_id)
containers = [c for c in client.containers.list() if c.id ==
container_id]
assert len(containers) == 1
container = containers[0]
assert container.attrs['Config']['Image'] == 'alpine'
container.kill()
container.remove()
assert container_id not in [c.id for c in client.containers.list()]
class ContainerTest(BaseIntegrationTest):
def test_commit(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test'",
detach=True
)
self.tmp_containers.append(container.id)
container.wait()
image = container.commit()
self.assertEqual(
client.containers.run(image.id, "cat /test", remove=True),
b"hello\n"
)
def test_diff(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "touch /test", detach=True)
self.tmp_containers.append(container.id)
container.wait()
assert container.diff() == [{'Path': '/test', 'Kind': 1}]
def test_exec_run(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run(
"alpine", "sh -c 'echo \"hello\" > /test; sleep 60'", detach=True
)
self.tmp_containers.append(container.id)
assert container.exec_run("cat /test") == b"hello\n"
def test_kill(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
while container.status != 'running':
container.reload()
assert container.status == 'running'
container.kill()
container.reload()
assert container.status == 'exited'
def test_logs(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello world",
detach=True)
self.tmp_containers.append(container.id)
container.wait()
assert container.logs() == b"hello world\n"
def test_pause(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
container.pause()
container.reload()
assert container.status == "paused"
container.unpause()
container.reload()
assert container.status == "running"
def test_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello", detach=True)
self.tmp_containers.append(container.id)
assert container.id in [c.id for c in client.containers.list(all=True)]
container.wait()
container.remove()
containers = client.containers.list(all=True)
assert container.id not in [c.id for c in containers]
def test_rename(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "echo hello", name="test1",
detach=True)
self.tmp_containers.append(container.id)
assert container.name == "test1"
container.rename("test2")
container.reload()
assert container.name == "test2"
def test_restart(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 100", detach=True)
self.tmp_containers.append(container.id)
first_started_at = container.attrs['State']['StartedAt']
container.restart()
container.reload()
second_started_at = container.attrs['State']['StartedAt']
assert first_started_at != second_started_at
def test_start(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.create("alpine", "sleep 50", detach=True)
self.tmp_containers.append(container.id)
assert container.status == "created"
container.start()
container.reload()
assert container.status == "running"
def test_stats(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 100", detach=True)
self.tmp_containers.append(container.id)
stats = container.stats(stream=False)
for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
assert key in stats
def test_stop(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "top", detach=True)
self.tmp_containers.append(container.id)
assert container.status in ("running", "created")
container.stop(timeout=2)
container.reload()
assert container.status == "exited"
def test_top(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 60", detach=True)
self.tmp_containers.append(container.id)
top = container.top()
assert len(top['Processes']) == 1
assert 'sleep 60' in top['Processes'][0]
def test_update(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 60", detach=True,
cpu_shares=2)
self.tmp_containers.append(container.id)
assert container.attrs['HostConfig']['CpuShares'] == 2
container.update(cpu_shares=3)
container.reload()
assert container.attrs['HostConfig']['CpuShares'] == 3
def test_wait(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sh -c 'exit 0'",
detach=True)
self.tmp_containers.append(container.id)
assert container.wait() == 0
container = client.containers.run("alpine", "sh -c 'exit 1'",
detach=True)
self.tmp_containers.append(container.id)
assert container.wait() == 1
docker-2.5.1/tests/integration/errors_test.py 0000664 0001750 0001750 00000001162 13106703741 022536 0 ustar joffrey joffrey 0000000 0000000 from docker.errors import APIError
from .base import BaseAPIIntegrationTest, BUSYBOX
class ErrorsTest(BaseAPIIntegrationTest):
def test_api_error_parses_json(self):
container = self.client.create_container(BUSYBOX, ['sleep', '10'])
self.client.start(container['Id'])
with self.assertRaises(APIError) as cm:
self.client.remove_container(container['Id'])
explanation = cm.exception.explanation
assert 'You cannot remove a running container' in explanation
assert '{"message":' not in explanation
self.client.remove_container(container['Id'], force=True)
docker-2.5.1/tests/integration/api_image_test.py 0000664 0001750 0001750 00000026635 13147140516 023151 0 ustar joffrey joffrey 0000000 0000000 import contextlib
import json
import shutil
import socket
import tarfile
import tempfile
import threading
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from ..helpers import requires_api_version
from .base import BaseAPIIntegrationTest, BUSYBOX
class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
res10 = res1[0]
self.assertIn('Created', res10)
self.assertIn('RepoTags', res10)
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
self.assertEqual(len(distinct), self.client.info()['Images'])
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
self.assertEqual(type(res1[0]), six.text_type)
class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
def test_pull_streaming(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull(
'hello-world', tag='latest', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
self.assertIn('Container', img)
self.assertTrue(img['Container'].startswith(id))
self.assertIn('ContainerConfig', img)
self.assertIn('Image', img['ContainerConfig'])
self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
self.assertIn('Parent', img)
self.assertEqual(img['Parent'], busybox_id)
def test_commit_with_changes(self):
cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
self.tmp_containers.append(cid)
self.client.start(cid)
img_id = self.client.commit(
cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
)
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
assert 'Container' in img
assert img['Container'].startswith(cid['Id'])
assert '8000/tcp' in img['Config']['ExposedPorts']
assert img['Config']['Cmd'] == ['bash']
class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
logs = self.client.remove_image(img_id, force=True)
self.assertIn({"Deleted": img_id}, logs)
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
self.assertEqual(len(res), 0)
class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
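        # extend_file grows the file to exactly n_bytes by seeking to the
        # final byte, writing a single 'A' (0x41), and rewinding.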
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile(delete=False) as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
def test_import_from_bytes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
        # The generic import_image() function cannot import in-memory bytes
        # data represented as a string type, because import_image() would
        # treat it as a filename and, in most cases, raise an exception.
        # So we test the import_image_from_data() function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_file(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_stream(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_image_from_data_with_changes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
def test_import_image_with_changes(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
statuses = self.client.import_image(
src=tar_filename, repository='test/import-from-file',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
# Docs say output is available in 1.23, but this test fails on 1.12.0
@requires_api_version('1.24')
def test_get_load_image(self):
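        # get_image() streams the image as a tarball; feeding that stream
        # back to load_image() should round-trip it and emit a
        # "Loaded image: ..." status line.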
test_img = 'hello-world:latest'
self.client.pull(test_img)
data = self.client.get_image(test_img)
assert data
output = self.client.load_image(data)
assert any([
line for line in output
if 'Loaded image: {}'.format(test_img) in line.get('stream', '')
])
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def test_import_from_url(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
tar_size = 10240
with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
@requires_api_version('1.25')
class PruneImagesTest(BaseAPIIntegrationTest):
def test_prune_images(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
# Ensure busybox does not get pruned
ctnr = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.tmp_containers.append(ctnr)
self.client.pull('hello-world', tag='latest')
self.tmp_imgs.append('hello-world')
img_id = self.client.inspect_image('hello-world')['Id']
result = self.client.prune_images()
assert img_id not in [
img.get('Deleted') for img in result['ImagesDeleted']
]
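        # With the dangling filter disabled, prune removes every image not
        # referenced by a container, so hello-world should now be deleted.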
result = self.client.prune_images({'dangling': False})
assert result['SpaceReclaimed'] > 0
assert 'hello-world:latest' in [
img.get('Untagged') for img in result['ImagesDeleted']
]
assert img_id in [
img.get('Deleted') for img in result['ImagesDeleted']
]
docker-2.5.1/tests/integration/api_healthcheck_test.py 0000664 0001750 0001750 00000004454 13124577310 024326 0 ustar joffrey joffrey 0000000 0000000 from .base import BaseAPIIntegrationTest, BUSYBOX
from .. import helpers
SECOND = 1000000000
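# Healthcheck durations (interval, timeout, start_period) are expressed in
# nanoseconds in the Engine API, hence this conversion constant.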
def wait_on_health_status(client, container, status):
def condition():
res = client.inspect_container(container)
return res['State']['Health']['Status'] == status
return helpers.wait_on_condition(condition)
class HealthcheckTest(BaseAPIIntegrationTest):
@helpers.requires_api_version('1.24')
def test_healthcheck_shell_command(self):
container = self.client.create_container(
BUSYBOX, 'top', healthcheck=dict(test='echo "hello world"'))
self.tmp_containers.append(container)
res = self.client.inspect_container(container)
assert res['Config']['Healthcheck']['Test'] == \
['CMD-SHELL', 'echo "hello world"']
@helpers.requires_api_version('1.24')
def test_healthcheck_passes(self):
container = self.client.create_container(
BUSYBOX, 'top', healthcheck=dict(
test="true",
interval=1 * SECOND,
timeout=1 * SECOND,
retries=1,
))
self.tmp_containers.append(container)
self.client.start(container)
wait_on_health_status(self.client, container, "healthy")
@helpers.requires_api_version('1.24')
def test_healthcheck_fails(self):
container = self.client.create_container(
BUSYBOX, 'top', healthcheck=dict(
test="false",
interval=1 * SECOND,
timeout=1 * SECOND,
retries=1,
))
self.tmp_containers.append(container)
self.client.start(container)
wait_on_health_status(self.client, container, "unhealthy")
@helpers.requires_api_version('1.29')
def test_healthcheck_start_period(self):
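        # start_period (API 1.29+) gives the container time to bootstrap;
        # probe failures during that window don't count toward retries.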
container = self.client.create_container(
BUSYBOX, 'top', healthcheck=dict(
test="echo 'x' >> /counter.txt && "
"test `cat /counter.txt | wc -l` -ge 3",
interval=1 * SECOND,
timeout=1 * SECOND,
retries=1,
start_period=3 * SECOND
)
)
self.tmp_containers.append(container)
self.client.start(container)
wait_on_health_status(self.client, container, "healthy")
docker-2.5.1/tests/integration/conftest.py 0000664 0001750 0001750 00000001472 13106703745 022020 0 ustar joffrey joffrey 0000000 0000000 from __future__ import print_function
import sys
import warnings
import docker.errors
from docker.utils import kwargs_from_env
import pytest
from .base import BUSYBOX
@pytest.fixture(autouse=True, scope='session')
def setup_test_session():
warnings.simplefilter('error')
c = docker.APIClient(version='auto', **kwargs_from_env())
try:
c.inspect_image(BUSYBOX)
except docker.errors.NotFound:
print("\npulling {0}".format(BUSYBOX), file=sys.stderr)
for data in c.pull(BUSYBOX, stream=True, decode=True):
status = data.get("status")
progress = data.get("progress")
detail = "{0} - {1}".format(status, progress)
print(detail, file=sys.stderr)
    # Double-check that busybox is now available
c.inspect_image(BUSYBOX)
c.close()
docker-2.5.1/tests/integration/models_images_test.py 0000664 0001750 0001750 00000006776 13142163435 024052 0 ustar joffrey joffrey 0000000 0000000 import io
import docker
import pytest
from .base import BaseIntegrationTest, TEST_API_VERSION
class ImageCollectionTest(BaseIntegrationTest):
def test_build(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
))
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"hello world\n"
@pytest.mark.xfail(reason='Engine 1.13 responds with status 500')
def test_build_with_error(self):
client = docker.from_env(version=TEST_API_VERSION)
with self.assertRaises(docker.errors.BuildError) as cm:
client.images.build(fileobj=io.BytesIO(
"FROM alpine\n"
"NOTADOCKERFILECOMMAND".encode('ascii')
))
assert str(cm.exception) == ("Unknown instruction: "
"NOTADOCKERFILECOMMAND")
def test_build_with_multiple_success(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.build(
tag='some-tag', fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo hello world".encode('ascii')
)
)
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"hello world\n"
def test_build_with_success_build_output(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.build(
tag='dup-txt-tag', fileobj=io.BytesIO(
"FROM alpine\n"
"CMD echo Successfully built abcd1234".encode('ascii')
)
)
self.tmp_imgs.append(image.id)
assert client.containers.run(image) == b"Successfully built abcd1234\n"
def test_list(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
assert image.id in get_ids(client.images.list())
def test_list_with_repository(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
assert image.id in get_ids(client.images.list('alpine'))
assert image.id in get_ids(client.images.list('alpine:latest'))
def test_pull(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
assert 'alpine:latest' in image.attrs['RepoTags']
def test_pull_with_tag(self):
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine', tag='3.3')
assert 'alpine:3.3' in image.attrs['RepoTags']
class ImageTest(BaseIntegrationTest):
def test_tag_and_remove(self):
repo = 'dockersdk.tests.images.test_tag'
tag = 'some-tag'
identifier = '{}:{}'.format(repo, tag)
client = docker.from_env(version=TEST_API_VERSION)
image = client.images.pull('alpine:latest')
result = image.tag(repo, tag)
assert result is True
self.tmp_imgs.append(identifier)
assert image.id in get_ids(client.images.list(repo))
assert image.id in get_ids(client.images.list(identifier))
client.images.remove(identifier)
assert image.id not in get_ids(client.images.list(repo))
assert image.id not in get_ids(client.images.list(identifier))
assert image.id in get_ids(client.images.list('alpine:latest'))
def get_ids(images):
return [i.id for i in images]
docker-2.5.1/tests/integration/api_container_test.py 0000664 0001750 0001750 00000140323 13147142632 024041 0 ustar joffrey joffrey 0000000 0000000 import os
import signal
import tempfile
import docker
from docker.constants import IS_WINDOWS_PLATFORM
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
import pytest
import six
from .base import BUSYBOX, BaseAPIIntegrationTest
from .. import helpers
from ..helpers import requires_api_version
class ListContainersTest(BaseAPIIntegrationTest):
def test_list_containers(self):
res0 = self.client.containers(all=True)
size = len(res0)
res1 = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res1)
self.client.start(res1['Id'])
self.tmp_containers.append(res1['Id'])
res2 = self.client.containers(all=True)
self.assertEqual(size + 1, len(res2))
retrieved = [x for x in res2 if x['Id'].startswith(res1['Id'])]
self.assertEqual(len(retrieved), 1)
retrieved = retrieved[0]
self.assertIn('Command', retrieved)
self.assertEqual(retrieved['Command'], six.text_type('true'))
self.assertIn('Image', retrieved)
self.assertRegex(retrieved['Image'], r'busybox:.*')
self.assertIn('Status', retrieved)
class CreateContainerTest(BaseAPIIntegrationTest):
def test_create(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
def test_create_with_host_pid_mode(self):
ctnr = self.client.create_container(
BUSYBOX, 'true', host_config=self.client.create_host_config(
pid_mode='host', network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
self.assertIn('HostConfig', inspect)
host_config = inspect['HostConfig']
self.assertIn('PidMode', host_config)
self.assertEqual(host_config['PidMode'], 'host')
def test_create_with_links(self):
res0 = self.client.create_container(
BUSYBOX, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
BUSYBOX, 'cat',
detach=True, stdin_open=True,
environment={'FOO': '1'})
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
        # strip the leading '/' from the container name
link_path1 = self.client.inspect_container(container1_id)['Name'][1:]
link_alias1 = 'mylink1'
link_env_prefix1 = link_alias1.upper()
link_path2 = self.client.inspect_container(container2_id)['Name'][1:]
link_alias2 = 'mylink2'
link_env_prefix2 = link_alias2.upper()
res2 = self.client.create_container(
BUSYBOX, 'env', host_config=self.client.create_host_config(
links={link_path1: link_alias1, link_path2: link_alias2},
network_mode='bridge'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
self.assertEqual(self.client.wait(container3_id), 0)
logs = self.client.logs(container3_id)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn('{0}_NAME='.format(link_env_prefix1), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix1), logs)
self.assertIn('{0}_NAME='.format(link_env_prefix2), logs)
self.assertIn('{0}_ENV_FOO=1'.format(link_env_prefix2), logs)
def test_create_with_restart_policy(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '2'],
host_config=self.client.create_host_config(
restart_policy={"Name": "always", "MaximumRetryCount": 0},
network_mode='none'
)
)
id = container['Id']
self.client.start(id)
self.client.wait(id)
with self.assertRaises(docker.errors.APIError) as exc:
self.client.remove_container(id)
err = exc.exception.explanation
self.assertIn(
'You cannot remove ', err
)
self.client.remove_container(id, force=True)
def test_create_container_with_volumes_from(self):
vol_names = ['foobar_vol0', 'foobar_vol1']
res0 = self.client.create_container(
BUSYBOX, 'true', name=vol_names[0]
)
container1_id = res0['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
res1 = self.client.create_container(
BUSYBOX, 'true', name=vol_names[1]
)
container2_id = res1['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
with self.assertRaises(docker.errors.DockerException):
self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
volumes_from=vol_names
)
res2 = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True,
host_config=self.client.create_host_config(
volumes_from=vol_names, network_mode='none'
)
)
container3_id = res2['Id']
self.tmp_containers.append(container3_id)
self.client.start(container3_id)
info = self.client.inspect_container(res2['Id'])
self.assertCountEqual(info['HostConfig']['VolumesFrom'], vol_names)
    def test_create_container_readonly_fs(self):
ctnr = self.client.create_container(
BUSYBOX, ['mkdir', '/shrine'],
host_config=self.client.create_host_config(
read_only=True, network_mode='none'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
res = self.client.wait(ctnr)
self.assertNotEqual(res, 0)
    def test_create_container_with_name(self):
res = self.client.create_container(BUSYBOX, 'true', name='foobar')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
self.assertEqual('/foobar', inspect['Name'])
    def test_create_container_privileged(self):
res = self.client.create_container(
BUSYBOX, 'true', host_config=self.client.create_host_config(
privileged=True, network_mode='none'
)
)
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
# Since Nov 2013, the Privileged flag is no longer part of the
# container's config exposed via the API (safety concerns?).
#
if 'Privileged' in inspect['Config']:
self.assertEqual(inspect['Config']['Privileged'], True)
def test_create_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
BUSYBOX, ['sleep', '60'], mac_address=mac_address_expected)
id = container['Id']
self.client.start(container)
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
self.client.kill(id)
@requires_api_version('1.20')
def test_group_id_ints(self):
container = self.client.create_container(
BUSYBOX, 'id -G',
host_config=self.client.create_host_config(group_add=[1000, 1001])
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
@requires_api_version('1.20')
def test_group_id_strings(self):
container = self.client.create_container(
BUSYBOX, 'id -G', host_config=self.client.create_host_config(
group_add=['1000', '1001']
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.wait(container)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
groups = logs.strip().split(' ')
self.assertIn('1000', groups)
self.assertIn('1001', groups)
def test_valid_log_driver_and_log_opt(self):
log_config = docker.types.LogConfig(
type='json-file',
config={'max-file': '100'}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], log_config.type)
self.assertEqual(container_log_config['Config'], log_config.config)
def test_invalid_log_driver_raises_exception(self):
log_config = docker.types.LogConfig(
type='asdf-nope',
config={}
)
expected_msg = "logger: no log driver named 'asdf-nope' is registered"
with pytest.raises(docker.errors.APIError) as excinfo:
# raises an internal server error 500
container = self.client.create_container(
BUSYBOX, ['true'], host_config=self.client.create_host_config(
log_config=log_config
)
)
self.client.start(container)
assert excinfo.value.explanation == expected_msg
def test_valid_no_log_driver_specified(self):
log_config = docker.types.LogConfig(
type="",
config={'max-file': '100'}
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], log_config.config)
def test_valid_no_config_specified(self):
log_config = docker.types.LogConfig(
type="json-file",
config=None
)
container = self.client.create_container(
BUSYBOX, ['true'],
host_config=self.client.create_host_config(log_config=log_config)
)
self.tmp_containers.append(container['Id'])
self.client.start(container)
info = self.client.inspect_container(container)
container_log_config = info['HostConfig']['LogConfig']
self.assertEqual(container_log_config['Type'], "json-file")
self.assertEqual(container_log_config['Config'], {})
def test_create_with_memory_constraints_with_str(self):
ctnr = self.client.create_container(
BUSYBOX, 'true',
host_config=self.client.create_host_config(
memswap_limit='1G',
mem_limit='700M'
)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
self.assertIn('HostConfig', inspect)
host_config = inspect['HostConfig']
for limit in ['Memory', 'MemorySwap']:
self.assertIn(limit, host_config)
def test_create_with_memory_constraints_with_int(self):
ctnr = self.client.create_container(
BUSYBOX, 'true',
host_config=self.client.create_host_config(mem_swappiness=40)
)
self.assertIn('Id', ctnr)
self.tmp_containers.append(ctnr['Id'])
self.client.start(ctnr)
inspect = self.client.inspect_container(ctnr)
self.assertIn('HostConfig', inspect)
host_config = inspect['HostConfig']
self.assertIn('MemorySwappiness', host_config)
def test_create_with_environment_variable_no_value(self):
container = self.client.create_container(
BUSYBOX,
['echo'],
environment={'Foo': None, 'Other': 'one', 'Blank': ''},
)
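        # A None value serializes as a bare variable name ('Foo'), while an
        # empty string keeps its '=' sign ('Blank='), as asserted below.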
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container['Id'])
assert (
sorted(config['Config']['Env']) ==
sorted(['Foo', 'Other=one', 'Blank='])
)
@requires_api_version('1.22')
def test_create_with_tmpfs(self):
tmpfs = {
'/tmp1': 'size=3M'
}
container = self.client.create_container(
BUSYBOX,
['echo'],
host_config=self.client.create_host_config(
tmpfs=tmpfs))
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
assert config['HostConfig']['Tmpfs'] == tmpfs
@requires_api_version('1.24')
def test_create_with_isolation(self):
container = self.client.create_container(
BUSYBOX, ['echo'], host_config=self.client.create_host_config(
isolation='default'
)
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
assert config['HostConfig']['Isolation'] == 'default'
@requires_api_version('1.25')
def test_create_with_auto_remove(self):
host_config = self.client.create_host_config(
auto_remove=True
)
container = self.client.create_container(
BUSYBOX, ['echo', 'test'], host_config=host_config
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
assert config['HostConfig']['AutoRemove'] is True
@requires_api_version('1.25')
def test_create_with_stop_timeout(self):
container = self.client.create_container(
BUSYBOX, ['echo', 'test'], stop_timeout=25
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
assert config['Config']['StopTimeout'] == 25
@requires_api_version('1.24')
@pytest.mark.xfail(True, reason='Not supported on most drivers')
def test_create_with_storage_opt(self):
host_config = self.client.create_host_config(
storage_opt={'size': '120G'}
)
container = self.client.create_container(
BUSYBOX, ['echo', 'test'], host_config=host_config
)
self.tmp_containers.append(container)
config = self.client.inspect_container(container)
assert config['HostConfig']['StorageOpt'] == {
'size': '120G'
}
@requires_api_version('1.25')
def test_create_with_init(self):
ctnr = self.client.create_container(
BUSYBOX, 'true',
host_config=self.client.create_host_config(
init=True
)
)
self.tmp_containers.append(ctnr['Id'])
config = self.client.inspect_container(ctnr)
assert config['HostConfig']['Init'] is True
@pytest.mark.xfail(True, reason='init-path removed in 17.05.0')
@requires_api_version('1.25')
def test_create_with_init_path(self):
ctnr = self.client.create_container(
BUSYBOX, 'true',
host_config=self.client.create_host_config(
init_path="/usr/libexec/docker-init"
)
)
self.tmp_containers.append(ctnr['Id'])
config = self.client.inspect_container(ctnr)
assert config['HostConfig']['InitPath'] == "/usr/libexec/docker-init"
class VolumeBindTest(BaseAPIIntegrationTest):
def setUp(self):
super(VolumeBindTest, self).setUp()
self.mount_dest = '/mnt'
        # Create a temporary directory on the host to use as the bind source
self.mount_origin = tempfile.mkdtemp()
self.filename = 'shared.txt'
self.run_with_volume(
False,
BUSYBOX,
['touch', os.path.join(self.mount_dest, self.filename)],
)
@pytest.mark.xfail(
IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
)
def test_create_with_binds_rw(self):
container = self.run_with_volume(
False,
BUSYBOX,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(self.filename, logs)
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, True)
@pytest.mark.xfail(
IS_WINDOWS_PLATFORM, reason='Test not designed for Windows platform'
)
def test_create_with_binds_ro(self):
self.run_with_volume(
False,
BUSYBOX,
['touch', os.path.join(self.mount_dest, self.filename)],
)
container = self.run_with_volume(
True,
BUSYBOX,
['ls', self.mount_dest],
)
logs = self.client.logs(container)
if six.PY3:
logs = logs.decode('utf-8')
self.assertIn(self.filename, logs)
inspect_data = self.client.inspect_container(container)
self.check_container_data(inspect_data, False)
def check_container_data(self, inspect_data, rw):
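        # API versions before 1.20 report bind mounts under 'Volumes' /
        # 'VolumesRW'; 1.20 and later use the richer 'Mounts' structure.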
if docker.utils.compare_version('1.20', self.client._version) < 0:
self.assertIn('Volumes', inspect_data)
self.assertIn(self.mount_dest, inspect_data['Volumes'])
self.assertEqual(
self.mount_origin, inspect_data['Volumes'][self.mount_dest]
)
self.assertIn(self.mount_dest, inspect_data['VolumesRW'])
self.assertFalse(inspect_data['VolumesRW'][self.mount_dest])
else:
self.assertIn('Mounts', inspect_data)
filtered = list(filter(
lambda x: x['Destination'] == self.mount_dest,
inspect_data['Mounts']
))
self.assertEqual(len(filtered), 1)
mount_data = filtered[0]
self.assertEqual(mount_data['Source'], self.mount_origin)
self.assertEqual(mount_data['RW'], rw)
def run_with_volume(self, ro, *args, **kwargs):
return self.run_container(
*args,
volumes={self.mount_dest: {}},
host_config=self.client.create_host_config(
binds={
self.mount_origin: {
'bind': self.mount_dest,
'ro': ro,
},
},
network_mode='none'
),
**kwargs
)
@requires_api_version('1.20')
class ArchiveTest(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
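        # get_archive() returns a (stream, stat) tuple: the stream yields a
        # tar archive containing the requested path, and stat describes the
        # file's metadata.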
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
BUSYBOX, 'sh -c "echo {0} > /vol1/data.txt"'.format(data),
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
self.client.wait(ctnr)
with tempfile.NamedTemporaryFile() as destination:
strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
for d in strm:
destination.write(d)
destination.seek(0)
retrieved_data = helpers.untar_file(destination, 'data.txt')
if six.PY3:
retrieved_data = retrieved_data.decode('utf-8')
self.assertEqual(data, retrieved_data.strip())
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
BUSYBOX, 'sh -c "echo -n {0} > /vol1/data.txt"'.format(data),
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
self.client.wait(ctnr)
strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
self.assertIn('name', stat)
self.assertEqual(stat['name'], 'data.txt')
self.assertIn('size', stat)
self.assertEqual(stat['size'], len(data))
def test_copy_file_to_container(self):
data = b'Deaf To All But The Song'
with tempfile.NamedTemporaryFile(delete=False) as test_file:
test_file.write(data)
test_file.seek(0)
ctnr = self.client.create_container(
BUSYBOX,
'cat {0}'.format(
os.path.join('/vol1/', os.path.basename(test_file.name))
),
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
with helpers.simple_tar(test_file.name) as test_tar:
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
data = data.decode('utf-8')
self.assertEqual(logs.strip(), data)
def test_copy_directory_to_container(self):
files = ['a.py', 'b.py', 'foo/b.py']
dirs = ['foo', 'bar']
base = helpers.make_tree(dirs, files)
ctnr = self.client.create_container(
BUSYBOX, 'ls -p /vol1', volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
with docker.utils.tar(base) as test_tar:
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
results = logs.strip().split()
self.assertIn('a.py', results)
self.assertIn('b.py', results)
self.assertIn('foo/', results)
self.assertIn('bar/', results)
class RenameContainerTest(BaseAPIIntegrationTest):
def test_rename_container(self):
version = self.client.version()['Version']
name = 'hong_meiling'
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.rename(res, name)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Name', inspect)
if version == '1.5.0':
self.assertEqual(name, inspect['Name'])
else:
self.assertEqual('/{0}'.format(name), inspect['Name'])
class StartContainerTest(BaseAPIIntegrationTest):
def test_start_container(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res['Id'])
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
def test_start_container_with_dict_instead_of_id(self):
res = self.client.create_container(BUSYBOX, 'true')
self.assertIn('Id', res)
self.tmp_containers.append(res['Id'])
self.client.start(res)
inspect = self.client.inspect_container(res['Id'])
self.assertIn('Config', inspect)
self.assertIn('Id', inspect)
self.assertTrue(inspect['Id'].startswith(res['Id']))
self.assertIn('Image', inspect)
self.assertIn('State', inspect)
self.assertIn('Running', inspect['State'])
if not inspect['State']['Running']:
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], 0)
def test_run_shlex_commands(self):
commands = [
'true',
'echo "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'echo -n "The Young Descendant of Tepes & Septette for the '
'Dead Princess"',
'/bin/sh -c "echo Hello World"',
'/bin/sh -c \'echo "Hello World"\'',
'echo "\"Night of Nights\""',
'true && echo "Night of Nights"'
]
for cmd in commands:
container = self.client.create_container(BUSYBOX, cmd)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0, msg=cmd)
class WaitTest(BaseAPIIntegrationTest):
def test_wait(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(id)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
def test_wait_with_dict_instead_of_id(self):
res = self.client.create_container(BUSYBOX, ['sleep', '3'])
id = res['Id']
self.tmp_containers.append(id)
self.client.start(res)
exitcode = self.client.wait(res)
self.assertEqual(exitcode, 0)
inspect = self.client.inspect_container(res)
self.assertIn('Running', inspect['State'])
self.assertEqual(inspect['State']['Running'], False)
self.assertIn('ExitCode', inspect['State'])
self.assertEqual(inspect['State']['ExitCode'], exitcode)
class LogsTest(BaseAPIIntegrationTest):
def test_logs(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
def test_logs_tail_option(self):
snippet = '''Line1
Line2'''
container = self.client.create_container(
BUSYBOX, 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id, tail=1)
self.assertEqual(logs, 'Line2\n'.encode(encoding='ascii'))
def test_logs_streaming_and_follow(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
logs = six.binary_type()
for chunk in self.client.logs(id, stream=True, follow=True):
logs += chunk
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
def test_logs_with_dict_instead_of_id(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo {0}'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(container)
self.assertEqual(logs, (snippet + '\n').encode(encoding='ascii'))
def test_logs_with_tail_0(self):
snippet = 'Flowering Nights (Sakuya Iyazoi)'
container = self.client.create_container(
BUSYBOX, 'echo "{0}"'.format(snippet)
)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
logs = self.client.logs(id, tail=0)
self.assertEqual(logs, ''.encode(encoding='ascii'))
class DiffTest(BaseAPIIntegrationTest):
def test_diff(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(id)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
def test_diff_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exitcode = self.client.wait(id)
self.assertEqual(exitcode, 0)
diff = self.client.diff(container)
test_diff = [x for x in diff if x.get('Path', None) == '/test']
self.assertEqual(len(test_diff), 1)
self.assertIn('Kind', test_diff[0])
self.assertEqual(test_diff[0]['Kind'], 1)
class StopTest(BaseAPIIntegrationTest):
def test_stop(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.stop(id, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
def test_stop_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
self.client.stop(container, timeout=2)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
class KillTest(BaseAPIIntegrationTest):
def test_kill(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
def test_kill_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(container)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False)
def test_kill_with_signal(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.tmp_containers.append(id)
self.client.start(id)
self.client.kill(
id, signal=signal.SIGKILL if not IS_WINDOWS_PLATFORM else 9
)
exitcode = self.client.wait(id)
self.assertNotEqual(exitcode, 0)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False, state)
def test_kill_with_signal_name(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal='SIGKILL')
exitcode = self.client.wait(id)
self.assertNotEqual(exitcode, 0)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False, state)
def test_kill_with_signal_integer(self):
id = self.client.create_container(BUSYBOX, ['sleep', '60'])
self.client.start(id)
self.tmp_containers.append(id)
self.client.kill(id, signal=9)
exitcode = self.client.wait(id)
self.assertNotEqual(exitcode, 0)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertNotEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], False, state)
class PortTest(BaseAPIIntegrationTest):
def test_port(self):
port_bindings = {
'1111': ('127.0.0.1', '4567'),
'2222': ('127.0.0.1', '4568')
}
container = self.client.create_container(
BUSYBOX, ['sleep', '60'], ports=list(port_bindings.keys()),
host_config=self.client.create_host_config(
port_bindings=port_bindings, network_mode='bridge'
)
)
id = container['Id']
self.client.start(container)
        # Call the port function on each binding and compare expected vs actual
for port in port_bindings:
actual_bindings = self.client.port(container, port)
port_binding = actual_bindings.pop()
ip, host_port = port_binding['HostIp'], port_binding['HostPort']
self.assertEqual(ip, port_bindings[port][0])
self.assertEqual(host_port, port_bindings[port][1])
self.client.kill(id)
class ContainerTopTest(BaseAPIIntegrationTest):
def test_top(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60']
)
self.tmp_containers.append(container)
self.client.start(container)
res = self.client.top(container)
if IS_WINDOWS_PLATFORM:
assert res['Titles'] == ['PID', 'USER', 'TIME', 'COMMAND']
else:
assert res['Titles'] == [
'UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD'
]
assert len(res['Processes']) == 1
assert res['Processes'][0][-1] == 'sleep 60'
self.client.kill(container)
@pytest.mark.skipif(
IS_WINDOWS_PLATFORM, reason='No psargs support on windows'
)
def test_top_with_psargs(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'])
self.tmp_containers.append(container)
self.client.start(container)
res = self.client.top(container, 'waux')
self.assertEqual(
res['Titles'],
['USER', 'PID', '%CPU', '%MEM', 'VSZ', 'RSS',
'TTY', 'STAT', 'START', 'TIME', 'COMMAND'],
)
self.assertEqual(len(res['Processes']), 1)
self.assertEqual(res['Processes'][0][10], 'sleep 60')
class RestartContainerTest(BaseAPIIntegrationTest):
def test_restart(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(id, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
def test_restart_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.assertIn('Id', container)
id = container['Id']
self.client.start(container)
self.tmp_containers.append(id)
info = self.client.inspect_container(id)
self.assertIn('State', info)
self.assertIn('StartedAt', info['State'])
start_time1 = info['State']['StartedAt']
self.client.restart(container, timeout=2)
info2 = self.client.inspect_container(id)
self.assertIn('State', info2)
self.assertIn('StartedAt', info2['State'])
start_time2 = info2['State']['StartedAt']
self.assertNotEqual(start_time1, start_time2)
self.assertIn('Running', info2['State'])
self.assertEqual(info2['State']['Running'], True)
self.client.kill(id)
class RemoveContainerTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(id)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
def test_remove_with_dict_instead_of_id(self):
container = self.client.create_container(BUSYBOX, ['true'])
id = container['Id']
self.client.start(id)
self.client.wait(id)
self.client.remove_container(container)
containers = self.client.containers(all=True)
res = [x for x in containers if 'Id' in x and x['Id'].startswith(id)]
self.assertEqual(len(res), 0)
class AttachContainerTest(BaseAPIIntegrationTest):
def test_run_container_streaming(self):
container = self.client.create_container(BUSYBOX, '/bin/sh',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
sock = self.client.attach_socket(container, ws=False)
self.assertTrue(sock.fileno() > -1)
def test_run_container_reading_socket(self):
line = 'hi there and stuff and things, words!'
        # `echo` appends a trailing newline, `printf` doesn't
command = "printf '{0}'".format(line)
container = self.client.create_container(BUSYBOX, command,
detach=True, tty=False)
self.tmp_containers.append(container)
opts = {"stdout": 1, "stream": 1, "logs": 1}
pty_stdout = self.client.attach_socket(container, opts)
self.addCleanup(pty_stdout.close)
self.client.start(container)
next_size = next_frame_size(pty_stdout)
self.assertEqual(next_size, len(line))
data = read_exactly(pty_stdout, next_size)
self.assertEqual(data.decode('utf-8'), line)
def test_attach_no_stream(self):
container = self.client.create_container(
BUSYBOX, 'echo hello'
)
self.tmp_containers.append(container)
self.client.start(container)
output = self.client.attach(container, stream=False, logs=True)
assert output == 'hello\n'.encode(encoding='ascii')
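
# With `tty=False`, the attach socket above is multiplexed: each frame opens
# with an 8-byte header (one stream-type byte -- 0=stdin, 1=stdout,
# 2=stderr -- three padding bytes, then a big-endian uint32 payload length),
# which is what `next_frame_size()` parses. A hand-rolled sketch of the same
# framing, assuming `sock` is an attach socket (illustrative only):
import struct

def _sketch_read_frame(sock):
    header = read_exactly(sock, 8)  # helper from docker.utils.socket
    stream_type, length = struct.unpack('>BxxxI', header)
    return stream_type, read_exactly(sock, length)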
class PauseTest(BaseAPIIntegrationTest):
def test_pause_unpause(self):
container = self.client.create_container(BUSYBOX, ['sleep', '9999'])
id = container['Id']
self.tmp_containers.append(id)
self.client.start(container)
self.client.pause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], True)
self.client.unpause(id)
container_info = self.client.inspect_container(id)
self.assertIn('State', container_info)
state = container_info['State']
self.assertIn('ExitCode', state)
self.assertEqual(state['ExitCode'], 0)
self.assertIn('Running', state)
self.assertEqual(state['Running'], True)
self.assertIn('Paused', state)
self.assertEqual(state['Paused'], False)
class PruneTest(BaseAPIIntegrationTest):
@requires_api_version('1.25')
def test_prune_containers(self):
container1 = self.client.create_container(
BUSYBOX, ['sh', '-c', 'echo hello > /data.txt']
)
container2 = self.client.create_container(BUSYBOX, ['sleep', '9999'])
self.client.start(container1)
self.client.start(container2)
self.client.wait(container1)
result = self.client.prune_containers()
assert container1['Id'] in result['ContainersDeleted']
assert result['SpaceReclaimed'] > 0
assert container2['Id'] not in result['ContainersDeleted']
class GetContainerStatsTest(BaseAPIIntegrationTest):
@requires_api_version('1.19')
def test_get_container_stats_no_stream(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
)
self.tmp_containers.append(container)
self.client.start(container)
response = self.client.stats(container, stream=0)
self.client.kill(container)
self.assertEqual(type(response), dict)
for key in ['read', 'networks', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
self.assertIn(key, response)
@requires_api_version('1.17')
def test_get_container_stats_stream(self):
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
)
self.tmp_containers.append(container)
self.client.start(container)
stream = self.client.stats(container)
for chunk in stream:
self.assertEqual(type(chunk), dict)
for key in ['read', 'network', 'precpu_stats', 'cpu_stats',
'memory_stats', 'blkio_stats']:
self.assertIn(key, chunk)
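
# A stats sample carries both `cpu_stats` and the previous `precpu_stats`,
# which is enough to derive a CPU percentage the way the docker CLI does.
# A minimal sketch over one sample from the assertions above (illustrative;
# guards against the first sample, whose precpu counters can be empty):
def _sketch_cpu_percent(sample):
    cpu, precpu = sample['cpu_stats'], sample['precpu_stats']
    cpu_delta = (cpu['cpu_usage']['total_usage'] -
                 precpu.get('cpu_usage', {}).get('total_usage', 0))
    system_delta = (cpu.get('system_cpu_usage', 0) -
                    precpu.get('system_cpu_usage', 0))
    if cpu_delta <= 0 or system_delta <= 0:
        return 0.0
    ncpus = len(cpu['cpu_usage'].get('percpu_usage') or [0])
    return float(cpu_delta) / system_delta * ncpus * 100.0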
class ContainerUpdateTest(BaseAPIIntegrationTest):
@requires_api_version('1.22')
def test_update_container(self):
old_mem_limit = 400 * 1024 * 1024
new_mem_limit = 300 * 1024 * 1024
container = self.client.create_container(
BUSYBOX, 'top', host_config=self.client.create_host_config(
mem_limit=old_mem_limit
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.update_container(container, mem_limit=new_mem_limit)
inspect_data = self.client.inspect_container(container)
self.assertEqual(inspect_data['HostConfig']['Memory'], new_mem_limit)
@requires_api_version('1.23')
def test_restart_policy_update(self):
old_restart_policy = {
'MaximumRetryCount': 0,
'Name': 'always'
}
new_restart_policy = {
'MaximumRetryCount': 42,
'Name': 'on-failure'
}
container = self.client.create_container(
BUSYBOX, ['sleep', '60'],
host_config=self.client.create_host_config(
restart_policy=old_restart_policy
)
)
self.tmp_containers.append(container)
self.client.start(container)
self.client.update_container(container,
restart_policy=new_restart_policy)
inspect_data = self.client.inspect_container(container)
self.assertEqual(
inspect_data['HostConfig']['RestartPolicy']['MaximumRetryCount'],
new_restart_policy['MaximumRetryCount']
)
self.assertEqual(
inspect_data['HostConfig']['RestartPolicy']['Name'],
new_restart_policy['Name']
)
class ContainerCPUTest(BaseAPIIntegrationTest):
@requires_api_version('1.18')
def test_container_cpu_shares(self):
cpu_shares = 512
container = self.client.create_container(
BUSYBOX, 'ls', host_config=self.client.create_host_config(
cpu_shares=cpu_shares
)
)
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
self.assertEqual(inspect_data['HostConfig']['CpuShares'], 512)
@requires_api_version('1.18')
def test_container_cpuset(self):
cpuset_cpus = "0,1"
container = self.client.create_container(
BUSYBOX, 'ls', host_config=self.client.create_host_config(
cpuset_cpus=cpuset_cpus
)
)
self.tmp_containers.append(container)
self.client.start(container)
inspect_data = self.client.inspect_container(container)
self.assertEqual(inspect_data['HostConfig']['CpusetCpus'], cpuset_cpus)
@requires_api_version('1.25')
def test_create_with_runtime(self):
container = self.client.create_container(
BUSYBOX, ['echo', 'test'], runtime='runc'
)
self.tmp_containers.append(container['Id'])
config = self.client.inspect_container(container)
assert config['HostConfig']['Runtime'] == 'runc'
class LinkTest(BaseAPIIntegrationTest):
def test_remove_link(self):
# Create containers
container1 = self.client.create_container(
BUSYBOX, 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
# Create Link
        # strip the leading '/' from the container name
link_path = self.client.inspect_container(container1_id)['Name'][1:]
link_alias = 'mylink'
container2 = self.client.create_container(
BUSYBOX, 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}
)
)
container2_id = container2['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
link_name = '%s/%s' % (linked_name, link_alias)
self.client.remove_container(link_name, link=True)
# Link is gone
containers = self.client.containers(all=True)
retrieved = [x for x in containers if link_name in x['Names']]
self.assertEqual(len(retrieved), 0)
# Containers are still there
retrieved = [
x for x in containers if x['Id'].startswith(container1_id) or
x['Id'].startswith(container2_id)
]
self.assertEqual(len(retrieved), 2)
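
# As exercised above, a link is addressed as '<linking-container>/<alias>'
# (leading '/' stripped), and remove_container(..., link=True) deletes only
# the link, leaving both containers intact. A hedged one-liner sketch:
def _sketch_remove_link(client, linking_name, alias):
    # e.g. linking_name='distracted_pare', alias='mylink'
    client.remove_container('{0}/{1}'.format(linking_name, alias), link=True)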

# docker-2.5.1/tests/integration/models_resources_test.py
import docker
from .base import BaseIntegrationTest, TEST_API_VERSION
class ModelTest(BaseIntegrationTest):
def test_reload(self):
client = docker.from_env(version=TEST_API_VERSION)
container = client.containers.run("alpine", "sleep 300", detach=True)
self.tmp_containers.append(container.id)
first_started_at = container.attrs['State']['StartedAt']
container.kill()
container.start()
assert container.attrs['State']['StartedAt'] == first_started_at
container.reload()
assert container.attrs['State']['StartedAt'] != first_started_at
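
# What test_reload pins down: `Container.attrs` is a cached snapshot from
# the moment the object was fetched; kill()/start() do not refresh it, only
# reload() re-runs inspect. A minimal sketch, assuming a running daemon
# (image and command are illustrative):
def _sketch_reload(client):
    c = client.containers.run('alpine', 'sleep 300', detach=True)
    stale = c.attrs['State']['StartedAt']  # snapshot, now going stale
    c.restart()
    c.reload()  # re-fetch attrs from the daemon
    assert c.attrs['State']['StartedAt'] != stale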

# docker-2.5.1/tests/integration/models_swarm_test.py
import unittest
import docker
from .. import helpers
from .base import TEST_API_VERSION
class SwarmTest(unittest.TestCase):
def setUp(self):
helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def tearDown(self):
helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_init_update_leave(self):
client = docker.from_env(version=TEST_API_VERSION)
client.swarm.init(
advertise_addr='eth0', snapshot_interval=5000,
listen_addr=helpers.swarm_listen_addr()
)
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000
client.swarm.update(snapshot_interval=10000)
assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000
assert client.swarm.leave(force=True)
with self.assertRaises(docker.errors.APIError) as cm:
client.swarm.reload()
assert (
# FIXME: test for both until
# https://github.com/docker/docker/issues/29192 is resolved
cm.exception.response.status_code == 406 or
cm.exception.response.status_code == 503
)

# docker-2.5.1/tests/integration/models_networks_test.py
import docker
from .. import helpers
from .base import BaseIntegrationTest, TEST_API_VERSION
class NetworkCollectionTest(BaseIntegrationTest):
def test_create(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(name, labels={'foo': 'bar'})
self.tmp_networks.append(network.id)
assert network.name == name
assert network.attrs['Labels']['foo'] == "bar"
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network_id = client.networks.create(name).id
self.tmp_networks.append(network_id)
network = client.networks.get(network_id)
assert network.name == name
def test_list_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(name)
self.tmp_networks.append(network.id)
assert network.id in [n.id for n in client.networks.list()]
assert network.id not in [
n.id for n in
client.networks.list(ids=["fdhjklfdfdshjkfds"])
]
assert network.id in [
n.id for n in
client.networks.list(ids=[network.id])
]
assert network.id not in [
n.id for n in
client.networks.list(names=["fdshjklfdsjhkl"])
]
assert network.id in [
n.id for n in
client.networks.list(names=[name])
]
network.remove()
assert network.id not in [n.id for n in client.networks.list()]
class NetworkTest(BaseIntegrationTest):
def test_connect_disconnect(self):
client = docker.from_env(version=TEST_API_VERSION)
network = client.networks.create(helpers.random_name())
self.tmp_networks.append(network.id)
container = client.containers.create("alpine", "sleep 300")
self.tmp_containers.append(container.id)
assert network.containers == []
network.connect(container)
container.start()
assert client.networks.get(network.id).containers == [container]
network.disconnect(container)
assert network.containers == []
assert client.networks.get(network.id).containers == []

# docker-2.5.1/tests/integration/api_network_test.py
import docker
from docker.types import IPAMConfig, IPAMPool
import pytest
from ..helpers import random_name, requires_api_version
from .base import BaseAPIIntegrationTest, BUSYBOX
class TestNetworks(BaseAPIIntegrationTest):
def tearDown(self):
super(TestNetworks, self).tearDown()
self.client.leave_swarm(force=True)
def create_network(self, *args, **kwargs):
net_name = random_name()
net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
self.tmp_networks.append(net_id)
return (net_name, net_id)
@requires_api_version('1.21')
def test_list_networks(self):
networks = self.client.networks()
net_name, net_id = self.create_network()
networks = self.client.networks()
self.assertTrue(net_id in [n['Id'] for n in networks])
networks_by_name = self.client.networks(names=[net_name])
self.assertEqual([n['Id'] for n in networks_by_name], [net_id])
networks_by_partial_id = self.client.networks(ids=[net_id[:8]])
self.assertEqual([n['Id'] for n in networks_by_partial_id], [net_id])
@requires_api_version('1.21')
def test_inspect_network(self):
net_name, net_id = self.create_network()
net = self.client.inspect_network(net_id)
self.assertEqual(net['Id'], net_id)
self.assertEqual(net['Name'], net_name)
self.assertEqual(net['Driver'], 'bridge')
self.assertEqual(net['Scope'], 'local')
self.assertEqual(net['IPAM']['Driver'], 'default')
@requires_api_version('1.21')
def test_create_network_with_ipam_config(self):
_, net_id = self.create_network(
ipam=IPAMConfig(
driver='default',
pool_configs=[
IPAMPool(
subnet="172.28.0.0/16",
iprange="172.28.5.0/24",
gateway="172.28.5.254",
aux_addresses={
"a": "172.28.1.5",
"b": "172.28.1.6",
"c": "172.28.1.7",
},
),
],
),
)
net = self.client.inspect_network(net_id)
ipam = net['IPAM']
assert ipam.pop('Options', None) is None
assert ipam['Driver'] == 'default'
assert ipam['Config'] == [{
'Subnet': "172.28.0.0/16",
'IPRange': "172.28.5.0/24",
'Gateway': "172.28.5.254",
'AuxiliaryAddresses': {
"a": "172.28.1.5",
"b": "172.28.1.6",
"c": "172.28.1.7",
},
}]
@requires_api_version('1.21')
def test_create_network_with_host_driver_fails(self):
with pytest.raises(docker.errors.APIError):
self.client.create_network(random_name(), driver='host')
@requires_api_version('1.21')
def test_remove_network(self):
net_name, net_id = self.create_network()
assert net_name in [n['Name'] for n in self.client.networks()]
self.client.remove_network(net_id)
assert net_name not in [n['Name'] for n in self.client.networks()]
@requires_api_version('1.21')
def test_connect_and_disconnect_container(self):
net_name, net_id = self.create_network()
container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('Containers'))
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertEqual(
list(network_data['Containers'].keys()),
[container['Id']]
)
with pytest.raises(docker.errors.APIError):
self.client.connect_container_to_network(container, net_id)
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('Containers'))
with pytest.raises(docker.errors.APIError):
self.client.disconnect_container_from_network(container, net_id)
@requires_api_version('1.22')
def test_connect_and_force_disconnect_container(self):
net_name, net_id = self.create_network()
container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('Containers'))
self.client.connect_container_to_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertEqual(
list(network_data['Containers'].keys()),
[container['Id']]
)
self.client.disconnect_container_from_network(container, net_id, True)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('Containers'))
with pytest.raises(docker.errors.APIError):
self.client.disconnect_container_from_network(
container, net_id, force=True
)
@requires_api_version('1.22')
def test_connect_with_aliases(self):
net_name, net_id = self.create_network()
container = self.client.create_container(BUSYBOX, 'top')
self.tmp_containers.append(container)
self.client.start(container)
self.client.connect_container_to_network(
container, net_id, aliases=['foo', 'bar'])
container_data = self.client.inspect_container(container)
aliases = (
container_data['NetworkSettings']['Networks'][net_name]['Aliases']
)
assert 'foo' in aliases
assert 'bar' in aliases
@requires_api_version('1.21')
def test_connect_on_container_create(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(network_mode=net_name),
)
self.tmp_containers.append(container)
self.client.start(container)
network_data = self.client.inspect_network(net_id)
self.assertEqual(
list(network_data['Containers'].keys()),
[container['Id']])
self.client.disconnect_container_from_network(container, net_id)
network_data = self.client.inspect_network(net_id)
self.assertFalse(network_data.get('Containers'))
@requires_api_version('1.22')
def test_create_with_aliases(self):
net_name, net_id = self.create_network()
container = self.client.create_container(
image=BUSYBOX,
command='top',
host_config=self.client.create_host_config(
network_mode=net_name,
),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
aliases=['foo', 'bar'],
),
}),
)
self.tmp_containers.append(container)
self.client.start(container)
container_data = self.client.inspect_container(container)
aliases = (
container_data['NetworkSettings']['Networks'][net_name]['Aliases']
)
assert 'foo' in aliases
assert 'bar' in aliases
@requires_api_version('1.22')
def test_create_with_ipv4_address(self):
net_name, net_id = self.create_network(
ipam=IPAMConfig(
driver='default',
pool_configs=[IPAMPool(subnet="132.124.0.0/16")],
),
)
container = self.client.create_container(
image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
ipv4_address='132.124.0.23'
)
})
)
self.tmp_containers.append(container)
self.client.start(container)
container_data = self.client.inspect_container(container)
self.assertEqual(
container_data[
'NetworkSettings']['Networks'][net_name]['IPAMConfig'][
'IPv4Address'
],
'132.124.0.23'
)
@requires_api_version('1.22')
def test_create_with_ipv6_address(self):
net_name, net_id = self.create_network(
ipam=IPAMConfig(
driver='default',
pool_configs=[IPAMPool(subnet="2001:389::1/64")],
),
)
container = self.client.create_container(
image=BUSYBOX, command='top',
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
ipv6_address='2001:389::f00d'
)
})
)
self.tmp_containers.append(container)
self.client.start(container)
container_data = self.client.inspect_container(container)
self.assertEqual(
container_data[
'NetworkSettings']['Networks'][net_name]['IPAMConfig'][
'IPv6Address'
],
'2001:389::f00d'
)
@requires_api_version('1.24')
def test_create_with_linklocal_ips(self):
container = self.client.create_container(
BUSYBOX, 'top',
networking_config=self.client.create_networking_config(
{
'bridge': self.client.create_endpoint_config(
link_local_ips=['169.254.8.8']
)
}
),
host_config=self.client.create_host_config(network_mode='bridge')
)
self.tmp_containers.append(container)
self.client.start(container)
container_data = self.client.inspect_container(container)
net_cfg = container_data['NetworkSettings']['Networks']['bridge']
assert 'IPAMConfig' in net_cfg
assert 'LinkLocalIPs' in net_cfg['IPAMConfig']
assert net_cfg['IPAMConfig']['LinkLocalIPs'] == ['169.254.8.8']
@requires_api_version('1.22')
def test_create_with_links(self):
net_name, net_id = self.create_network()
container = self.create_and_start(
host_config=self.client.create_host_config(network_mode=net_name),
networking_config=self.client.create_networking_config({
net_name: self.client.create_endpoint_config(
links=[('docker-py-test-upstream', 'bar')],
),
}),
)
container_data = self.client.inspect_container(container)
self.assertEqual(
container_data['NetworkSettings']['Networks'][net_name]['Links'],
['docker-py-test-upstream:bar'])
self.create_and_start(
name='docker-py-test-upstream',
host_config=self.client.create_host_config(network_mode=net_name),
)
self.execute(container, ['nslookup', 'bar'])
@requires_api_version('1.21')
def test_create_check_duplicate(self):
net_name, net_id = self.create_network()
with self.assertRaises(docker.errors.APIError):
self.client.create_network(net_name, check_duplicate=True)
net_id = self.client.create_network(net_name, check_duplicate=False)
self.tmp_networks.append(net_id['Id'])
@requires_api_version('1.22')
def test_connect_with_links(self):
net_name, net_id = self.create_network()
container = self.create_and_start(
host_config=self.client.create_host_config(network_mode=net_name))
self.client.disconnect_container_from_network(container, net_name)
self.client.connect_container_to_network(
container, net_name,
links=[('docker-py-test-upstream', 'bar')])
container_data = self.client.inspect_container(container)
self.assertEqual(
container_data['NetworkSettings']['Networks'][net_name]['Links'],
['docker-py-test-upstream:bar'])
self.create_and_start(
name='docker-py-test-upstream',
host_config=self.client.create_host_config(network_mode=net_name),
)
self.execute(container, ['nslookup', 'bar'])
@requires_api_version('1.22')
def test_connect_with_ipv4_address(self):
net_name, net_id = self.create_network(
ipam=IPAMConfig(
driver='default',
pool_configs=[
IPAMPool(
subnet="172.28.0.0/16", iprange="172.28.5.0/24",
gateway="172.28.5.254"
)
]
)
)
container = self.create_and_start(
host_config=self.client.create_host_config(network_mode=net_name))
self.client.disconnect_container_from_network(container, net_name)
self.client.connect_container_to_network(
container, net_name, ipv4_address='172.28.5.24'
)
container_data = self.client.inspect_container(container)
net_data = container_data['NetworkSettings']['Networks'][net_name]
self.assertEqual(
net_data['IPAMConfig']['IPv4Address'], '172.28.5.24'
)
@requires_api_version('1.22')
def test_connect_with_ipv6_address(self):
net_name, net_id = self.create_network(
ipam=IPAMConfig(
driver='default',
pool_configs=[
IPAMPool(
subnet="2001:389::1/64", iprange="2001:389::0/96",
gateway="2001:389::ffff"
)
]
)
)
container = self.create_and_start(
host_config=self.client.create_host_config(network_mode=net_name))
self.client.disconnect_container_from_network(container, net_name)
self.client.connect_container_to_network(
container, net_name, ipv6_address='2001:389::f00d'
)
container_data = self.client.inspect_container(container)
net_data = container_data['NetworkSettings']['Networks'][net_name]
self.assertEqual(
net_data['IPAMConfig']['IPv6Address'], '2001:389::f00d'
)
@requires_api_version('1.23')
def test_create_internal_networks(self):
_, net_id = self.create_network(internal=True)
net = self.client.inspect_network(net_id)
assert net['Internal'] is True
@requires_api_version('1.23')
def test_create_network_with_labels(self):
_, net_id = self.create_network(labels={
'com.docker.py.test': 'label'
})
net = self.client.inspect_network(net_id)
assert 'Labels' in net
assert len(net['Labels']) == 1
assert net['Labels'] == {
'com.docker.py.test': 'label'
}
@requires_api_version('1.23')
def test_create_network_with_labels_wrong_type(self):
with pytest.raises(TypeError):
self.create_network(labels=['com.docker.py.test=label', ])
@requires_api_version('1.23')
def test_create_network_ipv6_enabled(self):
_, net_id = self.create_network(
enable_ipv6=True, ipam=IPAMConfig(
driver='default',
pool_configs=[
IPAMPool(
subnet="2001:389::1/64", iprange="2001:389::0/96",
gateway="2001:389::ffff"
)
]
)
)
net = self.client.inspect_network(net_id)
assert net['EnableIPv6'] is True
@requires_api_version('1.25')
def test_create_network_attachable(self):
assert self.client.init_swarm('eth0')
_, net_id = self.create_network(driver='overlay', attachable=True)
net = self.client.inspect_network(net_id)
assert net['Attachable'] is True
@requires_api_version('1.29')
def test_create_network_ingress(self):
assert self.client.init_swarm('eth0')
self.client.remove_network('ingress')
_, net_id = self.create_network(driver='overlay', ingress=True)
net = self.client.inspect_network(net_id)
assert net['Ingress'] is True
@requires_api_version('1.25')
def test_prune_networks(self):
net_name, _ = self.create_network()
result = self.client.prune_networks()
assert net_name in result['NetworksDeleted']
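
# The IPAM types used throughout this class are thin dict builders: IPAMPool
# maps subnet/iprange/gateway/aux_addresses onto the API's Subnet/IPRange/
# Gateway/AuxiliaryAddresses fields, and IPAMConfig wraps a driver plus a
# list of pools. A standalone sketch (values are illustrative):
def _sketch_ipam():
    pool = IPAMPool(subnet='10.10.0.0/16', iprange='10.10.5.0/24',
                    gateway='10.10.5.254')
    return IPAMConfig(driver='default', pool_configs=[pool])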

# docker-2.5.1/tests/integration/base.py
import os
import shutil
import unittest
import docker
from docker.utils import kwargs_from_env
import six
from .. import helpers
BUSYBOX = 'busybox:buildroot-2014.02'
TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
class BaseIntegrationTest(unittest.TestCase):
"""
A base class for integration test cases. It cleans up the Docker server
after itself.
"""
def setUp(self):
if six.PY2:
self.assertRegex = self.assertRegexpMatches
self.assertCountEqual = self.assertItemsEqual
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
self.tmp_volumes = []
self.tmp_networks = []
self.tmp_plugins = []
self.tmp_secrets = []
def tearDown(self):
client = docker.from_env(version=TEST_API_VERSION)
for img in self.tmp_imgs:
try:
client.api.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
client.api.remove_container(container, force=True)
except docker.errors.APIError:
pass
for network in self.tmp_networks:
try:
client.api.remove_network(network)
except docker.errors.APIError:
pass
for volume in self.tmp_volumes:
try:
client.api.remove_volume(volume)
except docker.errors.APIError:
pass
for secret in self.tmp_secrets:
try:
client.api.remove_secret(secret)
except docker.errors.APIError:
pass
for folder in self.tmp_folders:
shutil.rmtree(folder)
class BaseAPIIntegrationTest(BaseIntegrationTest):
"""
A test case for `APIClient` integration tests. It sets up an `APIClient`
as `self.client`.
"""
def setUp(self):
super(BaseAPIIntegrationTest, self).setUp()
self.client = docker.APIClient(
version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
)
def tearDown(self):
super(BaseAPIIntegrationTest, self).tearDown()
self.client.close()
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
exitcode = self.client.wait(container)
if exitcode != 0:
output = self.client.logs(container)
raise Exception(
"Container exited with code {}:\n{}"
.format(exitcode, output))
return container
def create_and_start(self, image=BUSYBOX, command='top', **kwargs):
container = self.client.create_container(
image=image, command=command, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
return container
def execute(self, container, cmd, exit_code=0, **kwargs):
exc = self.client.exec_create(container, cmd, **kwargs)
output = self.client.exec_start(exc)
actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
" ".join(cmd), exit_code, actual_exit_code, output)
assert actual_exit_code == exit_code, msg
def init_swarm(self, **kwargs):
return self.client.init_swarm(
'eth0', listen_addr=helpers.swarm_listen_addr(), **kwargs
)

# docker-2.5.1/tests/integration/api_service_test.py
# -*- coding: utf-8 -*-
import random
import time
import docker
import six
from ..helpers import (
force_leave_swarm, requires_api_version, requires_experimental
)
from .base import BaseAPIIntegrationTest, BUSYBOX
class ServiceTest(BaseAPIIntegrationTest):
def setUp(self):
super(ServiceTest, self).setUp()
force_leave_swarm(self.client)
self.init_swarm()
def tearDown(self):
super(ServiceTest, self).tearDown()
for service in self.client.services(filters={'name': 'dockerpytest_'}):
try:
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
force_leave_swarm(self.client)
def get_service_name(self):
return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
def get_service_container(self, service_name, attempts=20, interval=0.5,
include_stopped=False):
        # There is some delay between the creation of a service and the
        # creation of its containers; poll until a container shows up or
        # the attempts are exhausted.
while True:
containers = self.client.containers(
filters={'name': [service_name]}, quiet=True,
all=include_stopped
)
if len(containers) > 0:
return containers[0]
attempts -= 1
if attempts <= 0:
return None
time.sleep(interval)
def create_simple_service(self, name=None):
if name:
name = 'dockerpytest_{0}'.format(name)
else:
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
return name, self.client.create_service(task_tmpl, name=name)
@requires_api_version('1.24')
def test_list_services(self):
services = self.client.services()
assert isinstance(services, list)
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 0
self.create_simple_service()
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 1
assert 'dockerpytest_' in test_services[0]['Spec']['Name']
def test_inspect_service_by_id(self):
svc_name, svc_id = self.create_simple_service()
svc_info = self.client.inspect_service(svc_id)
assert 'ID' in svc_info
assert svc_info['ID'] == svc_id['ID']
def test_inspect_service_by_name(self):
svc_name, svc_id = self.create_simple_service()
svc_info = self.client.inspect_service(svc_name)
assert 'ID' in svc_info
assert svc_info['ID'] == svc_id['ID']
def test_remove_service_by_id(self):
svc_name, svc_id = self.create_simple_service()
assert self.client.remove_service(svc_id)
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 0
def test_remove_service_by_name(self):
svc_name, svc_id = self.create_simple_service()
assert self.client.remove_service(svc_name)
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 0
def test_create_service_simple(self):
name, svc_id = self.create_simple_service()
assert self.client.inspect_service(svc_id)
services = self.client.services(filters={'name': name})
assert len(services) == 1
assert services[0]['ID'] == svc_id['ID']
@requires_api_version('1.25')
@requires_experimental(until='1.29')
def test_service_logs(self):
name, svc_id = self.create_simple_service()
assert self.get_service_container(name, include_stopped=True)
attempts = 20
while True:
if attempts == 0:
self.fail('No service logs produced by endpoint')
logs = self.client.service_logs(svc_id, stdout=True, is_tty=False)
try:
log_line = next(logs)
except StopIteration:
attempts -= 1
time.sleep(0.1)
continue
else:
break
if six.PY3:
log_line = log_line.decode('utf-8')
assert 'hello\n' in log_line
def test_create_service_custom_log_driver(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
log_cfg = docker.types.DriverConfig('none')
task_tmpl = docker.types.TaskTemplate(
container_spec, log_driver=log_cfg
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
res_template = svc_info['Spec']['TaskTemplate']
assert 'LogDriver' in res_template
assert 'Name' in res_template['LogDriver']
assert res_template['LogDriver']['Name'] == 'none'
def test_create_service_with_volume_mount(self):
vol_name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['ls'],
mounts=[
docker.types.Mount(target='/test', source=vol_name)
]
)
self.tmp_volumes.append(vol_name)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'Mounts' in cspec
assert len(cspec['Mounts']) == 1
mount = cspec['Mounts'][0]
assert mount['Target'] == '/test'
assert mount['Source'] == vol_name
assert mount['Type'] == 'volume'
def test_create_service_with_resources_constraints(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
resources = docker.types.Resources(
cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
)
task_tmpl = docker.types.TaskTemplate(
container_spec, resources=resources
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
res_template = svc_info['Spec']['TaskTemplate']
assert 'Resources' in res_template
assert res_template['Resources']['Limits'] == resources['Limits']
assert res_template['Resources']['Reservations'] == resources[
'Reservations'
]
def test_create_service_with_update_config(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, update_config=update_config, name=name
)
svc_info = self.client.inspect_service(svc_id)
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Parallelism'] == uc['Parallelism']
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
@requires_api_version('1.25')
def test_create_service_with_update_config_monitor(self):
container_spec = docker.types.ContainerSpec('busybox', ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
monitor=300000000, max_failure_ratio=0.4
)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, update_config=update_config, name=name
)
svc_info = self.client.inspect_service(svc_id)
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Monitor'] == uc['Monitor']
assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
def test_create_service_with_restart_policy(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
policy = docker.types.RestartPolicy(
docker.types.RestartPolicy.condition_types.ANY,
delay=5, max_attempts=5
)
task_tmpl = docker.types.TaskTemplate(
container_spec, restart_policy=policy
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
def test_create_service_with_custom_networks(self):
net1 = self.client.create_network(
'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net1['Id'])
net2 = self.client.create_network(
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, networks=[
'dockerpytest_1', {'Target': 'dockerpytest_2'}
]
)
svc_info = self.client.inspect_service(svc_id)
assert 'Networks' in svc_info['Spec']
assert svc_info['Spec']['Networks'] == [
{'Target': net1['Id']}, {'Target': net2['Id']}
]
def test_create_service_with_placement(self):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=['node.id=={}'.format(node_id)]
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
{'Constraints': ['node.id=={}'.format(node_id)]})
def test_create_service_with_placement_object(self):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
placemt = docker.types.Placement(
constraints=['node.id=={}'.format(node_id)]
)
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
@requires_api_version('1.30')
def test_create_service_with_placement_platform(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
@requires_api_version('1.27')
def test_create_service_with_placement_preferences(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
placemt = docker.types.Placement(preferences=[
{'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
def test_create_service_with_endpoint_spec(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
12357: (1990, 'udp'),
12562: (678,),
53243: 8080,
})
svc_id = self.client.create_service(
task_tmpl, name=name, endpoint_spec=endpoint_spec
)
svc_info = self.client.inspect_service(svc_id)
ports = svc_info['Spec']['EndpointSpec']['Ports']
for port in ports:
if port['PublishedPort'] == 12562:
assert port['TargetPort'] == 678
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 53243:
assert port['TargetPort'] == 8080
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 12357:
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
assert len(ports) == 3
def test_create_service_with_env(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'Env' in con_spec
assert con_spec['Env'] == ['DOCKER_PY_TEST=1']
@requires_api_version('1.25')
def test_create_service_with_tty(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['true'], tty=True
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'TTY' in con_spec
assert con_spec['TTY'] is True
@requires_api_version('1.25')
def test_create_service_with_tty_dict(self):
container_spec = {
'Image': BUSYBOX,
'Command': ['true'],
'TTY': True
}
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'TTY' in con_spec
assert con_spec['TTY'] is True
def test_create_service_global_mode(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, mode='global'
)
svc_info = self.client.inspect_service(svc_id)
assert 'Mode' in svc_info['Spec']
assert 'Global' in svc_info['Spec']['Mode']
def test_create_service_replicated_mode(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name,
mode=docker.types.ServiceMode('replicated', 5)
)
svc_info = self.client.inspect_service(svc_id)
assert 'Mode' in svc_info['Spec']
assert 'Replicated' in svc_info['Spec']['Mode']
assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5}
@requires_api_version('1.25')
def test_update_service_force_update(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0
version_index = svc_info['Version']['Index']
task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
self.client.update_service(name, version_index, task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10
@requires_api_version('1.25')
def test_create_service_with_secret(self):
secret_name = 'favorite_touhou'
secret_data = b'phantasmagoria of flower view'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], secrets=[secret_ref]
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
assert secrets[0] == secret_ref
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /run/secrets/{0}'.format(secret_name)
)
assert self.client.exec_start(exec_id) == secret_data
@requires_api_version('1.25')
def test_create_service_with_unicode_secret(self):
secret_name = 'favorite_touhou'
secret_data = u'東方花映塚'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], secrets=[secret_ref]
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
assert secrets[0] == secret_ref
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /run/secrets/{0}'.format(secret_name)
)
container_secret = self.client.exec_start(exec_id)
container_secret = container_secret.decode('utf-8')
assert container_secret == secret_data
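
# test_create_service_with_endpoint_spec relies on the three accepted port
# shorthands: {published: (target, protocol)}, {published: (target,)} and
# {published: target}, with the protocol defaulting to 'tcp'. A minimal
# sketch of the same mapping (port numbers are illustrative):
def _sketch_endpoint_spec():
    return docker.types.EndpointSpec(ports={
        8080: 80,           # 8080/tcp on the host -> port 80 in the task
        5353: (53, 'udp'),  # explicit protocol
    })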

# docker-2.5.1/tests/integration/__init__.py (empty)

# docker-2.5.1/tests/integration/api_volume_test.py
import docker
import pytest
from ..helpers import requires_api_version
from .base import BaseAPIIntegrationTest
@requires_api_version('1.21')
class TestVolumes(BaseAPIIntegrationTest):
def test_create_volume(self):
name = 'perfectcherryblossom'
self.tmp_volumes.append(name)
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
def test_create_volume_invalid_driver(self):
driver_name = 'invalid.driver'
with pytest.raises(docker.errors.NotFound):
self.client.create_volume('perfectcherryblossom', driver_name)
def test_list_volumes(self):
name = 'imperishablenight'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.volumes()
self.assertIn('Volumes', result)
volumes = result['Volumes']
self.assertIn(volume_info, volumes)
def test_inspect_volume(self):
name = 'embodimentofscarletdevil'
self.tmp_volumes.append(name)
volume_info = self.client.create_volume(name)
result = self.client.inspect_volume(name)
self.assertEqual(volume_info, result)
def test_inspect_nonexistent_volume(self):
name = 'embodimentofscarletdevil'
with pytest.raises(docker.errors.NotFound):
self.client.inspect_volume(name)
def test_remove_volume(self):
name = 'shootthebullet'
self.tmp_volumes.append(name)
self.client.create_volume(name)
self.client.remove_volume(name)
@requires_api_version('1.25')
def test_force_remove_volume(self):
name = 'shootthebullet'
self.tmp_volumes.append(name)
self.client.create_volume(name)
self.client.remove_volume(name, force=True)
@requires_api_version('1.25')
def test_prune_volumes(self):
name = 'hopelessmasquerade'
self.client.create_volume(name)
self.tmp_volumes.append(name)
result = self.client.prune_volumes()
assert name in result['VolumesDeleted']
def test_remove_nonexistent_volume(self):
name = 'shootthebullet'
with pytest.raises(docker.errors.NotFound):
self.client.remove_volume(name)

# docker-2.5.1/tests/integration/api_exec_test.py
from docker.utils.socket import next_frame_size
from docker.utils.socket import read_exactly
from .base import BaseAPIIntegrationTest, BUSYBOX
from ..helpers import requires_api_version
class ExecTest(BaseAPIIntegrationTest):
def test_execute_command(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, ['echo', 'hello'])
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello\n')
def test_exec_command_string(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'echo hello world')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'hello world\n')
def test_exec_command_as_user(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami', user='default')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'default\n')
def test_exec_command_as_root(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'whoami')
self.assertIn('Id', res)
exec_log = self.client.exec_start(res)
self.assertEqual(exec_log, b'root\n')
def test_exec_command_streaming(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.tmp_containers.append(id)
self.client.start(id)
exec_id = self.client.exec_create(id, ['echo', 'hello\nworld'])
self.assertIn('Id', exec_id)
res = b''
for chunk in self.client.exec_start(exec_id, stream=True):
res += chunk
self.assertEqual(res, b'hello\nworld\n')
def test_exec_start_socket(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
self.tmp_containers.append(container_id)
line = 'yay, interactive exec!'
# `echo` appends CRLF, `printf` doesn't
exec_id = self.client.exec_create(
container_id, ['printf', line], tty=True)
self.assertIn('Id', exec_id)
socket = self.client.exec_start(exec_id, socket=True)
self.addCleanup(socket.close)
next_size = next_frame_size(socket)
self.assertEqual(next_size, len(line))
data = read_exactly(socket, next_size)
self.assertEqual(data.decode('utf-8'), line)
def test_exec_start_detached(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
container_id = container['Id']
self.client.start(container_id)
self.tmp_containers.append(container_id)
exec_id = self.client.exec_create(
container_id, ['printf', "asdqwe"])
self.assertIn('Id', exec_id)
response = self.client.exec_start(exec_id, detach=True)
self.assertEqual(response, "")
def test_exec_inspect(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
exec_id = self.client.exec_create(id, ['mkdir', '/does/not/exist'])
self.assertIn('Id', exec_id)
self.client.exec_start(exec_id)
exec_info = self.client.exec_inspect(exec_id)
self.assertIn('ExitCode', exec_info)
self.assertNotEqual(exec_info['ExitCode'], 0)
@requires_api_version('1.25')
def test_exec_command_with_env(self):
container = self.client.create_container(BUSYBOX, 'cat',
detach=True, stdin_open=True)
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.exec_create(id, 'env', environment=["X=Y"])
assert 'Id' in res
exec_log = self.client.exec_start(res)
assert b'X=Y\n' in exec_log

# docker-2.5.1/tests/integration/models_nodes_test.py
import unittest
import docker
from .. import helpers
from .base import TEST_API_VERSION
class NodesTest(unittest.TestCase):
def setUp(self):
helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def tearDown(self):
helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION))
def test_list_get_update(self):
client = docker.from_env(version=TEST_API_VERSION)
client.swarm.init('eth0', listen_addr=helpers.swarm_listen_addr())
nodes = client.nodes.list()
assert len(nodes) == 1
assert nodes[0].attrs['Spec']['Role'] == 'manager'
node = client.nodes.get(nodes[0].id)
assert node.id == nodes[0].id
assert node.attrs['Spec']['Role'] == 'manager'
assert node.version > 0
node = client.nodes.list()[0]
assert not node.attrs['Spec'].get('Labels')
node.update({
'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
})
node.reload()
assert node.attrs['Spec']['Labels'] == {'foo': 'bar'}
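
# Node.update() sends a complete Spec back to the daemon, which is why the
# test passes Availability/Name/Role alongside the new Labels rather than a
# one-key diff. A hedged sketch of a label-only edit that preserves the
# current spec (label values are illustrative):
def _sketch_label_node(client):
    node = client.nodes.list()[0]
    spec = dict(node.attrs['Spec'])  # start from the full current spec
    spec['Labels'] = {'env': 'test'}
    node.update(spec)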

# docker-2.5.1/tests/helpers.py
import functools
import os
import os.path
import random
import tarfile
import tempfile
import time
import docker
import pytest
def make_tree(dirs, files):
base = tempfile.mkdtemp()
for path in dirs:
os.makedirs(os.path.join(base, path))
for path in files:
with open(os.path.join(base, path), 'w') as f:
f.write("content")
return base
def simple_tar(path):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
abs_path = os.path.abspath(path)
t.add(abs_path, arcname=os.path.basename(path), recursive=False)
t.close()
f.seek(0)
return f
def untar_file(tardata, filename):
with tarfile.open(mode='r', fileobj=tardata) as t:
f = t.extractfile(filename)
result = f.read()
f.close()
return result
def requires_api_version(version):
test_version = os.environ.get(
'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
)
return pytest.mark.skipif(
docker.utils.version_lt(test_version, version),
reason="API version is too low (< {0})".format(version)
)
def requires_experimental(until=None):
test_version = os.environ.get(
'DOCKER_TEST_API_VERSION', docker.constants.DEFAULT_DOCKER_API_VERSION
)
def req_exp(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if not self.client.info()['ExperimentalBuild']:
pytest.skip('Feature requires Docker Engine experimental mode')
return f(self, *args, **kwargs)
if until and docker.utils.version_gte(test_version, until):
return f
return wrapped
return req_exp
def wait_on_condition(condition, delay=0.1, timeout=40):
start_time = time.time()
while not condition():
if time.time() - start_time > timeout:
raise AssertionError("Timeout: %s" % condition)
time.sleep(delay)
def random_name():
return u'dockerpytest_{0:x}'.format(random.getrandbits(64))
def force_leave_swarm(client):
"""Actually force leave a Swarm. There seems to be a bug in Swarm that
occasionally throws "context deadline exceeded" errors when leaving."""
while True:
try:
if isinstance(client, docker.DockerClient):
return client.swarm.leave(force=True)
            return client.leave_swarm(force=True)  # client is an APIClient
except docker.errors.APIError as e:
if e.explanation == "context deadline exceeded":
continue
else:
return
def swarm_listen_addr():
return '0.0.0.0:{0}'.format(random.randrange(10000, 25000))
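
# wait_on_condition() above is the suite's generic poll-until helper. A
# usage sketch with a hypothetical predicate (not part of the suite):
def _sketch_wait_running(client, container_id):
    wait_on_condition(
        lambda: client.inspect_container(container_id)['State']['Running'],
        delay=0.5, timeout=10,
    )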

# docker-2.5.1/tests/unit/

# docker-2.5.1/tests/unit/utils_json_stream_test.py
# encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from docker.utils.json_stream import json_splitter, stream_as_text, json_stream
class TestJsonSplitter(object):
def test_json_splitter_no_object(self):
data = '{"foo": "bar'
assert json_splitter(data) is None
def test_json_splitter_with_object(self):
data = '{"foo": "bar"}\n \n{"next": "obj"}'
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
def test_json_splitter_leading_whitespace(self):
data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
class TestStreamAsText(object):
def test_stream_with_non_utf_unicode_character(self):
stream = [b'\xed\xf3\xf3']
output, = stream_as_text(stream)
assert output == '���'
def test_stream_with_utf_character(self):
stream = ['ěĝ'.encode('utf-8')]
output, = stream_as_text(stream)
assert output == 'ěĝ'
class TestJsonStream(object):
def test_with_falsy_entries(self):
stream = [
'{"one": "two"}\n{}\n',
"[1, 2, 3]\n[]\n",
]
output = list(json_stream(stream))
assert output == [
{'one': 'two'},
{},
[1, 2, 3],
[],
]
def test_with_leading_whitespace(self):
stream = [
'\n \r\n {"one": "two"}{"x": 1}',
' {"three": "four"}\t\t{"x": 2}'
]
output = list(json_stream(stream))
assert output == [
{'one': 'two'},
{'x': 1},
{'three': 'four'},
{'x': 2}
]
docker-2.5.1/tests/unit/fake_stat.py 0000664 0001750 0001750 00000007255 13021666666 020603 0 ustar joffrey joffrey 0000000 0000000 OBJ = {
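    # Canned payload for the container stats endpoint;
    # fake_api.get_fake_stats() returns this object verbatim.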
"read": "2015-02-11T19:20:46.667237763+02:00",
"network": {
"rx_bytes": 567224,
"rx_packets": 3773,
"rx_errors": 0,
"rx_dropped": 0,
"tx_bytes": 1176,
"tx_packets": 13,
"tx_errors": 0,
"tx_dropped": 0
},
"cpu_stats": {
"cpu_usage": {
"total_usage": 157260874053,
"percpu_usage": [
52196306950,
24118413549,
53292684398,
27653469156
],
"usage_in_kernelmode": 37140000000,
"usage_in_usermode": 62140000000
},
"system_cpu_usage": 3.0881377e+14,
"throttling_data": {
"periods": 0,
"throttled_periods": 0,
"throttled_time": 0
}
},
"memory_stats": {
"usage": 179314688,
"max_usage": 258166784,
"stats": {
"active_anon": 90804224,
"active_file": 2195456,
"cache": 3096576,
"hierarchical_memory_limit": 1.844674407371e+19,
"inactive_anon": 85516288,
"inactive_file": 798720,
"mapped_file": 2646016,
"pgfault": 101034,
"pgmajfault": 1207,
"pgpgin": 115814,
"pgpgout": 75613,
"rss": 176218112,
"rss_huge": 12582912,
"total_active_anon": 90804224,
"total_active_file": 2195456,
"total_cache": 3096576,
"total_inactive_anon": 85516288,
"total_inactive_file": 798720,
"total_mapped_file": 2646016,
"total_pgfault": 101034,
"total_pgmajfault": 1207,
"total_pgpgin": 115814,
"total_pgpgout": 75613,
"total_rss": 176218112,
"total_rss_huge": 12582912,
"total_unevictable": 0,
"total_writeback": 0,
"unevictable": 0,
"writeback": 0
},
"failcnt": 0,
"limit": 8039038976
},
"blkio_stats": {
"io_service_bytes_recursive": [
{
"major": 8,
"minor": 0,
"op": "Read",
"value": 72843264
}, {
"major": 8,
"minor": 0,
"op": "Write",
"value": 4096
}, {
"major": 8,
"minor": 0,
"op": "Sync",
"value": 4096
}, {
"major": 8,
"minor": 0,
"op": "Async",
"value": 72843264
}, {
"major": 8,
"minor": 0,
"op": "Total",
"value": 72847360
}
],
"io_serviced_recursive": [
{
"major": 8,
"minor": 0,
"op": "Read",
"value": 10581
}, {
"major": 8,
"minor": 0,
"op": "Write",
"value": 1
}, {
"major": 8,
"minor": 0,
"op": "Sync",
"value": 1
}, {
"major": 8,
"minor": 0,
"op": "Async",
"value": 10581
}, {
"major": 8,
"minor": 0,
"op": "Total",
"value": 10582
}
],
"io_queue_recursive": [],
"io_service_time_recursive": [],
"io_wait_time_recursive": [],
"io_merged_recursive": [],
"io_time_recursive": [],
"sectors_recursive": []
}
}
docker-2.5.1/tests/unit/swarm_test.py 0000664 0001750 0001750 00000001607 13063113557 021015 0 ustar joffrey joffrey 0000000 0000000 # -*- coding: utf-8 -*-
import json
from . import fake_api
from ..helpers import requires_api_version
from .api_test import BaseAPIClientTest, url_prefix, fake_request
class SwarmTest(BaseAPIClientTest):
@requires_api_version('1.24')
def test_node_update(self):
node_spec = {
'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
self.client.update_node(
node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'nodes/24ifsmvkjbyhk/update?version=1'
)
self.assertEqual(
json.loads(args[1]['data']), node_spec
)
self.assertEqual(
args[1]['headers']['Content-Type'], 'application/json'
)
docker-2.5.1/tests/unit/client_test.py 0000664 0001750 0001750 00000007407 13124577310 021145 0 ustar joffrey joffrey 0000000 0000000 import datetime
import docker
from docker.utils import kwargs_from_env
from docker.constants import (
DEFAULT_DOCKER_API_VERSION, DEFAULT_TIMEOUT_SECONDS
)
import os
import unittest
from . import fake_api
try:
from unittest import mock
except ImportError:
import mock
TEST_CERT_DIR = os.path.join(os.path.dirname(__file__), 'testdata/certs')
class ClientTest(unittest.TestCase):
@mock.patch('docker.api.APIClient.events')
def test_events(self, mock_func):
since = datetime.datetime(2016, 1, 1, 0, 0)
mock_func.return_value = fake_api.get_fake_events()[1]
client = docker.from_env()
assert client.events(since=since) == mock_func.return_value
mock_func.assert_called_with(since=since)
@mock.patch('docker.api.APIClient.info')
def test_info(self, mock_func):
mock_func.return_value = fake_api.get_fake_info()[1]
client = docker.from_env()
assert client.info() == mock_func.return_value
mock_func.assert_called_with()
@mock.patch('docker.api.APIClient.ping')
def test_ping(self, mock_func):
mock_func.return_value = True
client = docker.from_env()
assert client.ping() is True
mock_func.assert_called_with()
@mock.patch('docker.api.APIClient.version')
def test_version(self, mock_func):
mock_func.return_value = fake_api.get_fake_version()[1]
client = docker.from_env()
assert client.version() == mock_func.return_value
mock_func.assert_called_with()
def test_call_api_client_method(self):
client = docker.from_env()
with self.assertRaises(AttributeError) as cm:
client.create_container()
s = str(cm.exception)
assert "'DockerClient' object has no attribute 'create_container'" in s
assert "this method is now on the object APIClient" in s
with self.assertRaises(AttributeError) as cm:
client.abcdef()
s = str(cm.exception)
assert "'DockerClient' object has no attribute 'abcdef'" in s
assert "this method is now on the object APIClient" not in s
def test_call_containers(self):
client = docker.DockerClient(**kwargs_from_env())
with self.assertRaises(TypeError) as cm:
client.containers()
s = str(cm.exception)
assert "'ContainerCollection' object is not callable" in s
assert "docker.APIClient" in s
class FromEnvTest(unittest.TestCase):
def setUp(self):
self.os_environ = os.environ.copy()
def tearDown(self):
os.environ = self.os_environ
def test_from_env(self):
"""Test that environment variables are passed through to
utils.kwargs_from_env(). KwargsFromEnvTest tests that environment
variables are parsed correctly."""
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
client = docker.from_env()
self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
def test_from_env_with_version(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
client = docker.from_env(version='2.32')
self.assertEqual(client.api.base_url, "https://192.168.59.103:2376")
self.assertEqual(client.api._version, '2.32')
def test_from_env_without_version_uses_default(self):
client = docker.from_env()
self.assertEqual(client.api._version, DEFAULT_DOCKER_API_VERSION)
def test_from_env_without_timeout_uses_default(self):
client = docker.from_env()
self.assertEqual(client.api.timeout, DEFAULT_TIMEOUT_SECONDS)
docker-2.5.1/tests/unit/models_services_test.py 0000664 0001750 0001750 00000003652 13064275451 023057 0 ustar joffrey joffrey 0000000 0000000 import unittest
from docker.models.services import _get_create_service_kwargs
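# _get_create_service_kwargs() splits the flat keyword arguments accepted by
# ServiceCollection.create() into top-level service fields plus a nested
# task_template dict; the test below pins down exactly how that split falls.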
class CreateServiceKwargsTest(unittest.TestCase):
def test_get_create_service_kwargs(self):
kwargs = _get_create_service_kwargs('test', {
'image': 'foo',
'command': 'true',
'name': 'somename',
'labels': {'key': 'value'},
'hostname': 'test_host',
'mode': 'global',
'update_config': {'update': 'config'},
'networks': ['somenet'],
'endpoint_spec': {'blah': 'blah'},
'container_labels': {'containerkey': 'containervalue'},
'resources': {'foo': 'bar'},
'restart_policy': {'restart': 'policy'},
'log_driver': 'logdriver',
'log_driver_options': {'foo': 'bar'},
'args': ['some', 'args'],
'env': {'FOO': 'bar'},
'workdir': '/',
'user': 'bob',
'mounts': [{'some': 'mounts'}],
'stop_grace_period': 5,
'constraints': ['foo=bar'],
})
task_template = kwargs.pop('task_template')
assert kwargs == {
'name': 'somename',
'labels': {'key': 'value'},
'mode': 'global',
'update_config': {'update': 'config'},
'networks': ['somenet'],
'endpoint_spec': {'blah': 'blah'},
}
assert set(task_template.keys()) == set([
'ContainerSpec', 'Resources', 'RestartPolicy', 'Placement',
'LogDriver'
])
assert task_template['Placement'] == {'Constraints': ['foo=bar']}
assert task_template['LogDriver'] == {
'Name': 'logdriver',
'Options': {'foo': 'bar'}
}
assert set(task_template['ContainerSpec'].keys()) == set([
'Image', 'Command', 'Args', 'Hostname', 'Env', 'Dir', 'User',
'Labels', 'Mounts', 'StopGracePeriod'
])
docker-2.5.1/tests/unit/testdata/ 0000775 0001750 0001750 00000000000 13147142650 020057 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/tests/unit/testdata/certs/ 0000775 0001750 0001750 00000000000 13147142650 021177 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/tests/unit/testdata/certs/cert.pem 0000664 0001750 0001750 00000000000 13021666666 022636 0 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/tests/unit/testdata/certs/key.pem 0000664 0001750 0001750 00000000000 13021666666 022471 0 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/tests/unit/testdata/certs/ca.pem 0000664 0001750 0001750 00000000000 13021666666 022264 0 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/tests/unit/api_build_test.py 0000664 0001750 0001750 00000012141 13106703741 021605 0 ustar joffrey joffrey 0000000 0000000 import gzip
import io
import docker
from docker import auth
from .api_test import BaseAPIClientTest, fake_request, url_prefix
class BuildTest(BaseAPIClientTest):
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script)
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script, pull=True)
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script, stream=True)
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
self.client.build(fileobj=context, custom_context=True)
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
gz_context = gzip.GzipFile(fileobj=context)
self.client.build(
fileobj=gz_context,
custom_context=True,
encoding="gzip"
)
def test_build_remote_with_registry_auth(self):
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
expected_params = {'t': None, 'q': False, 'dockerfile': None,
'rm': False, 'nocache': False, 'pull': False,
'forcerm': False,
'remote': 'https://github.com/docker-library/mongo'}
expected_headers = {
'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
self.client.build(path='https://github.com/docker-library/mongo')
fake_request.assert_called_with(
'POST',
url_prefix + 'build',
stream=True,
data=None,
headers=expected_headers,
params=expected_params,
timeout=None
)
def test_build_container_with_named_dockerfile(self):
self.client.build('.', dockerfile='nameddockerfile')
def test_build_container_with_container_limits(self):
self.client.build('.', container_limits={
'memory': 1024 * 1024,
'cpusetcpus': 1,
'cpushares': 1000,
'memswap': 1024 * 1024 * 8
})
def test_build_container_invalid_container_limits(self):
self.assertRaises(
docker.errors.DockerException,
lambda: self.client.build('.', container_limits={
'foo': 'bar'
})
)
def test_set_auth_headers_with_empty_dict_and_auth_configs(self):
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
headers = {}
expected_headers = {
'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
self.client._set_auth_headers(headers)
self.assertEqual(headers, expected_headers)
def test_set_auth_headers_with_dict_and_auth_configs(self):
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
headers = {'foo': 'bar'}
expected_headers = {
'foo': 'bar',
'X-Registry-Config': auth.encode_header(self.client._auth_configs)}
self.client._set_auth_headers(headers)
self.assertEqual(headers, expected_headers)
def test_set_auth_headers_with_dict_and_no_auth_configs(self):
headers = {'foo': 'bar'}
expected_headers = {
'foo': 'bar'
}
self.client._set_auth_headers(headers)
self.assertEqual(headers, expected_headers)
docker-2.5.1/tests/unit/dockertypes_test.py 0000664 0001750 0001750 00000040605 13106703752 022221 0 ustar joffrey joffrey 0000000 0000000 # -*- coding: utf-8 -*-
import unittest
import warnings
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import InvalidArgument, InvalidVersion
from docker.types import (
ContainerConfig, ContainerSpec, EndpointConfig, HostConfig, IPAMConfig,
IPAMPool, LogConfig, Mount, ServiceMode, Ulimit,
)
try:
from unittest import mock
except ImportError:
import mock
def create_host_config(*args, **kwargs):
return HostConfig(*args, **kwargs)
class HostConfigTest(unittest.TestCase):
def test_create_host_config_no_options(self):
config = create_host_config(version='1.19')
self.assertFalse('NetworkMode' in config)
def test_create_host_config_no_options_newer_api_version(self):
config = create_host_config(version='1.20')
self.assertEqual(config['NetworkMode'], 'default')
def test_create_host_config_invalid_cpu_cfs_types(self):
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_quota='0')
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_period='0')
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_quota=23.11)
with pytest.raises(TypeError):
create_host_config(version='1.20', cpu_period=1999.0)
def test_create_host_config_with_cpu_quota(self):
config = create_host_config(version='1.20', cpu_quota=1999)
self.assertEqual(config.get('CpuQuota'), 1999)
def test_create_host_config_with_cpu_period(self):
config = create_host_config(version='1.20', cpu_period=1999)
self.assertEqual(config.get('CpuPeriod'), 1999)
def test_create_host_config_with_blkio_constraints(self):
blkio_rate = [{"Path": "/dev/sda", "Rate": 1000}]
config = create_host_config(version='1.22',
blkio_weight=1999,
blkio_weight_device=blkio_rate,
device_read_bps=blkio_rate,
device_write_bps=blkio_rate,
device_read_iops=blkio_rate,
device_write_iops=blkio_rate)
self.assertEqual(config.get('BlkioWeight'), 1999)
self.assertTrue(config.get('BlkioWeightDevice') is blkio_rate)
self.assertTrue(config.get('BlkioDeviceReadBps') is blkio_rate)
self.assertTrue(config.get('BlkioDeviceWriteBps') is blkio_rate)
self.assertTrue(config.get('BlkioDeviceReadIOps') is blkio_rate)
self.assertTrue(config.get('BlkioDeviceWriteIOps') is blkio_rate)
self.assertEqual(blkio_rate[0]['Path'], "/dev/sda")
self.assertEqual(blkio_rate[0]['Rate'], 1000)
def test_create_host_config_with_shm_size(self):
config = create_host_config(version='1.22', shm_size=67108864)
self.assertEqual(config.get('ShmSize'), 67108864)
def test_create_host_config_with_shm_size_in_mb(self):
config = create_host_config(version='1.22', shm_size='64M')
self.assertEqual(config.get('ShmSize'), 67108864)
def test_create_host_config_with_oom_kill_disable(self):
config = create_host_config(version='1.20', oom_kill_disable=True)
self.assertEqual(config.get('OomKillDisable'), True)
self.assertRaises(
InvalidVersion, lambda: create_host_config(version='1.18.3',
oom_kill_disable=True))
def test_create_host_config_with_userns_mode(self):
config = create_host_config(version='1.23', userns_mode='host')
self.assertEqual(config.get('UsernsMode'), 'host')
self.assertRaises(
InvalidVersion, lambda: create_host_config(version='1.22',
userns_mode='host'))
self.assertRaises(
ValueError, lambda: create_host_config(version='1.23',
userns_mode='host12'))
def test_create_host_config_with_oom_score_adj(self):
config = create_host_config(version='1.22', oom_score_adj=100)
self.assertEqual(config.get('OomScoreAdj'), 100)
self.assertRaises(
InvalidVersion, lambda: create_host_config(version='1.21',
oom_score_adj=100))
self.assertRaises(
TypeError, lambda: create_host_config(version='1.22',
oom_score_adj='100'))
def test_create_host_config_with_dns_opt(self):
tested_opts = ['use-vc', 'no-tld-query']
config = create_host_config(version='1.21', dns_opt=tested_opts)
dns_opts = config.get('DnsOptions')
self.assertTrue('use-vc' in dns_opts)
self.assertTrue('no-tld-query' in dns_opts)
self.assertRaises(
InvalidVersion, lambda: create_host_config(version='1.20',
dns_opt=tested_opts))
def test_create_host_config_with_mem_reservation(self):
config = create_host_config(version='1.21', mem_reservation=67108864)
self.assertEqual(config.get('MemoryReservation'), 67108864)
self.assertRaises(
InvalidVersion, lambda: create_host_config(
version='1.20', mem_reservation=67108864))
def test_create_host_config_with_kernel_memory(self):
config = create_host_config(version='1.21', kernel_memory=67108864)
self.assertEqual(config.get('KernelMemory'), 67108864)
self.assertRaises(
InvalidVersion, lambda: create_host_config(
version='1.20', kernel_memory=67108864))
def test_create_host_config_with_pids_limit(self):
config = create_host_config(version='1.23', pids_limit=1024)
self.assertEqual(config.get('PidsLimit'), 1024)
with pytest.raises(InvalidVersion):
create_host_config(version='1.22', pids_limit=1024)
with pytest.raises(TypeError):
create_host_config(version='1.23', pids_limit='1024')
def test_create_host_config_with_isolation(self):
config = create_host_config(version='1.24', isolation='hyperv')
self.assertEqual(config.get('Isolation'), 'hyperv')
with pytest.raises(InvalidVersion):
create_host_config(version='1.23', isolation='hyperv')
with pytest.raises(TypeError):
create_host_config(
version='1.24', isolation={'isolation': 'hyperv'}
)
def test_create_host_config_pid_mode(self):
with pytest.raises(ValueError):
create_host_config(version='1.23', pid_mode='baccab125')
config = create_host_config(version='1.23', pid_mode='host')
assert config.get('PidMode') == 'host'
config = create_host_config(version='1.24', pid_mode='baccab125')
assert config.get('PidMode') == 'baccab125'
def test_create_host_config_invalid_mem_swappiness(self):
with pytest.raises(TypeError):
create_host_config(version='1.24', mem_swappiness='40')
def test_create_host_config_with_volume_driver(self):
with pytest.raises(InvalidVersion):
create_host_config(version='1.20', volume_driver='local')
config = create_host_config(version='1.21', volume_driver='local')
assert config.get('VolumeDriver') == 'local'
def test_create_host_config_invalid_cpu_count_types(self):
with pytest.raises(TypeError):
create_host_config(version='1.25', cpu_count='1')
def test_create_host_config_with_cpu_count(self):
config = create_host_config(version='1.25', cpu_count=2)
self.assertEqual(config.get('CpuCount'), 2)
self.assertRaises(
InvalidVersion, lambda: create_host_config(
version='1.24', cpu_count=1))
def test_create_host_config_invalid_cpu_percent_types(self):
with pytest.raises(TypeError):
create_host_config(version='1.25', cpu_percent='1')
def test_create_host_config_with_cpu_percent(self):
config = create_host_config(version='1.25', cpu_percent=15)
self.assertEqual(config.get('CpuPercent'), 15)
self.assertRaises(
InvalidVersion, lambda: create_host_config(
version='1.24', cpu_percent=10))
def test_create_host_config_invalid_nano_cpus_types(self):
with pytest.raises(TypeError):
create_host_config(version='1.25', nano_cpus='0')
def test_create_host_config_with_nano_cpus(self):
config = create_host_config(version='1.25', nano_cpus=1000)
self.assertEqual(config.get('NanoCpus'), 1000)
self.assertRaises(
InvalidVersion, lambda: create_host_config(
version='1.24', nano_cpus=1))
class ContainerConfigTest(unittest.TestCase):
def test_create_container_config_volume_driver_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
ContainerConfig(
version='1.21', image='scratch', command=None,
volume_driver='local'
)
assert len(w) == 1
assert 'The volume_driver option has been moved' in str(w[0].message)
class ContainerSpecTest(unittest.TestCase):
def test_parse_mounts(self):
spec = ContainerSpec(
image='scratch', mounts=[
'/local:/container',
'/local2:/container2:ro',
Mount(target='/target', source='/source')
]
)
assert 'Mounts' in spec
assert len(spec['Mounts']) == 3
for mount in spec['Mounts']:
assert isinstance(mount, Mount)
class UlimitTest(unittest.TestCase):
def test_create_host_config_dict_ulimit(self):
ulimit_dct = {'name': 'nofile', 'soft': 8096}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj.name, ulimit_dct['name'])
self.assertEqual(ulimit_obj.soft, ulimit_dct['soft'])
self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
def test_create_host_config_dict_ulimit_capitals(self):
ulimit_dct = {'Name': 'nofile', 'Soft': 8096, 'Hard': 8096 * 4}
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj.name, ulimit_dct['Name'])
self.assertEqual(ulimit_obj.soft, ulimit_dct['Soft'])
self.assertEqual(ulimit_obj.hard, ulimit_dct['Hard'])
self.assertEqual(ulimit_obj['Soft'], ulimit_obj.soft)
def test_create_host_config_obj_ulimit(self):
ulimit_dct = Ulimit(name='nofile', soft=8096)
config = create_host_config(
ulimits=[ulimit_dct], version=DEFAULT_DOCKER_API_VERSION
)
self.assertIn('Ulimits', config)
self.assertEqual(len(config['Ulimits']), 1)
ulimit_obj = config['Ulimits'][0]
self.assertTrue(isinstance(ulimit_obj, Ulimit))
self.assertEqual(ulimit_obj, ulimit_dct)
def test_ulimit_invalid_type(self):
self.assertRaises(ValueError, lambda: Ulimit(name=None))
self.assertRaises(ValueError, lambda: Ulimit(name='hello', soft='123'))
self.assertRaises(ValueError, lambda: Ulimit(name='hello', hard='456'))
class LogConfigTest(unittest.TestCase):
def test_create_host_config_dict_logconfig(self):
dct = {'type': LogConfig.types.SYSLOG, 'config': {'key1': 'val1'}}
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=dct
)
self.assertIn('LogConfig', config)
self.assertTrue(isinstance(config['LogConfig'], LogConfig))
self.assertEqual(dct['type'], config['LogConfig'].type)
def test_create_host_config_obj_logconfig(self):
obj = LogConfig(type=LogConfig.types.SYSLOG, config={'key1': 'val1'})
config = create_host_config(
version=DEFAULT_DOCKER_API_VERSION, log_config=obj
)
self.assertIn('LogConfig', config)
self.assertTrue(isinstance(config['LogConfig'], LogConfig))
self.assertEqual(obj, config['LogConfig'])
def test_logconfig_invalid_config_type(self):
with pytest.raises(ValueError):
LogConfig(type=LogConfig.types.JSON, config='helloworld')
class EndpointConfigTest(unittest.TestCase):
def test_create_endpoint_config_with_aliases(self):
config = EndpointConfig(version='1.22', aliases=['foo', 'bar'])
assert config == {'Aliases': ['foo', 'bar']}
with pytest.raises(InvalidVersion):
EndpointConfig(version='1.21', aliases=['foo', 'bar'])
class IPAMConfigTest(unittest.TestCase):
def test_create_ipam_config(self):
ipam_pool = IPAMPool(subnet='192.168.52.0/24',
gateway='192.168.52.254')
ipam_config = IPAMConfig(pool_configs=[ipam_pool])
self.assertEqual(ipam_config, {
'Driver': 'default',
'Config': [{
'Subnet': '192.168.52.0/24',
'Gateway': '192.168.52.254',
'AuxiliaryAddresses': None,
'IPRange': None,
}]
})
class ServiceModeTest(unittest.TestCase):
def test_replicated_simple(self):
mode = ServiceMode('replicated')
assert mode == {'replicated': {}}
assert mode.mode == 'replicated'
assert mode.replicas is None
def test_global_simple(self):
mode = ServiceMode('global')
assert mode == {'global': {}}
assert mode.mode == 'global'
assert mode.replicas is None
def test_global_replicas_error(self):
with pytest.raises(InvalidArgument):
ServiceMode('global', 21)
def test_replicated_replicas(self):
mode = ServiceMode('replicated', 21)
assert mode == {'replicated': {'Replicas': 21}}
assert mode.mode == 'replicated'
assert mode.replicas == 21
def test_replicated_replicas_0(self):
mode = ServiceMode('replicated', 0)
assert mode == {'replicated': {'Replicas': 0}}
assert mode.mode == 'replicated'
assert mode.replicas == 0
def test_invalid_mode(self):
with pytest.raises(InvalidArgument):
ServiceMode('foobar')
class MountTest(unittest.TestCase):
def test_parse_mount_string_ro(self):
mount = Mount.parse_mount_string("/foo/bar:/baz:ro")
assert mount['Source'] == "/foo/bar"
assert mount['Target'] == "/baz"
assert mount['ReadOnly'] is True
def test_parse_mount_string_rw(self):
mount = Mount.parse_mount_string("/foo/bar:/baz:rw")
assert mount['Source'] == "/foo/bar"
assert mount['Target'] == "/baz"
assert not mount['ReadOnly']
def test_parse_mount_string_short_form(self):
mount = Mount.parse_mount_string("/foo/bar:/baz")
assert mount['Source'] == "/foo/bar"
assert mount['Target'] == "/baz"
assert not mount['ReadOnly']
def test_parse_mount_string_no_source(self):
mount = Mount.parse_mount_string("foo/bar")
assert mount['Source'] is None
assert mount['Target'] == "foo/bar"
assert not mount['ReadOnly']
def test_parse_mount_string_invalid(self):
with pytest.raises(InvalidArgument):
Mount.parse_mount_string("foo:bar:baz:rw")
def test_parse_mount_named_volume(self):
mount = Mount.parse_mount_string("foobar:/baz")
assert mount['Source'] == 'foobar'
assert mount['Target'] == '/baz'
assert mount['Type'] == 'volume'
def test_parse_mount_bind(self):
mount = Mount.parse_mount_string('/foo/bar:/baz')
assert mount['Source'] == "/foo/bar"
assert mount['Target'] == "/baz"
assert mount['Type'] == 'bind'
@pytest.mark.xfail
def test_parse_mount_bind_windows(self):
with mock.patch('docker.types.services.IS_WINDOWS_PLATFORM', True):
mount = Mount.parse_mount_string('C:/foo/bar:/baz')
assert mount['Source'] == "C:/foo/bar"
assert mount['Target'] == "/baz"
assert mount['Type'] == 'bind'
docker-2.5.1/tests/unit/fake_api.py 0000664 0001750 0001750 00000041137 13145377337 020377 0 ustar joffrey joffrey 0000000 0000000 from . import fake_stat
from docker import constants
CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
FAKE_NETWORK_ID = '33fb6a3462b8'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
FAKE_TAG_NAME = 'tag'
FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
FAKE_NODE_ID = '24ifsmvkjbyhk'
# Each method is prefixed with the HTTP method it fakes (get, post, ...)
# for clarity and readability
def get_fake_raw_version():
status_code = 200
response = {
"ApiVersion": "1.18",
"GitCommit": "fake-commit",
"GoVersion": "go1.3.3",
"Version": "1.5.0"
}
return status_code, response
def get_fake_version():
status_code = 200
response = {'GoVersion': '1', 'Version': '1.1.1',
'GitCommit': 'deadbeef+CHANGES'}
return status_code, response
def get_fake_info():
status_code = 200
response = {'Containers': 1, 'Images': 1, 'Debug': False,
'MemoryLimit': False, 'SwapLimit': False,
'IPv4Forwarding': True}
return status_code, response
def post_fake_auth():
status_code = 200
response = {'Status': 'Login Succeeded',
'IdentityToken': '9cbaf023786cd7'}
return status_code, response
def get_fake_ping():
return 200, "OK"
def get_fake_search():
status_code = 200
response = [{'Name': 'busybox', 'Description': 'Fake Description'}]
return status_code, response
def get_fake_images():
status_code = 200
response = [{
'Id': FAKE_IMAGE_ID,
'Created': '2 days ago',
'Repository': 'busybox',
'RepoTags': ['busybox:latest', 'busybox:1.0'],
}]
return status_code, response
def get_fake_image_history():
status_code = 200
response = [
{
"Id": "b750fe79269d",
"Created": 1364102658,
"CreatedBy": "/bin/bash"
},
{
"Id": "27cf78414709",
"Created": 1364068391,
"CreatedBy": ""
}
]
return status_code, response
def post_fake_import_image():
status_code = 200
response = 'Import messages...'
return status_code, response
def get_fake_containers():
status_code = 200
response = [{
'Id': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Created': '2 days ago',
'Command': 'true',
'Status': 'fake status'
}]
return status_code, response
def post_fake_start_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_resize_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_create_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def get_fake_inspect_container(tty=False):
status_code = 200
response = {
'Id': FAKE_CONTAINER_ID,
'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty},
'ID': FAKE_CONTAINER_ID,
'Image': 'busybox:latest',
'Name': 'foobar',
"State": {
"Status": "running",
"Running": True,
"Pid": 0,
"ExitCode": 0,
"StartedAt": "2013-09-25T14:01:18.869545111+02:00",
"Ghost": False
},
"HostConfig": {
"LogConfig": {
"Type": "json-file",
"Config": {}
},
},
"MacAddress": "02:42:ac:11:00:0a"
}
return status_code, response
def get_fake_inspect_image():
status_code = 200
response = {
'Id': FAKE_IMAGE_ID,
'Parent': "27cf784147099545",
'Created': "2013-03-23T22:24:18.818426-07:00",
'Container': FAKE_CONTAINER_ID,
'Config': {'Labels': {'bar': 'foo'}},
'ContainerConfig':
{
"Hostname": "",
"User": "",
"Memory": 0,
"MemorySwap": 0,
"AttachStdin": False,
"AttachStdout": False,
"AttachStderr": False,
"PortSpecs": "",
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"Env": "",
"Cmd": ["/bin/bash"],
"Dns": "",
"Image": "base",
"Volumes": "",
"VolumesFrom": "",
"WorkingDir": ""
},
'Size': 6823592
}
return status_code, response
def get_fake_insert_image():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_wait():
status_code = 200
response = {'StatusCode': 0}
return status_code, response
def get_fake_logs():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
return status_code, response
def get_fake_diff():
status_code = 200
response = [{'Path': '/test', 'Kind': 1}]
return status_code, response
def get_fake_events():
status_code = 200
response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID,
'from': FAKE_IMAGE_ID, 'time': 1423247867}]
return status_code, response
def get_fake_export():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_exec_create():
status_code = 200
response = {'Id': FAKE_EXEC_ID}
return status_code, response
def post_fake_exec_start():
status_code = 200
response = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n'
b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n'
b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n')
return status_code, response
def post_fake_exec_resize():
status_code = 201
return status_code, ''
def get_fake_exec_inspect():
return 200, {
'OpenStderr': True,
'OpenStdout': True,
'Container': get_fake_inspect_container()[1],
'Running': False,
'ProcessConfig': {
'arguments': ['hello world'],
'tty': False,
'entrypoint': 'echo',
'privileged': False,
'user': ''
},
'ExitCode': 0,
'ID': FAKE_EXEC_ID,
'OpenStdin': False
}
def post_fake_stop_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_kill_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_pause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_unpause_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_restart_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_rename_container():
status_code = 204
return status_code, None
def delete_fake_remove_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_image_create():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def delete_fake_remove_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_get_image():
status_code = 200
response = 'Byte Stream....'
return status_code, response
def post_fake_load_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_commit():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_push():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def post_fake_build_container():
status_code = 200
response = {'Id': FAKE_CONTAINER_ID}
return status_code, response
def post_fake_tag_image():
status_code = 200
response = {'Id': FAKE_IMAGE_ID}
return status_code, response
def get_fake_stats():
status_code = 200
response = fake_stat.OBJ
return status_code, response
def get_fake_top():
return 200, {
'Processes': [
[
'root',
'26501',
'6907',
'0',
'10:32',
'pts/55',
'00:00:00',
'sleep 60',
],
],
'Titles': [
'UID',
'PID',
'PPID',
'C',
'STIME',
'TTY',
'TIME',
'CMD',
],
}
def get_fake_volume_list():
status_code = 200
response = {
'Volumes': [
{
'Name': 'perfectcherryblossom',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
'Scope': 'local'
}, {
'Name': 'subterraneananimism',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/subterraneananimism',
'Scope': 'local'
}
]
}
return status_code, response
def get_fake_volume():
status_code = 200
response = {
'Name': 'perfectcherryblossom',
'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
'Labels': {
'com.example.some-label': 'some-value'
},
'Scope': 'local'
}
return status_code, response
def fake_remove_volume():
return 204, None
def post_fake_update_container():
return 200, {'Warnings': []}
def post_fake_update_node():
return 200, None
def get_fake_network_list():
return 200, [{
"Name": "bridge",
"Id": FAKE_NETWORK_ID,
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": False,
"Internal": False,
"IPAM": {
"Driver": "default",
"Config": [
{
"Subnet": "172.17.0.0/16"
}
]
},
"Containers": {
FAKE_CONTAINER_ID: {
"EndpointID": "ed2419a97c1d99",
"MacAddress": "02:42:ac:11:00:02",
"IPv4Address": "172.17.0.2/16",
"IPv6Address": ""
}
},
"Options": {
"com.docker.network.bridge.default_bridge": "true",
"com.docker.network.bridge.enable_icc": "true",
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "docker0",
"com.docker.network.driver.mtu": "1500"
}
}]
def get_fake_network():
return 200, get_fake_network_list()[1][0]
def post_fake_network():
return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
def delete_fake_network():
return 204, None
def post_fake_network_connect():
return 200, None
def post_fake_network_disconnect():
return 200, None
# Maps real API URLs to fake response callbacks
prefix = 'http+docker://localunixsocket'
if constants.IS_WINDOWS_PLATFORM:
prefix = 'http+docker://localnpipe'
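# Keys below are either a bare URL string, or a (URL, method) tuple where the
# same endpoint needs a different fake per HTTP method.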
fake_responses = {
'{0}/version'.format(prefix):
get_fake_raw_version,
'{1}/{0}/version'.format(CURRENT_VERSION, prefix):
get_fake_version,
'{1}/{0}/info'.format(CURRENT_VERSION, prefix):
get_fake_info,
'{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
post_fake_auth,
'{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
get_fake_ping,
'{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
get_fake_search,
'{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
get_fake_images,
'{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
get_fake_image_history,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_import_image,
'{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
get_fake_containers,
'{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
post_fake_start_container,
'{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
post_fake_resize_container,
'{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_container,
'{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
post_fake_rename_container,
'{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
post_fake_tag_image,
'{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
get_fake_wait,
'{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
get_fake_logs,
'{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
get_fake_diff,
'{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
get_fake_export,
'{1}/{0}/containers/3cc2351ab11b/update'.format(CURRENT_VERSION, prefix):
post_fake_update_container,
'{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
post_fake_exec_create,
'{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix):
post_fake_exec_start,
'{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix):
get_fake_exec_inspect,
'{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix):
post_fake_exec_resize,
'{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
get_fake_stats,
'{1}/{0}/containers/3cc2351ab11b/top'.format(CURRENT_VERSION, prefix):
get_fake_top,
'{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
post_fake_stop_container,
'{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
post_fake_kill_container,
'{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
post_fake_pause_container,
'{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
post_fake_unpause_container,
'{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
post_fake_restart_container,
'{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
delete_fake_remove_container,
'{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
post_fake_image_create,
'{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
delete_fake_remove_image,
'{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
get_fake_get_image,
'{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
post_fake_load_image,
'{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
get_fake_inspect_image,
'{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
get_fake_insert_image,
'{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
post_fake_push,
'{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
post_fake_commit,
'{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
post_fake_create_container,
'{1}/{0}/build'.format(CURRENT_VERSION, prefix):
post_fake_build_container,
'{1}/{0}/events'.format(CURRENT_VERSION, prefix):
get_fake_events,
('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'GET'):
get_fake_volume_list,
('{1}/{0}/volumes/create'.format(CURRENT_VERSION, prefix), 'POST'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'GET'):
get_fake_volume,
('{1}/{0}/volumes/{2}'.format(
CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
), 'DELETE'):
fake_remove_volume,
('{1}/{0}/nodes/{2}/update?version=1'.format(
CURRENT_VERSION, prefix, FAKE_NODE_ID
), 'POST'):
post_fake_update_node,
('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
get_fake_network_list,
('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
post_fake_network,
('{1}/{0}/networks/{2}'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'GET'):
get_fake_network,
('{1}/{0}/networks/{2}'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'DELETE'):
delete_fake_network,
('{1}/{0}/networks/{2}/connect'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'POST'):
post_fake_network_connect,
('{1}/{0}/networks/{2}/disconnect'.format(
CURRENT_VERSION, prefix, FAKE_NETWORK_ID
), 'POST'):
post_fake_network_disconnect,
}
docker-2.5.1/tests/unit/models_containers_test.py 0000664 0001750 0001750 00000045146 13145377337 023413 0 ustar joffrey joffrey 0000000 0000000 import docker
from docker.models.containers import Container, _create_container_args
from docker.models.images import Image
import unittest
from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID, FAKE_EXEC_ID
from .fake_api_client import make_fake_client
class ContainerCollectionTest(unittest.TestCase):
def test_run(self):
client = make_fake_client()
out = client.containers.run("alpine", "echo hello world")
assert out == 'hello world\n'
client.api.create_container.assert_called_with(
image="alpine",
command="echo hello world",
detach=False,
host_config={'NetworkMode': 'default'}
)
client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
client.api.start.assert_called_with(FAKE_CONTAINER_ID)
client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
client.api.logs.assert_called_with(
FAKE_CONTAINER_ID,
stderr=False,
stdout=True
)
def test_create_container_args(self):
create_kwargs = _create_container_args(dict(
image='alpine',
command='echo hello world',
blkio_weight_device=[{'Path': 'foo', 'Weight': 3}],
blkio_weight=2,
cap_add=['foo'],
cap_drop=['bar'],
cgroup_parent='foobar',
cpu_period=1,
cpu_quota=2,
cpu_shares=5,
cpuset_cpus='0-3',
detach=False,
device_read_bps=[{'Path': 'foo', 'Rate': 3}],
device_read_iops=[{'Path': 'foo', 'Rate': 3}],
device_write_bps=[{'Path': 'foo', 'Rate': 3}],
device_write_iops=[{'Path': 'foo', 'Rate': 3}],
devices=['/dev/sda:/dev/xvda:rwm'],
dns=['8.8.8.8'],
domainname='example.com',
dns_opt=['foo'],
dns_search=['example.com'],
entrypoint='/bin/sh',
environment={'FOO': 'BAR'},
extra_hosts={'foo': '1.2.3.4'},
group_add=['blah'],
ipc_mode='foo',
kernel_memory=123,
labels={'key': 'value'},
links={'foo': 'bar'},
log_config={'Type': 'json-file', 'Config': {}},
lxc_conf={'foo': 'bar'},
healthcheck={'test': 'true'},
hostname='somehost',
mac_address='abc123',
mem_limit=123,
mem_reservation=123,
mem_swappiness=2,
memswap_limit=456,
name='somename',
network_disabled=False,
network='foo',
oom_kill_disable=True,
oom_score_adj=5,
pid_mode='host',
pids_limit=500,
ports={
1111: 4567,
2222: None
},
privileged=True,
publish_all_ports=True,
read_only=True,
restart_policy={'Name': 'always'},
security_opt=['blah'],
shm_size=123,
stdin_open=True,
stop_signal=9,
sysctls={'foo': 'bar'},
tmpfs={'/blah': ''},
tty=True,
ulimits=[{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
user='bob',
userns_mode='host',
version='1.23',
volume_driver='some_driver',
volumes=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
'volumename:/mnt/vol3',
'/volumewithnohostpath',
'/anothervolumewithnohostpath:ro',
],
volumes_from=['container'],
working_dir='/code'
))
expected = dict(
image='alpine',
command='echo hello world',
domainname='example.com',
detach=False,
entrypoint='/bin/sh',
environment={'FOO': 'BAR'},
host_config={
'Binds': [
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
'volumename:/mnt/vol3',
'/volumewithnohostpath',
'/anothervolumewithnohostpath:ro'
],
'BlkioDeviceReadBps': [{'Path': 'foo', 'Rate': 3}],
'BlkioDeviceReadIOps': [{'Path': 'foo', 'Rate': 3}],
'BlkioDeviceWriteBps': [{'Path': 'foo', 'Rate': 3}],
'BlkioDeviceWriteIOps': [{'Path': 'foo', 'Rate': 3}],
'BlkioWeightDevice': [{'Path': 'foo', 'Weight': 3}],
'BlkioWeight': 2,
'CapAdd': ['foo'],
'CapDrop': ['bar'],
'CgroupParent': 'foobar',
'CpuPeriod': 1,
'CpuQuota': 2,
'CpuShares': 5,
'CpusetCpus': '0-3',
'Devices': [{'PathOnHost': '/dev/sda',
'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda'}],
'Dns': ['8.8.8.8'],
'DnsOptions': ['foo'],
'DnsSearch': ['example.com'],
'ExtraHosts': ['foo:1.2.3.4'],
'GroupAdd': ['blah'],
'IpcMode': 'foo',
'KernelMemory': 123,
'Links': ['foo:bar'],
'LogConfig': {'Type': 'json-file', 'Config': {}},
'LxcConf': [{'Key': 'foo', 'Value': 'bar'}],
'Memory': 123,
'MemoryReservation': 123,
'MemorySwap': 456,
'MemorySwappiness': 2,
'NetworkMode': 'foo',
'OomKillDisable': True,
'OomScoreAdj': 5,
'PidMode': 'host',
'PidsLimit': 500,
'PortBindings': {
'1111/tcp': [{'HostIp': '', 'HostPort': '4567'}],
'2222/tcp': [{'HostIp': '', 'HostPort': ''}]
},
'Privileged': True,
'PublishAllPorts': True,
'ReadonlyRootfs': True,
'RestartPolicy': {'Name': 'always'},
'SecurityOpt': ['blah'],
'ShmSize': 123,
'Sysctls': {'foo': 'bar'},
'Tmpfs': {'/blah': ''},
'Ulimits': [{"Name": "nofile", "Soft": 1024, "Hard": 2048}],
'UsernsMode': 'host',
'VolumesFrom': ['container'],
},
healthcheck={'test': 'true'},
hostname='somehost',
labels={'key': 'value'},
mac_address='abc123',
name='somename',
network_disabled=False,
networking_config={'foo': None},
ports=[('1111', 'tcp'), ('2222', 'tcp')],
stdin_open=True,
stop_signal=9,
tty=True,
user='bob',
volume_driver='some_driver',
volumes=[
'/mnt/vol2',
'/mnt/vol1',
'/mnt/vol3',
'/volumewithnohostpath',
'/anothervolumewithnohostpath'
],
working_dir='/code'
)
assert create_kwargs == expected
def test_run_detach(self):
client = make_fake_client()
container = client.containers.run('alpine', 'sleep 300', detach=True)
assert isinstance(container, Container)
assert container.id == FAKE_CONTAINER_ID
client.api.create_container.assert_called_with(
image='alpine',
command='sleep 300',
detach=True,
host_config={
'NetworkMode': 'default',
}
)
client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
client.api.start.assert_called_with(FAKE_CONTAINER_ID)
def test_run_pull(self):
client = make_fake_client()
# raise exception on first call, then return normal value
client.api.create_container.side_effect = [
docker.errors.ImageNotFound(""),
client.api.create_container.return_value
]
container = client.containers.run('alpine', 'sleep 300', detach=True)
assert container.id == FAKE_CONTAINER_ID
client.api.pull.assert_called_with('alpine', tag=None)
def test_run_with_error(self):
client = make_fake_client()
client.api.logs.return_value = "some error"
client.api.wait.return_value = 1
with self.assertRaises(docker.errors.ContainerError) as cm:
client.containers.run('alpine', 'echo hello world')
assert cm.exception.exit_status == 1
assert "some error" in str(cm.exception)
def test_run_with_image_object(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
client.containers.run(image)
client.api.create_container.assert_called_with(
image=image.id,
command=None,
detach=False,
host_config={
'NetworkMode': 'default',
}
)
def test_run_remove(self):
client = make_fake_client()
client.containers.run("alpine")
client.api.remove_container.assert_not_called()
client = make_fake_client()
client.api.wait.return_value = 1
with self.assertRaises(docker.errors.ContainerError):
client.containers.run("alpine")
client.api.remove_container.assert_not_called()
client = make_fake_client()
client.containers.run("alpine", remove=True)
client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
client = make_fake_client()
client.api.wait.return_value = 1
with self.assertRaises(docker.errors.ContainerError):
client.containers.run("alpine", remove=True)
client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
client = make_fake_client()
client.api._version = '1.24'
with self.assertRaises(RuntimeError):
client.containers.run("alpine", detach=True, remove=True)
client = make_fake_client()
client.api._version = '1.23'
with self.assertRaises(RuntimeError):
client.containers.run("alpine", detach=True, remove=True)
client = make_fake_client()
client.api._version = '1.25'
client.containers.run("alpine", detach=True, remove=True)
client.api.remove_container.assert_not_called()
client.api.create_container.assert_called_with(
command=None,
image='alpine',
detach=True,
host_config={'AutoRemove': True,
'NetworkMode': 'default'}
)
client = make_fake_client()
client.api._version = '1.26'
client.containers.run("alpine", detach=True, remove=True)
client.api.remove_container.assert_not_called()
client.api.create_container.assert_called_with(
command=None,
image='alpine',
detach=True,
host_config={'AutoRemove': True,
'NetworkMode': 'default'}
)
def test_create(self):
client = make_fake_client()
container = client.containers.create(
'alpine',
'echo hello world',
environment={'FOO': 'BAR'}
)
assert isinstance(container, Container)
assert container.id == FAKE_CONTAINER_ID
client.api.create_container.assert_called_with(
image='alpine',
command='echo hello world',
environment={'FOO': 'BAR'},
host_config={'NetworkMode': 'default'}
)
client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
def test_create_with_image_object(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
client.containers.create(image)
client.api.create_container.assert_called_with(
image=image.id,
command=None,
host_config={'NetworkMode': 'default'}
)
def test_get(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
assert isinstance(container, Container)
assert container.id == FAKE_CONTAINER_ID
client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
def test_list(self):
client = make_fake_client()
containers = client.containers.list(all=True)
client.api.containers.assert_called_with(
all=True,
before=None,
filters=None,
limit=-1,
since=None
)
client.api.inspect_container.assert_called_with(FAKE_CONTAINER_ID)
assert len(containers) == 1
assert isinstance(containers[0], Container)
assert containers[0].id == FAKE_CONTAINER_ID
class ContainerTest(unittest.TestCase):
def test_name(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
assert container.name == 'foobar'
def test_status(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
assert container.status == "running"
def test_attach(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.attach(stream=True)
client.api.attach.assert_called_with(FAKE_CONTAINER_ID, stream=True)
def test_commit(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
image = container.commit()
client.api.commit.assert_called_with(FAKE_CONTAINER_ID,
repository=None,
tag=None)
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_diff(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.diff()
client.api.diff.assert_called_with(FAKE_CONTAINER_ID)
def test_exec_run(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.exec_run("echo hello world", privileged=True, stream=True)
client.api.exec_create.assert_called_with(
FAKE_CONTAINER_ID, "echo hello world", stdout=True, stderr=True,
stdin=False, tty=False, privileged=True, user='', environment=None
)
client.api.exec_start.assert_called_with(
FAKE_EXEC_ID, detach=False, tty=False, stream=True, socket=False
)
def test_export(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.export()
client.api.export.assert_called_with(FAKE_CONTAINER_ID)
def test_get_archive(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.get_archive('foo')
client.api.get_archive.assert_called_with(FAKE_CONTAINER_ID, 'foo')
def test_image(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
assert container.image.id == FAKE_IMAGE_ID
def test_kill(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.kill(signal=5)
client.api.kill.assert_called_with(FAKE_CONTAINER_ID, signal=5)
def test_labels(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
assert container.labels == {'foo': 'bar'}
def test_logs(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.logs()
client.api.logs.assert_called_with(FAKE_CONTAINER_ID)
def test_pause(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.pause()
client.api.pause.assert_called_with(FAKE_CONTAINER_ID)
def test_put_archive(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.put_archive('path', 'foo')
client.api.put_archive.assert_called_with(FAKE_CONTAINER_ID,
'path', 'foo')
def test_remove(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.remove()
client.api.remove_container.assert_called_with(FAKE_CONTAINER_ID)
def test_rename(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.rename("foo")
client.api.rename.assert_called_with(FAKE_CONTAINER_ID, "foo")
def test_resize(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.resize(1, 2)
client.api.resize.assert_called_with(FAKE_CONTAINER_ID, 1, 2)
def test_restart(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.restart()
client.api.restart.assert_called_with(FAKE_CONTAINER_ID)
def test_start(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.start()
client.api.start.assert_called_with(FAKE_CONTAINER_ID)
def test_stats(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.stats()
client.api.stats.assert_called_with(FAKE_CONTAINER_ID)
def test_stop(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.stop()
client.api.stop.assert_called_with(FAKE_CONTAINER_ID)
def test_top(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.top()
client.api.top.assert_called_with(FAKE_CONTAINER_ID)
def test_unpause(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.unpause()
client.api.unpause.assert_called_with(FAKE_CONTAINER_ID)
def test_update(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.update(cpu_shares=2)
client.api.update_container.assert_called_with(FAKE_CONTAINER_ID,
cpu_shares=2)
def test_wait(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.wait()
client.api.wait.assert_called_with(FAKE_CONTAINER_ID)
docker-2.5.1/tests/unit/fake_api_client.py 0000664 0001750 0001750 00000004032 13023617644 021717 0 ustar joffrey joffrey 0000000 0000000 import copy
import docker
from . import fake_api
try:
from unittest import mock
except ImportError:
import mock
class CopyReturnMagicMock(mock.MagicMock):
"""
    A MagicMock which deep-copies every dict or list return value, so a test
    that mutates a returned object cannot pollute the canned response seen by
    later calls.
"""
def _mock_call(self, *args, **kwargs):
ret = super(CopyReturnMagicMock, self)._mock_call(*args, **kwargs)
if isinstance(ret, (dict, list)):
ret = copy.deepcopy(ret)
return ret
def make_fake_api_client():
"""
    Returns an incomplete fake APIClient.

    It handles most of the default cases correctly, but most arguments that
    change behaviour will have no effect.
"""
api_client = docker.APIClient()
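    # A real APIClient is constructed only so that the mock below can
    # delegate create_host_config() to the genuine implementation.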
mock_client = CopyReturnMagicMock(**{
'build.return_value': fake_api.FAKE_IMAGE_ID,
'commit.return_value': fake_api.post_fake_commit()[1],
'containers.return_value': fake_api.get_fake_containers()[1],
'create_container.return_value':
fake_api.post_fake_create_container()[1],
'create_host_config.side_effect': api_client.create_host_config,
'create_network.return_value': fake_api.post_fake_network()[1],
'exec_create.return_value': fake_api.post_fake_exec_create()[1],
'exec_start.return_value': fake_api.post_fake_exec_start()[1],
'images.return_value': fake_api.get_fake_images()[1],
'inspect_container.return_value':
fake_api.get_fake_inspect_container()[1],
'inspect_image.return_value': fake_api.get_fake_inspect_image()[1],
'inspect_network.return_value': fake_api.get_fake_network()[1],
'logs.return_value': 'hello world\n',
'networks.return_value': fake_api.get_fake_network_list()[1],
'start.return_value': None,
'wait.return_value': 0,
})
mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION
return mock_client
def make_fake_client():
"""
    Returns a DockerClient backed by the fake APIClient above.
"""
client = docker.DockerClient()
client.api = make_fake_api_client()
return client
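# Typical use, as in the model tests earlier in this suite: the high-level
# client behaves like the real SDK while assertions land on the mocked
# low-level APIClient.
#
#   client = make_fake_client()
#   container = client.containers.get(fake_api.FAKE_CONTAINER_ID)
#   container.start()
#   client.api.start.assert_called_with(fake_api.FAKE_CONTAINER_ID)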
docker-2.5.1/tests/unit/errors_test.py
import unittest
import requests
from docker.errors import (APIError, ContainerError, DockerException,
create_unexpected_kwargs_error)
from .fake_api import FAKE_CONTAINER_ID, FAKE_IMAGE_ID
from .fake_api_client import make_fake_client
class APIErrorTest(unittest.TestCase):
def test_api_error_is_caught_by_dockerexception(self):
try:
raise APIError("this should be caught by DockerException")
except DockerException:
pass
def test_status_code_200(self):
"""The status_code property is present with 200 response."""
resp = requests.Response()
resp.status_code = 200
err = APIError('', response=resp)
assert err.status_code == 200
def test_status_code_400(self):
"""The status_code property is present with 400 response."""
resp = requests.Response()
resp.status_code = 400
err = APIError('', response=resp)
assert err.status_code == 400
def test_status_code_500(self):
"""The status_code property is present with 500 response."""
resp = requests.Response()
resp.status_code = 500
err = APIError('', response=resp)
assert err.status_code == 500
def test_is_server_error_200(self):
"""Report not server error on 200 response."""
resp = requests.Response()
resp.status_code = 200
err = APIError('', response=resp)
assert err.is_server_error() is False
def test_is_server_error_300(self):
"""Report not server error on 300 response."""
resp = requests.Response()
resp.status_code = 300
err = APIError('', response=resp)
assert err.is_server_error() is False
def test_is_server_error_400(self):
"""Report not server error on 400 response."""
resp = requests.Response()
resp.status_code = 400
err = APIError('', response=resp)
assert err.is_server_error() is False
def test_is_server_error_500(self):
"""Report server error on 500 response."""
resp = requests.Response()
resp.status_code = 500
err = APIError('', response=resp)
assert err.is_server_error() is True
def test_is_client_error_500(self):
"""Report not client error on 500 response."""
resp = requests.Response()
resp.status_code = 500
err = APIError('', response=resp)
assert err.is_client_error() is False
def test_is_client_error_400(self):
"""Report client error on 400 response."""
resp = requests.Response()
resp.status_code = 400
err = APIError('', response=resp)
assert err.is_client_error() is True
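# Taken together, these cases pin down the classification helpers:
# is_client_error() is True exactly for 4xx responses, is_server_error()
# exactly for 5xx, and 2xx/3xx responses are neither. For instance:
#
#   resp = requests.Response()
#   resp.status_code = 503
#   APIError('', response=resp).is_server_error()    # True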
class ContainerErrorTest(unittest.TestCase):
def test_container_without_stderr(self):
"""The massage does not contain stderr"""
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
command = "echo Hello World"
exit_status = 42
image = FAKE_IMAGE_ID
stderr = None
err = ContainerError(container, exit_status, command, image, stderr)
msg = ("Command '{}' in image '{}' returned non-zero exit status {}"
).format(command, image, exit_status, stderr)
assert str(err) == msg
def test_container_with_stderr(self):
"""The massage contains stderr"""
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
command = "echo Hello World"
exit_status = 42
image = FAKE_IMAGE_ID
stderr = "Something went wrong"
err = ContainerError(container, exit_status, command, image, stderr)
msg = ("Command '{}' in image '{}' returned non-zero exit status {}: "
"{}").format(command, image, exit_status, stderr)
assert str(err) == msg
class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
def test_create_unexpected_kwargs_error_single(self):
e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
assert str(e) == "f() got an unexpected keyword argument 'foo'"
def test_create_unexpected_kwargs_error_multiple(self):
e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
docker-2.5.1/tests/unit/api_image_test.py
import docker
import pytest
from . import fake_api
from docker import auth
from .api_test import (
BaseAPIClientTest, fake_request, DEFAULT_TIMEOUT_SECONDS, url_prefix,
fake_resolve_authconfig
)
try:
from unittest import mock
except ImportError:
import mock
class ImageTest(BaseAPIClientTest):
def test_image_viz(self):
# viz output is not supported and should raise
with pytest.raises(Exception):
self.client.images('busybox', viz=True)
def test_images(self):
self.client.images(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
self.client.images(all=True, quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
self.client.images(quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
self.client.images(filters={'dangling': True})
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
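# Note the wire format asserted above: the filters dict is normalized into
# a JSON object whose values are lists of strings before being sent, so
# {'dangling': True} goes over the wire as '{"dangling": ["true"]}'.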
def test_pull(self):
self.client.pull('joffrey/test001')
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None,
'changes': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
self.client.history(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/history',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
self.assertEqual(
excinfo.value.args[0], 'Resource ID was not provided'
)
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_auth(self):
auth_config = {
'username': "test_user",
'password': "test_password",
'serveraddress': "test_server",
}
encoded_auth = auth.encode_header(auth_config)
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME,
auth_config=auth_config
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json',
'X-Registry-Auth': encoded_auth},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
with mock.patch('docker.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
self.client.get_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
self.client.load_image('Byte Stream....')
fake_request.assert_called_with(
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
stream=True,
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image_quiet(self):
self.client.load_image('Byte Stream....', quiet=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
stream=True,
params={'quiet': True},
timeout=DEFAULT_TIMEOUT_SECONDS
)
docker-2.5.1/tests/unit/auth_test.py
# -*- coding: utf-8 -*-
import base64
import json
import os
import os.path
import random
import shutil
import tempfile
import unittest
from py.test import ensuretemp
from pytest import mark
from docker import auth, errors
try:
from unittest import mock
except ImportError:
import mock
class RegressionTest(unittest.TestCase):
def test_803_urlsafe_encode(self):
auth_data = {
'username': 'root',
'password': 'GR?XGR?XGR?XGR?X'
}
encoded = auth.encode_header(auth_data)
assert b'/' not in encoded
assert b'_' in encoded
class ResolveRepositoryNameTest(unittest.TestCase):
def test_resolve_repository_name_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image'),
('docker.io', 'image'),
)
def test_resolve_repository_name_dotted_hub_library_image(self):
self.assertEqual(
auth.resolve_repository_name('image.valid'),
('docker.io', 'image.valid')
)
def test_resolve_repository_name_hub_image(self):
self.assertEqual(
auth.resolve_repository_name('username/image'),
('docker.io', 'username/image'),
)
def test_explicit_hub_index_library_image(self):
self.assertEqual(
auth.resolve_repository_name('docker.io/image'),
('docker.io', 'image')
)
def test_explicit_legacy_hub_index_library_image(self):
self.assertEqual(
auth.resolve_repository_name('index.docker.io/image'),
('docker.io', 'image')
)
def test_resolve_repository_name_private_registry(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/image'),
('my.registry.net', 'image'),
)
def test_resolve_repository_name_private_registry_with_port(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net:5000/image'),
('my.registry.net:5000', 'image'),
)
def test_resolve_repository_name_private_registry_with_username(self):
self.assertEqual(
auth.resolve_repository_name('my.registry.net/username/image'),
('my.registry.net', 'username/image'),
)
def test_resolve_repository_name_no_dots_but_port(self):
self.assertEqual(
auth.resolve_repository_name('hostname:5000/image'),
('hostname:5000', 'image'),
)
def test_resolve_repository_name_no_dots_but_port_and_username(self):
self.assertEqual(
auth.resolve_repository_name('hostname:5000/username/image'),
('hostname:5000', 'username/image'),
)
def test_resolve_repository_name_localhost(self):
self.assertEqual(
auth.resolve_repository_name('localhost/image'),
('localhost', 'image'),
)
def test_resolve_repository_name_localhost_with_username(self):
self.assertEqual(
auth.resolve_repository_name('localhost/username/image'),
('localhost', 'username/image'),
)
def test_invalid_index_name(self):
self.assertRaises(
errors.InvalidRepository,
lambda: auth.resolve_repository_name('-gecko.com/image')
)
def encode_auth(auth_info):
return base64.b64encode(
auth_info.get('username', '').encode('utf-8') + b':' +
auth_info.get('password', '').encode('utf-8'))
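# encode_auth mirrors the "auth" entries found in a Docker config file:
# the base64 encoding of "username:password". For example,
#
#   encode_auth({'username': 'sakuya', 'password': 'izayoi'})
#
# is equal to base64.b64encode(b'sakuya:izayoi').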
class ResolveAuthTest(unittest.TestCase):
index_config = {'auth': encode_auth({'username': 'indexuser'})}
private_config = {'auth': encode_auth({'username': 'privateuser'})}
legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
auth_config = auth.parse_auth({
'https://index.docker.io/v1/': index_config,
'my.registry.net': private_config,
'http://legacy.registry.url/v1/': legacy_config,
})
def test_resolve_authconfig_hostname_only(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_protocol(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_trailing_slash(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_wrong_secure_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net'
)['username'],
'privateuser'
)
def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://index.docker.io'
)['username'],
'indexuser'
)
def test_resolve_authconfig_path_wrong_proto(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'https://my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_default_registry(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config)['username'],
'indexuser'
)
def test_resolve_authconfig_default_explicit_none(self):
self.assertEqual(
auth.resolve_authconfig(self.auth_config, None)['username'],
'indexuser'
)
def test_resolve_authconfig_fully_explicit(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'http://my.registry.net/v1/'
)['username'],
'privateuser'
)
def test_resolve_authconfig_legacy_config(self):
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, 'legacy.registry.url'
)['username'],
'legacyauth'
)
def test_resolve_authconfig_no_match(self):
self.assertTrue(
auth.resolve_authconfig(self.auth_config, 'does.not.exist') is None
)
def test_resolve_registry_and_auth_library_image(self):
image = 'image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_hub_image(self):
image = 'username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_explicit_hub(self):
image = 'docker.io/username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_explicit_legacy_hub(self):
image = 'index.docker.io/username/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'indexuser',
)
def test_resolve_registry_and_auth_private_registry(self):
image = 'my.registry.net/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
)['username'],
'privateuser',
)
def test_resolve_registry_and_auth_unauthenticated_registry(self):
image = 'other.registry.net/image'
self.assertEqual(
auth.resolve_authconfig(
self.auth_config, auth.resolve_repository_name(image)[0]
),
None,
)
class CredStoreTest(unittest.TestCase):
def test_get_credential_store(self):
auth_config = {
'credHelpers': {
'registry1.io': 'truesecret',
'registry2.io': 'powerlock'
},
'credsStore': 'blackbox',
}
assert auth.get_credential_store(
auth_config, 'registry1.io'
) == 'truesecret'
assert auth.get_credential_store(
auth_config, 'registry2.io'
) == 'powerlock'
assert auth.get_credential_store(
auth_config, 'registry3.io'
) == 'blackbox'
def test_get_credential_store_no_default(self):
auth_config = {
'credHelpers': {
'registry1.io': 'truesecret',
'registry2.io': 'powerlock'
},
}
assert auth.get_credential_store(
auth_config, 'registry2.io'
) == 'powerlock'
assert auth.get_credential_store(
auth_config, 'registry3.io'
) is None
def test_get_credential_store_default_index(self):
auth_config = {
'credHelpers': {
'https://index.docker.io/v1/': 'powerlock'
},
'credsStore': 'truesecret'
}
assert auth.get_credential_store(auth_config, None) == 'powerlock'
assert auth.get_credential_store(
auth_config, 'docker.io'
) == 'powerlock'
assert auth.get_credential_store(
auth_config, 'images.io'
) == 'truesecret'
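# Resolution order exercised above: a per-registry entry in 'credHelpers'
# wins, 'credsStore' acts as the catch-all fallback, and the Docker Hub
# aliases (None and 'docker.io') both resolve to the
# 'https://index.docker.io/v1/' entry when one exists.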
class FindConfigFileTest(unittest.TestCase):
def tmpdir(self, name):
tmpdir = ensuretemp(name)
self.addCleanup(tmpdir.remove)
return tmpdir
def test_find_config_fallback(self):
tmpdir = self.tmpdir('test_find_config_fallback')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert auth.find_config_file() is None
def test_find_config_from_explicit_path(self):
tmpdir = self.tmpdir('test_find_config_from_explicit_path')
config_path = tmpdir.ensure('my-config-file.json')
assert auth.find_config_file(str(config_path)) == str(config_path)
def test_find_config_from_environment(self):
tmpdir = self.tmpdir('test_find_config_from_environment')
config_path = tmpdir.ensure('config.json')
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_posix(self):
tmpdir = self.tmpdir('test_find_config_from_home_posix')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
@mark.skipif("sys.platform == 'win32'")
def test_find_config_from_home_legacy_name(self):
tmpdir = self.tmpdir('test_find_config_from_home_legacy_name')
config_path = tmpdir.ensure('.dockercfg')
with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
@mark.skipif("sys.platform != 'win32'")
def test_find_config_from_home_windows(self):
tmpdir = self.tmpdir('test_find_config_from_home_windows')
config_path = tmpdir.ensure('.docker', 'config.json')
with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
assert auth.find_config_file() == str(config_path)
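# The lookup order these cases imply: an explicit path wins, then
# $DOCKER_CONFIG/config.json, then ~/.docker/config.json, and finally the
# legacy ~/.dockercfg (USERPROFILE stands in for HOME on Windows).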
class LoadConfigTest(unittest.TestCase):
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
cfg = auth.load_config(dockercfg_path)
assert auth.INDEX_NAME in cfg
self.assertNotEqual(cfg[auth.INDEX_NAME], None)
cfg = cfg[auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_with_auths(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
'auths': {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_utf8(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(
b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii')
config = {
'auths': {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], b'sakuya\xc3\xa6'.decode('utf8'))
self.assertEqual(cfg['password'], b'izayoi\xc3\xa6'.decode('utf8'))
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env_with_headers(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'HttpHeaders': {
'Name': 'Spike',
'Surname': 'Spiegel'
},
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = auth.load_config(None)
assert 'HttpHeaders' in cfg
self.assertNotEqual(cfg['HttpHeaders'], None)
cfg = cfg['HttpHeaders']
self.assertEqual(cfg['Name'], 'Spike')
self.assertEqual(cfg['Surname'], 'Spiegel')
def test_load_config_unknown_keys(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert cfg == {}
def test_load_config_invalid_auth_dict(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
config = {
'auths': {
'scarlet.net': {'sakuya': 'izayoi'}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert cfg == {'scarlet.net': {}}
def test_load_config_identity_token(self):
folder = tempfile.mkdtemp()
registry = 'scarlet.net'
token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce'
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii')
config = {
'auths': {
registry: {
'auth': auth_entry,
'identitytoken': token
}
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = auth.load_config(dockercfg_path)
assert registry in cfg
cfg = cfg[registry]
assert 'IdentityToken' in cfg
assert cfg['IdentityToken'] == token
docker-2.5.1/tests/unit/models_images_test.py
from docker.models.images import Image
import unittest
from .fake_api import FAKE_IMAGE_ID
from .fake_api_client import make_fake_client
class ImageCollectionTest(unittest.TestCase):
def test_build(self):
client = make_fake_client()
image = client.images.build()
client.api.build.assert_called_with()
client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_get(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
client.api.inspect_image.assert_called_with(FAKE_IMAGE_ID)
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_labels(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
assert image.labels == {'bar': 'foo'}
def test_list(self):
client = make_fake_client()
images = client.images.list(all=True)
client.api.images.assert_called_with(all=True, name=None, filters=None)
assert len(images) == 1
assert isinstance(images[0], Image)
assert images[0].id == FAKE_IMAGE_ID
def test_load(self):
client = make_fake_client()
client.images.load('byte stream')
client.api.load_image.assert_called_with('byte stream')
def test_pull(self):
client = make_fake_client()
image = client.images.pull('test_image')
client.api.pull.assert_called_with('test_image', tag=None)
client.api.inspect_image.assert_called_with('test_image')
assert isinstance(image, Image)
assert image.id == FAKE_IMAGE_ID
def test_push(self):
client = make_fake_client()
client.images.push('foobar', insecure_registry=True)
client.api.push.assert_called_with(
'foobar',
tag=None,
insecure_registry=True
)
def test_remove(self):
client = make_fake_client()
client.images.remove('test_image')
client.api.remove_image.assert_called_with('test_image')
def test_search(self):
client = make_fake_client()
client.images.search('test')
client.api.search.assert_called_with('test')
class ImageTest(unittest.TestCase):
def test_short_id(self):
image = Image(attrs={'Id': 'sha256:b6846070672ce4e8f1f91564ea6782bd675'
'f69d65a6f73ef6262057ad0a15dcd'})
assert image.short_id == 'sha256:b684607067'
image = Image(attrs={'Id': 'b6846070672ce4e8f1f91564ea6782bd675'
'f69d65a6f73ef6262057ad0a15dcd'})
assert image.short_id == 'b684607067'
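# As asserted above, short_id keeps the 'sha256:' prefix when present plus
# the first 10 hex characters of the digest:
#
#   'sha256:b6846070672c...'  ->  'sha256:b684607067'
#   'b6846070672c...'         ->  'b684607067'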
def test_tags(self):
image = Image(attrs={
'RepoTags': ['test_image:latest']
})
assert image.tags == ['test_image:latest']
image = Image(attrs={
'RepoTags': [':']
})
assert image.tags == []
image = Image(attrs={
'RepoTags': None
})
assert image.tags == []
def test_history(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.history()
client.api.history.assert_called_with(FAKE_IMAGE_ID)
def test_save(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.save()
client.api.get_image.assert_called_with(FAKE_IMAGE_ID)
def test_tag(self):
client = make_fake_client()
image = client.images.get(FAKE_IMAGE_ID)
image.tag('foo')
client.api.tag.assert_called_with(FAKE_IMAGE_ID, 'foo', tag=None)
docker-2.5.1/tests/unit/api_container_test.py
# -*- coding: utf-8 -*-
import datetime
import json
import signal
import docker
import pytest
import six
from . import fake_api
from ..helpers import requires_api_version
from .api_test import (
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
fake_inspect_container
)
try:
from unittest import mock
except ImportError:
import mock
def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
class StartContainerTest(BaseAPIClientTest):
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_none(self):
with pytest.raises(ValueError) as excinfo:
self.client.start(container=None)
self.assertEqual(
str(excinfo.value),
'Resource ID was not provided',
)
with pytest.raises(ValueError) as excinfo:
self.client.start(None)
self.assertEqual(
str(excinfo.value),
'Resource ID was not provided',
)
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_start_container_with_lxc_conf(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
def test_start_container_with_lxc_conf_compat(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
def test_start_container_with_binds_ro(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
"bind": '/mnt',
"ro": True
}
}
)
def test_start_container_with_binds_rw(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
def test_start_container_with_port_binds(self):
self.maxDiff = None
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
def test_start_container_with_links(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
def test_start_container_with_multiple_links(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
'path1': 'alias1',
'path2': 'alias2'
}
)
def test_start_container_with_links_as_list_of_tuples(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
def test_start_container_privileged(self):
with pytest.raises(docker.errors.DeprecatedMethod):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
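# All of the host-level kwargs above (lxc_conf, binds, port_bindings,
# links, privileged, ...) moved to create_host_config() at container
# creation time in the 2.x SDK; passing them to start() raises
# DeprecatedMethod, which is exactly what these cases assert.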
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
assert 'data' not in args[1]
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
class CreateContainerTest(BaseAPIClientTest):
def test_create_container(self):
self.client.create_container('busybox', 'true')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
self.client.create_container('busybox', 'hello',
entrypoint='cowsay entry')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": ["cowsay", "entry"]}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
with pytest.deprecated_call():
self.client.create_container('busybox', 'ls', cpu_shares=5)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.18')
def test_create_container_with_host_config_cpu_shares(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cpu_shares=512
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"CpuShares": 512,
"NetworkMode": "default"
}}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
with pytest.deprecated_call():
self.client.create_container('busybox', 'ls', cpuset='0,1')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"CpusetCpus": "0,1"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.18')
def test_create_container_with_host_config_cpuset(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cpuset_cpus='0,1'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"CpusetCpus": "0,1",
"NetworkMode": "default"
}}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.19')
def test_create_container_with_host_config_cpuset_mems(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cpuset_mems='0'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"CpusetMems": "0",
"NetworkMode": "default"
}}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cgroup_parent='test'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
self.assertIn('HostConfig', data)
self.assertIn('CgroupParent', data['HostConfig'])
self.assertEqual(data['HostConfig']['CgroupParent'], 'test')
def test_create_container_with_working_dir(self):
self.client.create_container('busybox', 'ls',
working_dir='/root')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
self.client.create_container('busybox', 'true', stdin_open=True)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
with pytest.raises(docker.errors.InvalidVersion):
self.client.create_container('busybox', 'true', volumes_from=[])
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
def test_create_container_with_mem_limit_as_int(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit=128.0
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128k'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128m'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128g'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(
data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='128p'
)
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='1f28'
)
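# The suffixes exercised here follow binary multiples: 'k' is KiB, 'm' is
# MiB, 'g' is GiB, so '128m' becomes 128 * 1024 * 1024 bytes, and a string
# that does not parse as <number>[k|m|g] raises DockerException.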
def test_create_container_with_lxc_conf(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode_and_ro_error(self):
with pytest.raises(ValueError):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
"ro": True,
}}
)
)
def test_create_container_with_binds_list(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds=[
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
],
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_port_binds(self):
self.maxDiff = None
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
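# Every shorthand above normalizes to a '<port>/<protocol>' key (tcp by
# default) mapping to a list of {'HostIp': ..., 'HostPort': ...} dicts:
#
#   1111: None                 ->  [{'HostIp': '', 'HostPort': ''}]
#   2222: 2222                 ->  [{'HostIp': '', 'HostPort': '2222'}]
#   5555: ('127.0.0.1', 5555)  ->  [{'HostIp': '127.0.0.1', 'HostPort': '5555'}]
#   6666: [(ip1,), (ip2,)]     ->  one binding entry per tuple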
def test_create_container_with_mac_address(self):
expected = "02:42:ac:11:00:0a"
self.client.create_container(
'busybox',
['sleep', '60'],
mac_address=expected
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
assert data['MacAddress'] == expected
def test_create_container_with_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={link_path: alias}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links=[(link_path, alias)]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(privileged=True)
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_add=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_drop=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_named_volume(self):
mount_dest = '/mnt'
volume_name = 'name'
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
volume_driver='foodriver',
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_stop_signal(self):
self.client.create_container('busybox', 'ls',
stop_signal='SIGINT')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"StopSignal": "SIGINT"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
@requires_api_version('1.22')
def test_create_container_with_aliases(self):
self.client.create_container(
'busybox', 'ls',
host_config=self.client.create_host_config(
network_mode='some-network',
),
networking_config=self.client.create_networking_config({
'some-network': self.client.create_endpoint_config(
aliases=['foo', 'bar'],
),
}),
)
args = fake_request.call_args
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"NetworkMode": "some-network"
},
"NetworkingConfig": {
"EndpointsConfig": {
"some-network": {"Aliases": ["foo", "bar"]}
}
}}'''))
@requires_api_version('1.22')
def test_create_container_with_tmpfs_list(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
tmpfs=[
"/tmp",
"/mnt:size=3G,uid=100"
]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
@requires_api_version('1.22')
def test_create_container_with_tmpfs_dict(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
tmpfs={
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Tmpfs'] = {
"/tmp": "",
"/mnt": "size=3G,uid=100"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
@requires_api_version('1.24')
def test_create_container_with_sysctl(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
sysctls={
'net.core.somaxconn': 1024,
'net.ipv4.tcp_syncookies': '0',
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Sysctls'] = {
'net.core.somaxconn': '1024', 'net.ipv4.tcp_syncookies': '0',
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_unicode_envvars(self):
envvars_dict = {
'foo': u'☃',
}
expected = [
u'foo=☃'
]
self.client.create_container(
'busybox', 'true',
environment=envvars_dict,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Env'], expected)
@requires_api_version('1.25')
def test_create_container_with_host_config_cpus(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cpu_count=1,
cpu_percent=20,
nano_cpus=1000
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"HostConfig": {
"CpuCount": 1,
"CpuPercent": 20,
"NanoCpus": 1000,
"NetworkMode": "default"
}}'''))
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
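# The create_container tests above all share one pattern: keyword arguments
# to APIClient.create_host_config() become keys of the HostConfig object in
# the /containers/create payload. A minimal sketch of that translation, which
# is purely local and needs no daemon connection:
if __name__ == '__main__':
    sketch_client = docker.APIClient(version='1.24')
    print(sketch_client.create_host_config(
        cap_add=['MKNOD'],
        restart_policy={'Name': 'always', 'MaximumRetryCount': 0},
    ))
    # Roughly: {'NetworkMode': 'default', 'CapAdd': ['MKNOD'],
    #           'RestartPolicy': {'Name': 'always', 'MaximumRetryCount': 0}}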
class ContainerTest(BaseAPIClientTest):
def test_list_containers(self):
self.client.containers(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 0,
'before': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
self.client.wait(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_logs(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=False)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_following(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_following_backwards(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_streaming_and_following(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True,
follow=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, tail=10)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_since(self):
ts = 809222400
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=ts)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all', 'since': ts},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_since_with_datetime(self):
ts = 809222400
time = datetime.datetime.utcfromtimestamp(ts)
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=time)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all', 'since': ts},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_since_with_invalid_value_raises_error(self):
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container):
with self.assertRaises(docker.errors.InvalidArgument):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
follow=False, since=42.42)
def test_log_tty(self):
m = mock.Mock()
with mock.patch('docker.api.client.APIClient.inspect_container',
fake_inspect_container_tty):
with mock.patch('docker.api.client.APIClient._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
follow=True, stream=True)
self.assertTrue(m.called)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_diff(self):
self.client.diff(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_pause_container(self):
self.client.pause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/pause',
            timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_unpause_container(self):
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/unpause',
            timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
self.client.export(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_container(arg)
self.assertEqual(
excinfo.value.args[0], 'Resource ID was not provided'
)
def test_container_stats(self):
self.client.stats(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
def test_container_top(self):
self.client.top(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_container_top_with_psargs(self):
self.client.top(fake_api.FAKE_CONTAINER_ID, 'waux')
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={'ps_args': 'waux'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@requires_api_version('1.22')
def test_container_update(self):
self.client.update_container(
fake_api.FAKE_CONTAINER_ID, mem_limit='2k', cpu_shares=124,
blkio_weight=345
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/3cc2351ab11b/update'
)
self.assertEqual(
json.loads(args[1]['data']),
{'Memory': 2 * 1024, 'CpuShares': 124, 'BlkioWeight': 345}
)
self.assertEqual(
args[1]['headers']['Content-Type'], 'application/json'
)
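# Two behaviors worth calling out from the ContainerTest cases above, shown
# as a minimal sketch (assumes a reachable daemon; 'my-container' is a
# placeholder id):
#   * logs(): when `follow` is not given it defaults to the value of
#     `stream`, which is why stream=True alone yields follow=1 above.
#   * stop(): the HTTP request timeout is extended by the stop grace period
#     `t`, so the client keeps waiting while the daemon shuts the
#     container down.
if __name__ == '__main__':
    live_client = docker.APIClient()
    for chunk in live_client.logs('my-container', stream=True):
        print(chunk.decode('utf-8', errors='replace'), end='')
    live_client.stop('my-container', timeout=2)  # HTTP timeout becomes 60 + 2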
docker-2.5.1/tests/unit/models_resources_test.py
import unittest
from .fake_api import FAKE_CONTAINER_ID
from .fake_api_client import make_fake_client
class ModelTest(unittest.TestCase):
def test_reload(self):
client = make_fake_client()
container = client.containers.get(FAKE_CONTAINER_ID)
container.attrs['Name'] = "oldname"
container.reload()
assert client.api.inspect_container.call_count == 2
assert container.attrs['Name'] == "foobar"
def test_hash(self):
client = make_fake_client()
container1 = client.containers.get(FAKE_CONTAINER_ID)
my_set = set([container1])
assert len(my_set) == 1
container2 = client.containers.get(FAKE_CONTAINER_ID)
my_set.add(container2)
assert len(my_set) == 1
image1 = client.images.get(FAKE_CONTAINER_ID)
my_set.add(image1)
assert len(my_set) == 2
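# The hash test above relies on docker model objects hashing by model type
# plus id, so two handles to the same container are equal and deduplicate in
# a set, while an image sharing the same id does not. A minimal sketch,
# assuming a reachable daemon and an existing container id (placeholder
# below):
if __name__ == '__main__':
    import docker

    live_client = docker.from_env()
    c1 = live_client.containers.get('3cc2351ab11b')
    c2 = live_client.containers.get('3cc2351ab11b')
    assert c1 == c2 and len({c1, c2}) == 1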
docker-2.5.1/tests/unit/models_networks_test.py
import unittest
from .fake_api import FAKE_NETWORK_ID, FAKE_CONTAINER_ID
from .fake_api_client import make_fake_client
class NetworkCollectionTest(unittest.TestCase):
def test_create(self):
client = make_fake_client()
network = client.networks.create("foobar", labels={'foo': 'bar'})
assert network.id == FAKE_NETWORK_ID
        client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
        client.api.create_network.assert_called_once_with(
            "foobar",
            labels={'foo': 'bar'}
        )
def test_get(self):
client = make_fake_client()
network = client.networks.get(FAKE_NETWORK_ID)
assert network.id == FAKE_NETWORK_ID
        client.api.inspect_network.assert_called_once_with(FAKE_NETWORK_ID)
def test_list(self):
client = make_fake_client()
networks = client.networks.list()
assert networks[0].id == FAKE_NETWORK_ID
        client.api.networks.assert_called_once_with()
client = make_fake_client()
client.networks.list(ids=["abc"])
        client.api.networks.assert_called_once_with(ids=["abc"])
client = make_fake_client()
client.networks.list(names=["foobar"])
        client.api.networks.assert_called_once_with(names=["foobar"])
class NetworkTest(unittest.TestCase):
def test_connect(self):
client = make_fake_client()
network = client.networks.get(FAKE_NETWORK_ID)
network.connect(FAKE_CONTAINER_ID)
        client.api.connect_container_to_network.assert_called_once_with(
            FAKE_CONTAINER_ID,
            FAKE_NETWORK_ID
        )
def test_disconnect(self):
client = make_fake_client()
network = client.networks.get(FAKE_NETWORK_ID)
network.disconnect(FAKE_CONTAINER_ID)
        client.api.disconnect_container_from_network.assert_called_once_with(
            FAKE_CONTAINER_ID,
            FAKE_NETWORK_ID
        )
def test_remove(self):
client = make_fake_client()
network = client.networks.get(FAKE_NETWORK_ID)
network.remove()
        client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID)
docker-2.5.1/tests/unit/api_network_test.py
import json
import six
from .api_test import BaseAPIClientTest, url_prefix, response
from ..helpers import requires_api_version
from docker.types import IPAMConfig, IPAMPool
try:
from unittest import mock
except ImportError:
import mock
class NetworkTest(BaseAPIClientTest):
@requires_api_version('1.21')
def test_list_networks(self):
networks = [
{
"name": "none",
"id": "8e4e55c6863ef424",
"type": "null",
"endpoints": []
},
{
"name": "host",
"id": "062b6d9ea7913fde",
"type": "host",
"endpoints": []
},
]
get = mock.Mock(return_value=response(
status_code=200, content=json.dumps(networks).encode('utf-8')))
with mock.patch('docker.api.client.APIClient.get', get):
self.assertEqual(self.client.networks(), networks)
self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertFalse(filters)
self.client.networks(names=['foo'])
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'name': ['foo']})
self.client.networks(ids=['123'])
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'id': ['123']})
@requires_api_version('1.21')
def test_create_network(self):
network_data = {
"id": 'abc12345',
"warning": "",
}
network_response = response(status_code=200, content=network_data)
post = mock.Mock(return_value=network_response)
with mock.patch('docker.api.client.APIClient.post', post):
result = self.client.create_network('foo')
self.assertEqual(result, network_data)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/create')
self.assertEqual(
json.loads(post.call_args[1]['data']),
{"Name": "foo"})
opts = {
'com.docker.network.bridge.enable_icc': False,
'com.docker.network.bridge.enable_ip_masquerade': False,
}
self.client.create_network('foo', 'bridge', opts)
self.assertEqual(
json.loads(post.call_args[1]['data']),
{"Name": "foo", "Driver": "bridge", "Options": opts})
ipam_pool_config = IPAMPool(subnet="192.168.52.0/24",
gateway="192.168.52.254")
ipam_config = IPAMConfig(pool_configs=[ipam_pool_config])
self.client.create_network("bar", driver="bridge",
ipam=ipam_config)
self.assertEqual(
json.loads(post.call_args[1]['data']),
{
"Name": "bar",
"Driver": "bridge",
"IPAM": {
"Driver": "default",
"Config": [{
"IPRange": None,
"Gateway": "192.168.52.254",
"Subnet": "192.168.52.0/24",
"AuxiliaryAddresses": None,
}],
}
})
@requires_api_version('1.21')
def test_remove_network(self):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
with mock.patch('docker.api.client.APIClient.delete', delete):
self.client.remove_network(network_id)
args = delete.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
@requires_api_version('1.21')
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
network_data = {
six.u('name'): network_name,
six.u('id'): network_id,
six.u('driver'): 'bridge',
six.u('containers'): {},
}
network_response = response(status_code=200, content=network_data)
get = mock.Mock(return_value=network_response)
with mock.patch('docker.api.client.APIClient.get', get):
result = self.client.inspect_network(network_id)
self.assertEqual(result, network_data)
args = get.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
@requires_api_version('1.21')
def test_connect_container_to_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.api.client.APIClient.post', post):
self.client.connect_container_to_network(
container={'Id': container_id},
net_id=network_id,
aliases=['foo', 'bar'],
links=[('baz', 'quux')]
)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/{0}/connect'.format(network_id))
self.assertEqual(
json.loads(post.call_args[1]['data']),
{
'Container': container_id,
'EndpointConfig': {
'Aliases': ['foo', 'bar'],
'Links': ['baz:quux'],
},
})
@requires_api_version('1.21')
def test_disconnect_container_from_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.api.client.APIClient.post', post):
self.client.disconnect_container_from_network(
container={'Id': container_id}, net_id=network_id)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/{0}/disconnect'.format(network_id))
self.assertEqual(
json.loads(post.call_args[1]['data']),
{'Container': container_id})
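# IPAMConfig and IPAMPool (imported at the top of this module) are plain
# dict subclasses, so the payload asserted in test_create_network can be
# inspected without any daemon. A minimal local sketch:
_pool = IPAMPool(subnet='192.168.52.0/24', gateway='192.168.52.254')
assert IPAMConfig(pool_configs=[_pool])['Config'][0]['Subnet'] == \
    '192.168.52.0/24'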
docker-2.5.1/tests/unit/ssladapter_test.py
import unittest
from docker.transport import ssladapter
try:
from backports.ssl_match_hostname import (
match_hostname, CertificateError
)
except ImportError:
from ssl import (
match_hostname, CertificateError
)
try:
from ssl import OP_NO_SSLv3, OP_NO_SSLv2, OP_NO_TLSv1
except ImportError:
OP_NO_SSLv2 = 0x1000000
OP_NO_SSLv3 = 0x2000000
OP_NO_TLSv1 = 0x4000000
class SSLAdapterTest(unittest.TestCase):
def test_only_uses_tls(self):
ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
assert ssl_context.options & OP_NO_SSLv3
# if OpenSSL is compiled without SSL2 support, OP_NO_SSLv2 will be 0
assert not bool(OP_NO_SSLv2) or ssl_context.options & OP_NO_SSLv2
assert not ssl_context.options & OP_NO_TLSv1
class MatchHostnameTest(unittest.TestCase):
cert = {
'issuer': (
(('countryName', u'US'),),
(('stateOrProvinceName', u'California'),),
(('localityName', u'San Francisco'),),
(('organizationName', u'Docker Inc'),),
(('organizationalUnitName', u'Docker-Python'),),
(('commonName', u'localhost'),),
(('emailAddress', u'info@docker.com'),)
),
'notAfter': 'Mar 25 23:08:23 2030 GMT',
'notBefore': u'Mar 25 23:08:23 2016 GMT',
'serialNumber': u'BD5F894C839C548F',
'subject': (
(('countryName', u'US'),),
(('stateOrProvinceName', u'California'),),
(('localityName', u'San Francisco'),),
(('organizationName', u'Docker Inc'),),
(('organizationalUnitName', u'Docker-Python'),),
(('commonName', u'localhost'),),
(('emailAddress', u'info@docker.com'),)
),
'subjectAltName': (
('DNS', u'localhost'),
('DNS', u'*.gensokyo.jp'),
('IP Address', u'127.0.0.1'),
),
'version': 3
}
def test_match_ip_address_success(self):
assert match_hostname(self.cert, '127.0.0.1') is None
def test_match_localhost_success(self):
assert match_hostname(self.cert, 'localhost') is None
def test_match_dns_success(self):
assert match_hostname(self.cert, 'touhou.gensokyo.jp') is None
def test_match_ip_address_failure(self):
self.assertRaises(
CertificateError, match_hostname, self.cert, '192.168.0.25'
)
def test_match_dns_failure(self):
self.assertRaises(
CertificateError, match_hostname, self.cert, 'foobar.co.uk'
)
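# A minimal usage sketch of the match_hostname helper exercised above: it
# returns None when the certificate covers the requested name and raises
# CertificateError otherwise (both names come from the try/except imports
# at the top of this module):
if __name__ == '__main__':
    try:
        match_hostname(MatchHostnameTest.cert, 'evil.example.com')
    except CertificateError as exc:
        print('rejected:', exc)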
docker-2.5.1/tests/unit/__init__.py
docker-2.5.1/tests/unit/utils_test.py
# -*- coding: utf-8 -*-
import base64
import json
import os
import os.path
import shutil
import socket
import sys
import tarfile
import tempfile
import unittest
import pytest
import six
from docker.api.client import APIClient
from docker.constants import IS_WINDOWS_PLATFORM
from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
parse_bytes, parse_env_file, exclude_paths, convert_volume_binds,
decode_json_header, tar, split_command, parse_devices, update_headers,
)
from docker.utils.build import should_check_directory
from docker.utils.ports import build_port_bindings, split_port
from docker.utils.utils import format_environment
from ..helpers import make_tree
TEST_CERT_DIR = os.path.join(
os.path.dirname(__file__),
'testdata/certs',
)
class DecoratorsTest(unittest.TestCase):
def test_update_headers(self):
sample_headers = {
'X-Docker-Locale': 'en-US',
}
def f(self, headers=None):
return headers
client = APIClient()
client._auth_configs = {}
g = update_headers(f)
assert g(client, headers=None) is None
assert g(client, headers={}) == {}
assert g(client, headers={'Content-type': 'application/json'}) == {
'Content-type': 'application/json',
}
client._auth_configs = {
'HttpHeaders': sample_headers
}
assert g(client, headers=None) == sample_headers
assert g(client, headers={}) == sample_headers
assert g(client, headers={'Content-type': 'application/json'}) == {
'Content-type': 'application/json',
'X-Docker-Locale': 'en-US',
}
class KwargsFromEnvTest(unittest.TestCase):
def setUp(self):
self.os_environ = os.environ.copy()
def tearDown(self):
os.environ = self.os_environ
def test_kwargs_from_env_empty(self):
os.environ.update(DOCKER_HOST='',
DOCKER_CERT_PATH='')
os.environ.pop('DOCKER_TLS_VERIFY', None)
kwargs = kwargs_from_env()
self.assertEqual(None, kwargs.get('base_url'))
self.assertEqual(None, kwargs.get('tls'))
def test_kwargs_from_env_tls(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(False, kwargs['tls'].assert_hostname)
self.assertTrue(kwargs['tls'].verify)
try:
client = APIClient(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].ca_cert, client.verify)
self.assertEqual(kwargs['tls'].cert, client.cert)
except TypeError as e:
self.fail(e)
def test_kwargs_from_env_tls_verify_false(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY='')
kwargs = kwargs_from_env(assert_hostname=True)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(True, kwargs['tls'].assert_hostname)
self.assertEqual(False, kwargs['tls'].verify)
try:
client = APIClient(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].cert, client.cert)
self.assertFalse(kwargs['tls'].verify)
except TypeError as e:
self.fail(e)
def test_kwargs_from_env_tls_verify_false_no_cert(self):
temp_dir = tempfile.mkdtemp()
cert_dir = os.path.join(temp_dir, '.docker')
shutil.copytree(TEST_CERT_DIR, cert_dir)
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
HOME=temp_dir,
DOCKER_TLS_VERIFY='')
os.environ.pop('DOCKER_CERT_PATH', None)
kwargs = kwargs_from_env(assert_hostname=True)
self.assertEqual('tcp://192.168.59.103:2376', kwargs['base_url'])
    def test_kwargs_from_env_no_cert_path(self):
        temp_dir = None
        try:
            temp_dir = tempfile.mkdtemp()
cert_dir = os.path.join(temp_dir, '.docker')
shutil.copytree(TEST_CERT_DIR, cert_dir)
os.environ.update(HOME=temp_dir,
DOCKER_CERT_PATH='',
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env()
self.assertTrue(kwargs['tls'].verify)
self.assertIn(cert_dir, kwargs['tls'].ca_cert)
self.assertIn(cert_dir, kwargs['tls'].cert[0])
self.assertIn(cert_dir, kwargs['tls'].cert[1])
finally:
if temp_dir:
shutil.rmtree(temp_dir)
def test_kwargs_from_env_alternate_env(self):
# Values in os.environ are entirely ignored if an alternate is
# provided
os.environ.update(
DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=TEST_CERT_DIR,
DOCKER_TLS_VERIFY=''
)
kwargs = kwargs_from_env(environment={
'DOCKER_HOST': 'http://docker.gensokyo.jp:2581',
})
assert 'http://docker.gensokyo.jp:2581' == kwargs['base_url']
assert 'tls' not in kwargs
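# kwargs_from_env, exercised above, is the same helper docker.from_env()
# uses to derive connection settings. A minimal sketch, assuming DOCKER_HOST
# (and optionally DOCKER_TLS_VERIFY / DOCKER_CERT_PATH) are set in the
# environment:
if __name__ == '__main__':
    env_kwargs = kwargs_from_env(assert_hostname=False)
    print(APIClient(**env_kwargs).base_url)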
class ConvertVolumeBindsTest(unittest.TestCase):
def test_convert_volume_binds_empty(self):
self.assertEqual(convert_volume_binds({}), [])
self.assertEqual(convert_volume_binds([]), [])
def test_convert_volume_binds_list(self):
data = ['/a:/a:ro', '/b:/c:z']
self.assertEqual(convert_volume_binds(data), data)
def test_convert_volume_binds_complete(self):
data = {
'/mnt/vol1': {
'bind': '/data',
'mode': 'ro'
}
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:ro'])
def test_convert_volume_binds_compact(self):
data = {
'/mnt/vol1': '/data'
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
def test_convert_volume_binds_no_mode(self):
data = {
'/mnt/vol1': {
'bind': '/data'
}
}
self.assertEqual(convert_volume_binds(data), ['/mnt/vol1:/data:rw'])
def test_convert_volume_binds_unicode_bytes_input(self):
expected = [u'/mnt/지연:/unicode/박:rw']
data = {
u'/mnt/지연'.encode('utf-8'): {
'bind': u'/unicode/박'.encode('utf-8'),
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
def test_convert_volume_binds_unicode_unicode_input(self):
expected = [u'/mnt/지연:/unicode/박:rw']
data = {
u'/mnt/지연': {
'bind': u'/unicode/박',
'mode': 'rw'
}
}
self.assertEqual(
convert_volume_binds(data), expected
)
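# convert_volume_binds turns the dict form of `binds` accepted by
# create_host_config into the "host:container:mode" strings the Engine API
# expects; the mode defaults to rw. A minimal local sketch:
assert convert_volume_binds(
    {'/mnt/vol1': {'bind': '/data', 'mode': 'ro'}}
) == ['/mnt/vol1:/data:ro']
assert convert_volume_binds({'/mnt/vol2': '/data2'}) == ['/mnt/vol2:/data2:rw']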
class ParseEnvFileTest(unittest.TestCase):
def generate_tempfile(self, file_content=None):
"""
Generates a temporary file for tests with the content
of 'file_content' and returns the filename.
Don't forget to unlink the file with os.unlink() after.
"""
local_tempfile = tempfile.NamedTemporaryFile(delete=False)
local_tempfile.write(file_content.encode('UTF-8'))
local_tempfile.close()
return local_tempfile.name
def test_parse_env_file_proper(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file,
{'USER': 'jdoe', 'PASS': 'secret'})
os.unlink(env_file)
def test_parse_env_file_with_equals_character(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\nPASS=sec==ret')
get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file,
{'USER': 'jdoe', 'PASS': 'sec==ret'})
os.unlink(env_file)
def test_parse_env_file_commented_line(self):
env_file = self.generate_tempfile(
file_content='USER=jdoe\n#PASS=secret')
get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file, {'USER': 'jdoe'})
os.unlink(env_file)
def test_parse_env_file_newline(self):
env_file = self.generate_tempfile(
file_content='\nUSER=jdoe\n\n\nPASS=secret')
get_parse_env_file = parse_env_file(env_file)
self.assertEqual(get_parse_env_file,
{'USER': 'jdoe', 'PASS': 'secret'})
os.unlink(env_file)
def test_parse_env_file_invalid_line(self):
env_file = self.generate_tempfile(
file_content='USER jdoe')
self.assertRaises(
DockerException, parse_env_file, env_file)
os.unlink(env_file)
class ParseHostTest(unittest.TestCase):
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
]
valid_hosts = {
'0.0.0.1:5555': 'http://0.0.0.1:5555',
':6666': 'http://127.0.0.1:6666',
'tcp://:7777': 'http://127.0.0.1:7777',
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
'unix://': 'http+unix://var/run/docker.sock',
'12.234.45.127:2375/docker/engine': (
'http://12.234.45.127:2375/docker/engine'
),
'somehost.net:80/service/swarm': (
'http://somehost.net:80/service/swarm'
),
'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
'[fd12::82d1]:2375': 'http://[fd12::82d1]:2375',
'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090',
'[fd12::82d1]:2375/docker/engine': (
'http://[fd12::82d1]:2375/docker/engine'
),
}
for host in invalid_hosts:
with pytest.raises(DockerException):
parse_host(host, None)
for host, expected in valid_hosts.items():
assert parse_host(host, None) == expected
def test_parse_host_empty_value(self):
unix_socket = 'http+unix://var/run/docker.sock'
npipe = 'npipe:////./pipe/docker_engine'
for val in [None, '']:
assert parse_host(val, is_win32=False) == unix_socket
assert parse_host(val, is_win32=True) == npipe
def test_parse_host_tls(self):
host_value = 'myhost.docker.net:3348'
expected_result = 'https://myhost.docker.net:3348'
assert parse_host(host_value, tls=True) == expected_result
def test_parse_host_tls_tcp_proto(self):
host_value = 'tcp://myhost.docker.net:3348'
expected_result = 'https://myhost.docker.net:3348'
assert parse_host(host_value, tls=True) == expected_result
def test_parse_host_trailing_slash(self):
host_value = 'tcp://myhost.docker.net:2376/'
expected_result = 'http://myhost.docker.net:2376'
assert parse_host(host_value) == expected_result
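# parse_host normalizes the many accepted DOCKER_HOST spellings into a URL
# the transport layer understands, as the table in test_parse_host shows.
# A minimal local sketch:
assert parse_host('tcp://myhost.docker.net:3348', tls=True) == \
    'https://myhost.docker.net:3348'
assert parse_host(None, is_win32=False) == 'http+unix://var/run/docker.sock'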
class ParseRepositoryTagTest(unittest.TestCase):
sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
def test_index_image_no_tag(self):
self.assertEqual(
parse_repository_tag("root"), ("root", None)
)
def test_index_image_tag(self):
self.assertEqual(
parse_repository_tag("root:tag"), ("root", "tag")
)
def test_index_user_image_no_tag(self):
self.assertEqual(
parse_repository_tag("user/repo"), ("user/repo", None)
)
def test_index_user_image_tag(self):
self.assertEqual(
parse_repository_tag("user/repo:tag"), ("user/repo", "tag")
)
def test_private_reg_image_no_tag(self):
self.assertEqual(
parse_repository_tag("url:5000/repo"), ("url:5000/repo", None)
)
def test_private_reg_image_tag(self):
self.assertEqual(
parse_repository_tag("url:5000/repo:tag"), ("url:5000/repo", "tag")
)
def test_index_image_sha(self):
self.assertEqual(
parse_repository_tag("root@sha256:{0}".format(self.sha)),
("root", "sha256:{0}".format(self.sha))
)
def test_private_reg_image_sha(self):
self.assertEqual(
parse_repository_tag("url:5000/repo@sha256:{0}".format(self.sha)),
("url:5000/repo", "sha256:{0}".format(self.sha))
)
class ParseDeviceTest(unittest.TestCase):
def test_dict(self):
devices = parse_devices([{
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
}])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
})
def test_partial_string_definition(self):
devices = parse_devices(['/dev/sda1'])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/sda1',
'CgroupPermissions': 'rwm'
})
def test_permissionless_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1'])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rwm'
})
def test_full_string_definition(self):
devices = parse_devices(['/dev/sda1:/dev/mnt1:r'])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'r'
})
def test_hybrid_list(self):
devices = parse_devices([
'/dev/sda1:/dev/mnt1:rw',
{
'PathOnHost': '/dev/sda2',
'PathInContainer': '/dev/mnt2',
'CgroupPermissions': 'r'
}
])
self.assertEqual(devices[0], {
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/mnt1',
'CgroupPermissions': 'rw'
})
self.assertEqual(devices[1], {
'PathOnHost': '/dev/sda2',
'PathInContainer': '/dev/mnt2',
'CgroupPermissions': 'r'
})
class ParseBytesTest(unittest.TestCase):
def test_parse_bytes_valid(self):
self.assertEqual(parse_bytes("512MB"), 536870912)
self.assertEqual(parse_bytes("512M"), 536870912)
self.assertEqual(parse_bytes("512m"), 536870912)
def test_parse_bytes_invalid(self):
self.assertRaises(DockerException, parse_bytes, "512MK")
self.assertRaises(DockerException, parse_bytes, "512L")
self.assertRaises(DockerException, parse_bytes, "127.0.0.1K")
def test_parse_bytes_float(self):
self.assertRaises(DockerException, parse_bytes, "1.5k")
def test_parse_bytes_maxint(self):
self.assertEqual(
parse_bytes("{0}k".format(sys.maxsize)), sys.maxsize * 1024
)
class UtilsTest(unittest.TestCase):
longMessage = True
def test_convert_filters(self):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
({'exited': 0}, '{"exited": [0]}'),
({'exited': [0, 1]}, '{"exited": [0, 1]}'),
]
for filters, expected in tests:
self.assertEqual(convert_filters(filters), expected)
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
data = None
if six.PY3:
data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
else:
data = base64.urlsafe_b64encode(json.dumps(obj))
decoded_data = decode_json_header(data)
self.assertEqual(obj, decoded_data)
class SplitCommandTest(unittest.TestCase):
def test_split_command_with_unicode(self):
self.assertEqual(split_command(u'echo μμ'), ['echo', 'μμ'])
@pytest.mark.skipif(six.PY3, reason="shlex doesn't support bytes in py3")
def test_split_command_with_bytes(self):
self.assertEqual(split_command('echo μμ'), ['echo', 'μμ'])
class PortsTest(unittest.TestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("127.0.0.1", "1000")])
def test_split_port_with_protocol(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000/udp")
self.assertEqual(internal_port, ["2000/udp"])
self.assertEqual(external_port, [("127.0.0.1", "1000")])
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("127.0.0.1", None)])
def test_split_port_range_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port,
[("127.0.0.1", None), ("127.0.0.1", None)])
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, ["1000"])
def test_split_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, ["1000", "1001"])
def test_split_port_random_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, ["1000-1001"])
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, None)
def test_split_port_range_no_host_port(self):
internal_port, external_port = split_port("2000-2001")
self.assertEqual(internal_port, ["2000", "2001"])
self.assertEqual(external_port, None)
def test_split_port_range_with_protocol(self):
internal_port, external_port = split_port(
"127.0.0.1:1000-1001:2000-2001/udp")
self.assertEqual(internal_port, ["2000/udp", "2001/udp"])
self.assertEqual(external_port,
[("127.0.0.1", "1000"), ("127.0.0.1", "1001")])
def test_split_port_with_ipv6_address(self):
internal_port, external_port = split_port(
"2001:abcd:ef00::2:1000:2000")
self.assertEqual(internal_port, ["2000"])
self.assertEqual(external_port, [("2001:abcd:ef00::2", "1000")])
def test_split_port_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000:tcp"))
def test_non_matching_length_port_ranges(self):
self.assertRaises(
ValueError,
lambda: split_port("0.0.0.0:1000-1010:2000-2002/tcp")
)
def test_port_and_range_invalid(self):
self.assertRaises(ValueError,
lambda: split_port("0.0.0.0:1000:2000-2002/tcp"))
def test_port_only_with_colon(self):
self.assertRaises(ValueError,
lambda: split_port(":80"))
def test_host_only_with_colon(self):
self.assertRaises(ValueError,
lambda: split_port("localhost:"))
def test_with_no_container_port(self):
self.assertRaises(ValueError,
lambda: split_port("localhost:80:"))
def test_split_port_empty_string(self):
self.assertRaises(ValueError, lambda: split_port(""))
def test_split_port_non_string(self):
assert split_port(1243) == (['1243'], None)
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
self.assertEqual(port_bindings["1000"],
[("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
def test_build_port_bindings_with_port_range(self):
port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["1001"], [("127.0.0.1", "1001")])
def test_build_port_bindings_with_matching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
self.assertEqual(port_bindings["1000"],
[("127.0.0.1", "1000"), ("127.0.0.1", "2000")])
self.assertEqual(port_bindings["1001"],
[("127.0.0.1", "1001"), ("127.0.0.1", "2001")])
def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
self.assertEqual(port_bindings["1000"], [("127.0.0.1", "1000")])
self.assertEqual(port_bindings["2000"], [("127.0.0.1", "2000")])
def convert_paths(collection):
return set(map(convert_path, collection))
def convert_path(path):
return path.replace('/', os.path.sep)
class ExcludePathsTest(unittest.TestCase):
dirs = [
'foo',
'foo/bar',
'bar',
'target',
'target/subdir',
'subdir',
'subdir/target',
'subdir/target/subdir',
'subdir/subdir2',
'subdir/subdir2/target',
'subdir/subdir2/target/subdir'
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
'foo/Dockerfile3',
'target/file.txt',
'target/subdir/file.txt',
'subdir/file.txt',
'subdir/target/file.txt',
'subdir/target/subdir/file.txt',
'subdir/subdir2/file.txt',
'subdir/subdir2/target/file.txt',
'subdir/subdir2/target/subdir/file.txt',
]
all_paths = set(dirs + files)
def setUp(self):
self.base = make_tree(self.dirs, self.files)
def tearDown(self):
shutil.rmtree(self.base)
def exclude(self, patterns, dockerfile=None):
return set(exclude_paths(self.base, patterns, dockerfile=dockerfile))
def test_no_excludes(self):
assert self.exclude(['']) == convert_paths(self.all_paths)
def test_no_dupes(self):
paths = exclude_paths(self.base, ['!a.py'])
assert sorted(paths) == sorted(set(paths))
def test_wildcard_exclude(self):
assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
def test_exclude_dockerfile_dockerignore(self):
"""
Even if the .dockerignore file explicitly says to exclude
Dockerfile and/or .dockerignore, don't exclude them from
the actual tar file.
"""
assert self.exclude(['Dockerfile', '.dockerignore']) == convert_paths(
self.all_paths
)
def test_exclude_custom_dockerfile(self):
"""
If we're using a custom Dockerfile, make sure that's not
excluded.
"""
assert self.exclude(['*'], dockerfile='Dockerfile.alt') == \
set(['Dockerfile.alt', '.dockerignore'])
assert self.exclude(['*'], dockerfile='foo/Dockerfile3') == \
convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
def test_exclude_dockerfile_child(self):
includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
assert convert_path('foo/Dockerfile3') in includes
assert convert_path('foo/a.py') not in includes
def test_single_filename(self):
assert self.exclude(['a.py']) == convert_paths(
self.all_paths - set(['a.py'])
)
def test_single_filename_leading_dot_slash(self):
assert self.exclude(['./a.py']) == convert_paths(
self.all_paths - set(['a.py'])
)
# As odd as it sounds, a filename pattern with a trailing slash on the
# end *will* result in that file being excluded.
def test_single_filename_trailing_slash(self):
assert self.exclude(['a.py/']) == convert_paths(
self.all_paths - set(['a.py'])
)
def test_wildcard_filename_start(self):
assert self.exclude(['*.py']) == convert_paths(
self.all_paths - set(['a.py', 'b.py', 'cde.py'])
)
def test_wildcard_with_exception(self):
assert self.exclude(['*.py', '!b.py']) == convert_paths(
self.all_paths - set(['a.py', 'cde.py'])
)
def test_wildcard_with_wildcard_exception(self):
assert self.exclude(['*.*', '!*.go']) == convert_paths(
self.all_paths - set([
'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
])
)
def test_wildcard_filename_end(self):
assert self.exclude(['a.*']) == convert_paths(
self.all_paths - set(['a.py', 'a.go'])
)
def test_question_mark(self):
assert self.exclude(['?.py']) == convert_paths(
self.all_paths - set(['a.py', 'b.py'])
)
def test_single_subdir_single_filename(self):
assert self.exclude(['foo/a.py']) == convert_paths(
self.all_paths - set(['foo/a.py'])
)
def test_single_subdir_single_filename_leading_slash(self):
assert self.exclude(['/foo/a.py']) == convert_paths(
self.all_paths - set(['foo/a.py'])
)
def test_single_subdir_with_path_traversal(self):
assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
self.all_paths - set(['foo/a.py'])
)
def test_single_subdir_wildcard_filename(self):
assert self.exclude(['foo/*.py']) == convert_paths(
self.all_paths - set(['foo/a.py', 'foo/b.py'])
)
def test_wildcard_subdir_single_filename(self):
assert self.exclude(['*/a.py']) == convert_paths(
self.all_paths - set(['foo/a.py', 'bar/a.py'])
)
def test_wildcard_subdir_wildcard_filename(self):
assert self.exclude(['*/*.py']) == convert_paths(
self.all_paths - set(['foo/a.py', 'foo/b.py', 'bar/a.py'])
)
def test_directory(self):
assert self.exclude(['foo']) == convert_paths(
self.all_paths - set([
'foo', 'foo/a.py', 'foo/b.py', 'foo/bar', 'foo/bar/a.py',
'foo/Dockerfile3'
])
)
def test_directory_with_trailing_slash(self):
        assert self.exclude(['foo/']) == convert_paths(
self.all_paths - set([
'foo', 'foo/a.py', 'foo/b.py',
'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
])
)
def test_directory_with_single_exception(self):
assert self.exclude(['foo', '!foo/bar/a.py']) == convert_paths(
self.all_paths - set([
'foo/a.py', 'foo/b.py', 'foo', 'foo/bar',
'foo/Dockerfile3'
])
)
def test_directory_with_subdir_exception(self):
assert self.exclude(['foo', '!foo/bar']) == convert_paths(
self.all_paths - set([
'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
])
)
@pytest.mark.skipif(
not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
)
def test_directory_with_subdir_exception_win32_pathsep(self):
assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
self.all_paths - set([
'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
])
)
def test_directory_with_wildcard_exception(self):
assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
self.all_paths - set([
'foo/bar', 'foo/bar/a.py', 'foo', 'foo/Dockerfile3'
])
)
def test_subdirectory(self):
assert self.exclude(['foo/bar']) == convert_paths(
self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
)
@pytest.mark.skipif(
not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
)
def test_subdirectory_win32_pathsep(self):
assert self.exclude(['foo\\bar']) == convert_paths(
self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
)
def test_double_wildcard(self):
assert self.exclude(['**/a.py']) == convert_paths(
self.all_paths - set(
['a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py']
)
)
assert self.exclude(['foo/**/bar']) == convert_paths(
self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
)
def test_single_and_double_wildcard(self):
assert self.exclude(['**/target/*/*']) == convert_paths(
self.all_paths - set(
['target/subdir/file.txt',
'subdir/target/subdir/file.txt',
'subdir/subdir2/target/subdir/file.txt']
)
)
def test_trailing_double_wildcard(self):
assert self.exclude(['subdir/**']) == convert_paths(
self.all_paths - set(
['subdir/file.txt',
'subdir/target/file.txt',
'subdir/target/subdir/file.txt',
'subdir/subdir2/file.txt',
'subdir/subdir2/target/file.txt',
'subdir/subdir2/target/subdir/file.txt',
'subdir/target',
'subdir/target/subdir',
'subdir/subdir2',
'subdir/subdir2/target',
'subdir/subdir2/target/subdir']
)
)
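# exclude_paths applies .dockerignore semantics (including ! exceptions and
# ** globs) to a build-context root and returns the set of paths to keep.
# A minimal sketch over a throwaway tree built with the same make_tree
# helper the tests use:
if __name__ == '__main__':
    demo_root = make_tree(['foo'], ['a.py', 'b.py', 'foo/a.py'])
    try:
        print(sorted(exclude_paths(demo_root, ['*.py', '!b.py'])))
    finally:
        shutil.rmtree(demo_root)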
class TarTest(unittest.TestCase):
def test_tar_with_excludes(self):
dirs = [
'foo',
'foo/bar',
'bar',
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
]
exclude = [
'*.py',
'!b.py',
'!a.go',
'foo',
'Dockerfile*',
'.dockerignore',
]
expected_names = set([
'Dockerfile',
'.dockerignore',
'a.go',
'b.py',
'bar',
'bar/a.py',
])
base = make_tree(dirs, files)
self.addCleanup(shutil.rmtree, base)
with tar(base, exclude=exclude) as archive:
tar_data = tarfile.open(fileobj=archive)
assert sorted(tar_data.getnames()) == sorted(expected_names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar_data.getnames()), ['bar', 'foo'])
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(
sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
)
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(
sorted(tar_data.getnames()), ['bar', 'bar/foo', 'foo']
)
@pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32')
def test_tar_socket_file(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
sock = socket.socket(socket.AF_UNIX)
self.addCleanup(sock.close)
sock.bind(os.path.join(base, 'test.sock'))
with tar(base) as archive:
tar_data = tarfile.open(fileobj=archive)
self.assertEqual(
sorted(tar_data.getnames()), ['bar', 'foo']
)
class ShouldCheckDirectoryTest(unittest.TestCase):
exclude_patterns = [
'exclude_rather_large_directory',
'dir/with/subdir_excluded',
'dir/with/exceptions'
]
include_patterns = [
'dir/with/exceptions/like_this_one',
'dir/with/exceptions/in/descendents'
]
def test_should_check_directory_not_excluded(self):
assert should_check_directory(
'not_excluded', self.exclude_patterns, self.include_patterns
)
assert should_check_directory(
convert_path('dir/with'), self.exclude_patterns,
self.include_patterns
)
    def test_should_check_parent_directories_of_excluded(self):
assert should_check_directory(
'dir', self.exclude_patterns, self.include_patterns
)
assert should_check_directory(
convert_path('dir/with'), self.exclude_patterns,
self.include_patterns
)
def test_should_not_check_excluded_directories_with_no_exceptions(self):
assert not should_check_directory(
'exclude_rather_large_directory', self.exclude_patterns,
self.include_patterns
)
assert not should_check_directory(
convert_path('dir/with/subdir_excluded'), self.exclude_patterns,
self.include_patterns
)
def test_should_check_excluded_directory_with_exceptions(self):
assert should_check_directory(
convert_path('dir/with/exceptions'), self.exclude_patterns,
self.include_patterns
)
assert should_check_directory(
convert_path('dir/with/exceptions/in'), self.exclude_patterns,
self.include_patterns
)
def test_should_not_check_siblings_of_exceptions(self):
assert not should_check_directory(
convert_path('dir/with/exceptions/but_not_here'),
self.exclude_patterns, self.include_patterns
)
def test_should_check_subdirectories_of_exceptions(self):
assert should_check_directory(
convert_path('dir/with/exceptions/like_this_one/subdir'),
self.exclude_patterns, self.include_patterns
)
class FormatEnvironmentTest(unittest.TestCase):
def test_format_env_binary_unicode_value(self):
env_dict = {
'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'
}
assert format_environment(env_dict) == [u'ARTIST_NAME=송지은']
def test_format_env_no_value(self):
env_dict = {
'FOO': None,
'BAR': '',
}
assert sorted(format_environment(env_dict)) == ['BAR=', 'FOO']
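# format_environment flattens an environment dict into the KEY=value list
# the Engine API expects; a None value yields a bare key and bytes values
# are decoded as UTF-8. A minimal local sketch:
assert sorted(format_environment({'FOO': None, 'BAR': ''})) == ['BAR=', 'FOO']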
docker-2.5.1/tests/unit/api_volume_test.py
import json
import pytest
from ..helpers import requires_api_version
from .api_test import BaseAPIClientTest, url_prefix, fake_request
class VolumeTest(BaseAPIClientTest):
@requires_api_version('1.21')
def test_list_volumes(self):
volumes = self.client.volumes()
self.assertIn('Volumes', volumes)
self.assertEqual(len(volumes['Volumes']), 2)
args = fake_request.call_args
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], url_prefix + 'volumes')
@requires_api_version('1.21')
def test_list_volumes_and_filters(self):
volumes = self.client.volumes(filters={'dangling': True})
assert 'Volumes' in volumes
assert len(volumes['Volumes']) == 2
args = fake_request.call_args
assert args[0][0] == 'GET'
assert args[0][1] == url_prefix + 'volumes'
assert args[1] == {'params': {'filters': '{"dangling": ["true"]}'},
'timeout': 60}
@requires_api_version('1.21')
def test_create_volume(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
args = fake_request.call_args
self.assertEqual(args[0][0], 'POST')
self.assertEqual(args[0][1], url_prefix + 'volumes/create')
self.assertEqual(json.loads(args[1]['data']), {'Name': name})
@requires_api_version('1.23')
def test_create_volume_with_labels(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name, labels={
'com.example.some-label': 'some-value'})
self.assertEqual(
result["Labels"],
{'com.example.some-label': 'some-value'}
)
@requires_api_version('1.23')
def test_create_volume_with_invalid_labels(self):
name = 'perfectcherryblossom'
with pytest.raises(TypeError):
self.client.create_volume(name, labels=1)
@requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'
driver_name = 'sshfs'
self.client.create_volume(name, driver=driver_name)
args = fake_request.call_args
self.assertEqual(args[0][0], 'POST')
self.assertEqual(args[0][1], url_prefix + 'volumes/create')
data = json.loads(args[1]['data'])
self.assertIn('Driver', data)
self.assertEqual(data['Driver'], driver_name)
@requires_api_version('1.21')
def test_create_volume_invalid_opts_type(self):
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts='hello=world'
)
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts=['hello=world']
)
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts=''
)
@requires_api_version('1.24')
def test_create_volume_with_no_specified_name(self):
result = self.client.create_volume(name=None)
self.assertIn('Name', result)
self.assertNotEqual(result['Name'], None)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
self.assertIn('Scope', result)
self.assertEqual(result['Scope'], 'local')
@requires_api_version('1.21')
def test_inspect_volume(self):
name = 'perfectcherryblossom'
result = self.client.inspect_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
args = fake_request.call_args
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
@requires_api_version('1.21')
def test_remove_volume(self):
name = 'perfectcherryblossom'
self.client.remove_volume(name)
args = fake_request.call_args
self.assertEqual(args[0][0], 'DELETE')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
docker-2.5.1/tests/unit/api_exec_test.py 0000664 0001750 0001750 00000005442 13106703741 021440 0 ustar joffrey joffrey 0000000 0000000 import json
from . import fake_api
from .api_test import (
BaseAPIClientTest, url_prefix, fake_request, DEFAULT_TIMEOUT_SECONDS,
)
class ExecTest(BaseAPIClientTest):
def test_exec_create(self):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
args = fake_request.call_args
        self.assertEqual('POST', args[0][0])
        self.assertEqual(
            args[0][1], url_prefix + 'containers/{0}/exec'.format(
                fake_api.FAKE_CONTAINER_ID
            )
        )
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'AttachStdout': True,
'Container': fake_api.FAKE_CONTAINER_ID,
'Cmd': ['ls', '-1'],
'Privileged': False,
'AttachStdin': False,
'AttachStderr': True,
'User': ''
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_start(self):
self.client.exec_start(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/start'.format(
fake_api.FAKE_EXEC_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'Detach': False,
}
)
self.assertEqual(
args[1]['headers'], {
'Content-Type': 'application/json',
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
)
def test_exec_start_detached(self):
self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/start'.format(
fake_api.FAKE_EXEC_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'Detach': True
}
)
self.assertEqual(
args[1]['headers'], {
'Content-Type': 'application/json'
}
)
def test_exec_inspect(self):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/json'.format(
fake_api.FAKE_EXEC_ID
)
)
def test_exec_resize(self):
self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
fake_request.assert_called_with(
'POST',
url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
docker-2.5.1/tests/unit/api_test.py 0000664 0001750 0001750 00000040244 13145377337 020446 0 ustar joffrey joffrey 0000000 0000000 import datetime
import json
import io
import os
import re
import shutil
import socket
import tempfile
import threading
import time
import unittest
import docker
from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
from . import fake_api
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
    res.elapsed = datetime.timedelta(seconds=elapsed)
res.request = request
res.raw = raw
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
def fake_read_from_socket(self, response, stream, tty=False):
return six.binary_type()
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.api.client.APIClient',
get=fake_get,
post=fake_post,
put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
self.patcher.stop()
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
APIClient(version=1.12)
self.assertEqual(
str(excinfo.value),
'Version parameter must be a string or None. Found float'
)
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
)
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
)
url = self.client._url('/hello/{0}/world', 'some?name')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
)
url = self.client._url("/images/{0}/push", "localhost:5000/image")
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'images/localhost:5000/image/push')
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
self.assertEqual(
url, '{0}{1}'.format(url_base, 'hello/somename/world')
)
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = APIClient(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True,
timeout=None
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True,
timeout=None
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True,
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
TypeError, self.client.create_host_config, security_opt='wrong'
)
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
if six.PY3:
content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
self.assertEqual(result, content_str)
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
self.assertEqual(result, content)
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
self.assertEqual(result, content_str.decode('utf-8'))
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
self.assertEqual(result, content)
class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with APIClient(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
self.mock_send = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_default_user_agent(self):
client = APIClient()
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-sdk-python/%s' % docker.__version__
self.assertEqual(headers['User-Agent'], expected)
def test_custom_user_agent(self):
client = APIClient(user_agent='foo/bar')
client.version()
self.assertEqual(self.mock_send.call_count, 1)
headers = self.mock_send.call_args[0][0].headers
self.assertEqual(headers['User-Agent'], 'foo/bar')
class DisableSocketTest(unittest.TestCase):
class DummySocket(object):
def __init__(self, timeout=60):
self.timeout = timeout
def settimeout(self, timeout):
self.timeout = timeout
def gettimeout(self):
return self.timeout
def setUp(self):
self.client = APIClient()
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
socket = self.DummySocket()
self.client._disable_socket_timeout(socket)
self.assertEqual(socket.timeout, None)
def test_disable_socket_timeout2(self):
"""Test that the timeouts are disabled on a generic socket object
and it's _sock object if present."""
socket = self.DummySocket()
socket._sock = self.DummySocket()
self.client._disable_socket_timeout(socket)
self.assertEqual(socket.timeout, None)
self.assertEqual(socket._sock.timeout, None)
    def test_disable_socket_timeout_non_blocking(self):
"""Test that a non-blocking socket does not get set to blocking."""
socket = self.DummySocket()
socket._sock = self.DummySocket(0.0)
self.client._disable_socket_timeout(socket)
self.assertEqual(socket.timeout, None)
self.assertEqual(socket._sock.timeout, 0.0)
docker-2.5.1/tests/__init__.py 0000664 0001750 0001750 00000000000 13021666666 017377 0 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/ 0000775 0001750 0001750 00000000000 13147142650 015374 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/types/ 0000775 0001750 0001750 00000000000 13147142650 016540 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/types/healthcheck.py 0000664 0001750 0001750 00000002743 13124577310 021363 0 ustar joffrey joffrey 0000000 0000000 from .base import DictType
import six
class Healthcheck(DictType):
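    """
    Defines a healthcheck configuration for a container or service. The
    following sketch is grounded in the constructor below; the test command
    and durations are placeholder values (the Engine API expects durations
    in nanoseconds).

    Args:
        test (str or :py:class:`list`): Test to perform to check container
            health. A string is wrapped as ``["CMD-SHELL", test]``.
        interval (int): Time between health checks, in nanoseconds.
        timeout (int): Time to wait for a single check, in nanoseconds.
        retries (int): Number of consecutive failures needed to report the
            container as unhealthy.
        start_period (int): Grace period before failing checks count, in
            nanoseconds (API version 1.29 and above).

    Example:
        >>> healthcheck = Healthcheck(
            test='curl -f http://localhost/ || exit 1',
            interval=30 * 10 ** 9, timeout=10 * 10 ** 9, retries=3
        )
    """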
def __init__(self, **kwargs):
test = kwargs.get('test', kwargs.get('Test'))
if isinstance(test, six.string_types):
test = ["CMD-SHELL", test]
interval = kwargs.get('interval', kwargs.get('Interval'))
timeout = kwargs.get('timeout', kwargs.get('Timeout'))
retries = kwargs.get('retries', kwargs.get('Retries'))
start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
super(Healthcheck, self).__init__({
'Test': test,
'Interval': interval,
'Timeout': timeout,
'Retries': retries,
'StartPeriod': start_period
})
@property
def test(self):
return self['Test']
@test.setter
def test(self, value):
self['Test'] = value
@property
def interval(self):
return self['Interval']
@interval.setter
def interval(self, value):
self['Interval'] = value
@property
def timeout(self):
return self['Timeout']
@timeout.setter
def timeout(self, value):
self['Timeout'] = value
@property
def retries(self):
return self['Retries']
@retries.setter
def retries(self, value):
self['Retries'] = value
@property
def start_period(self):
return self['StartPeriod']
@start_period.setter
def start_period(self, value):
self['StartPeriod'] = value
docker-2.5.1/docker/types/containers.py 0000664 0001750 0001750 00000057174 13124577310 021275 0 ustar joffrey joffrey 0000000 0000000 import six
import warnings
from .. import errors
from ..utils.utils import (
convert_port_bindings, convert_tmpfs_mounts, convert_volume_binds,
format_environment, normalize_links, parse_bytes, parse_devices,
split_command, version_gte, version_lt,
)
from .base import DictType
from .healthcheck import Healthcheck
class LogConfigTypesEnum(object):
_values = (
'json-file',
'syslog',
'journald',
'gelf',
'fluentd',
'none'
)
JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
class LogConfig(DictType):
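    """
    Configures logging for a container, to be used as the ``log_config``
    parameter of ``create_host_config``. An illustrative sketch follows;
    ``max-size`` is an option specific to the ``json-file`` driver and the
    value is a placeholder.

    Args:
        type (str): The logging driver name; one of the values exposed as
            ``LogConfig.types`` (``json-file``, ``syslog``, ...).
        config (dict): Driver-specific options. Default: ``{}``.

    Example:
        >>> log_config = LogConfig(
            type=LogConfig.types.JSON, config={'max-size': '1g'}
        )
    """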
types = LogConfigTypesEnum
def __init__(self, **kwargs):
log_driver_type = kwargs.get('type', kwargs.get('Type'))
config = kwargs.get('config', kwargs.get('Config')) or {}
if config and not isinstance(config, dict):
raise ValueError("LogConfig.config must be a dictionary")
super(LogConfig, self).__init__({
'Type': log_driver_type,
'Config': config
})
@property
def type(self):
return self['Type']
@type.setter
def type(self, value):
self['Type'] = value
@property
def config(self):
return self['Config']
def set_config_value(self, key, value):
self.config[key] = value
def unset_config(self, key):
if key in self.config:
del self.config[key]
class Ulimit(DictType):
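    """
    Describes a ulimit (user limit) to apply to a container, used with the
    ``ulimits`` parameter of ``create_host_config``. The values in the
    sketch below are placeholders.

    Args:
        name (str): Name of the limit, e.g. ``nofile``.
        soft (int): Soft limit value. Optional.
        hard (int): Hard limit value. Optional.

    Example:
        >>> nofile_limit = Ulimit(name='nofile', soft=1024, hard=2048)
    """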
def __init__(self, **kwargs):
name = kwargs.get('name', kwargs.get('Name'))
soft = kwargs.get('soft', kwargs.get('Soft'))
hard = kwargs.get('hard', kwargs.get('Hard'))
if not isinstance(name, six.string_types):
raise ValueError("Ulimit.name must be a string")
if soft and not isinstance(soft, int):
raise ValueError("Ulimit.soft must be an integer")
if hard and not isinstance(hard, int):
raise ValueError("Ulimit.hard must be an integer")
super(Ulimit, self).__init__({
'Name': name,
'Soft': soft,
'Hard': hard
})
@property
def name(self):
return self['Name']
@name.setter
def name(self, value):
self['Name'] = value
@property
def soft(self):
return self.get('Soft')
@soft.setter
def soft(self, value):
self['Soft'] = value
@property
def hard(self):
return self.get('Hard')
@hard.setter
def hard(self, value):
self['Hard'] = value
class HostConfig(dict):
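    """
    Host-specific configuration for a container (memory limits, port
    bindings, devices, and so on), validated against the supplied API
    ``version``. Instances are normally obtained via
    ``APIClient.create_host_config()`` rather than constructed directly.
    """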
def __init__(self, version, binds=None, port_bindings=None,
lxc_conf=None, publish_all_ports=False, links=None,
privileged=False, dns=None, dns_search=None,
volumes_from=None, network_mode=None, restart_policy=None,
cap_add=None, cap_drop=None, devices=None, extra_hosts=None,
read_only=None, pid_mode=None, ipc_mode=None,
security_opt=None, ulimits=None, log_config=None,
mem_limit=None, memswap_limit=None, mem_reservation=None,
kernel_memory=None, mem_swappiness=None, cgroup_parent=None,
group_add=None, cpu_quota=None, cpu_period=None,
blkio_weight=None, blkio_weight_device=None,
device_read_bps=None, device_write_bps=None,
device_read_iops=None, device_write_iops=None,
oom_kill_disable=False, shm_size=None, sysctls=None,
tmpfs=None, oom_score_adj=None, dns_opt=None, cpu_shares=None,
cpuset_cpus=None, userns_mode=None, pids_limit=None,
isolation=None, auto_remove=False, storage_opt=None,
init=None, init_path=None, volume_driver=None,
cpu_count=None, cpu_percent=None, nano_cpus=None,
cpuset_mems=None, runtime=None):
if mem_limit is not None:
self['Memory'] = parse_bytes(mem_limit)
if memswap_limit is not None:
self['MemorySwap'] = parse_bytes(memswap_limit)
if mem_reservation:
if version_lt(version, '1.21'):
raise host_config_version_error('mem_reservation', '1.21')
self['MemoryReservation'] = parse_bytes(mem_reservation)
if kernel_memory:
if version_lt(version, '1.21'):
raise host_config_version_error('kernel_memory', '1.21')
self['KernelMemory'] = parse_bytes(kernel_memory)
if mem_swappiness is not None:
if version_lt(version, '1.20'):
raise host_config_version_error('mem_swappiness', '1.20')
if not isinstance(mem_swappiness, int):
raise host_config_type_error(
'mem_swappiness', mem_swappiness, 'int'
)
self['MemorySwappiness'] = mem_swappiness
if shm_size is not None:
if isinstance(shm_size, six.string_types):
shm_size = parse_bytes(shm_size)
self['ShmSize'] = shm_size
if pid_mode:
if version_lt(version, '1.24') and pid_mode != 'host':
raise host_config_value_error('pid_mode', pid_mode)
self['PidMode'] = pid_mode
if ipc_mode:
self['IpcMode'] = ipc_mode
if privileged:
self['Privileged'] = privileged
if oom_kill_disable:
if version_lt(version, '1.20'):
                raise host_config_version_error('oom_kill_disable', '1.20')
self['OomKillDisable'] = oom_kill_disable
if oom_score_adj:
if version_lt(version, '1.22'):
raise host_config_version_error('oom_score_adj', '1.22')
if not isinstance(oom_score_adj, int):
raise host_config_type_error(
'oom_score_adj', oom_score_adj, 'int'
)
self['OomScoreAdj'] = oom_score_adj
if publish_all_ports:
self['PublishAllPorts'] = publish_all_ports
if read_only is not None:
self['ReadonlyRootfs'] = read_only
if dns_search:
self['DnsSearch'] = dns_search
if network_mode:
self['NetworkMode'] = network_mode
elif network_mode is None and version_gte(version, '1.20'):
self['NetworkMode'] = 'default'
if restart_policy:
if not isinstance(restart_policy, dict):
raise host_config_type_error(
'restart_policy', restart_policy, 'dict'
)
self['RestartPolicy'] = restart_policy
if cap_add:
self['CapAdd'] = cap_add
if cap_drop:
self['CapDrop'] = cap_drop
if devices:
self['Devices'] = parse_devices(devices)
if group_add:
if version_lt(version, '1.20'):
raise host_config_version_error('group_add', '1.20')
self['GroupAdd'] = [six.text_type(grp) for grp in group_add]
if dns is not None:
self['Dns'] = dns
if dns_opt is not None:
if version_lt(version, '1.21'):
raise host_config_version_error('dns_opt', '1.21')
self['DnsOptions'] = dns_opt
if security_opt is not None:
if not isinstance(security_opt, list):
raise host_config_type_error(
'security_opt', security_opt, 'list'
)
self['SecurityOpt'] = security_opt
if sysctls:
if not isinstance(sysctls, dict):
raise host_config_type_error('sysctls', sysctls, 'dict')
self['Sysctls'] = {}
for k, v in six.iteritems(sysctls):
self['Sysctls'][k] = six.text_type(v)
if volumes_from is not None:
if isinstance(volumes_from, six.string_types):
volumes_from = volumes_from.split(',')
self['VolumesFrom'] = volumes_from
if binds is not None:
self['Binds'] = convert_volume_binds(binds)
if port_bindings is not None:
self['PortBindings'] = convert_port_bindings(port_bindings)
if extra_hosts is not None:
if isinstance(extra_hosts, dict):
extra_hosts = [
'{0}:{1}'.format(k, v)
for k, v in sorted(six.iteritems(extra_hosts))
]
self['ExtraHosts'] = extra_hosts
if links is not None:
self['Links'] = normalize_links(links)
if isinstance(lxc_conf, dict):
formatted = []
for k, v in six.iteritems(lxc_conf):
formatted.append({'Key': k, 'Value': str(v)})
lxc_conf = formatted
if lxc_conf is not None:
self['LxcConf'] = lxc_conf
if cgroup_parent is not None:
self['CgroupParent'] = cgroup_parent
if ulimits is not None:
if not isinstance(ulimits, list):
raise host_config_type_error('ulimits', ulimits, 'list')
self['Ulimits'] = []
for l in ulimits:
if not isinstance(l, Ulimit):
l = Ulimit(**l)
self['Ulimits'].append(l)
if log_config is not None:
if not isinstance(log_config, LogConfig):
if not isinstance(log_config, dict):
raise host_config_type_error(
'log_config', log_config, 'LogConfig'
)
log_config = LogConfig(**log_config)
self['LogConfig'] = log_config
if cpu_quota:
if not isinstance(cpu_quota, int):
raise host_config_type_error('cpu_quota', cpu_quota, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_quota', '1.19')
self['CpuQuota'] = cpu_quota
if cpu_period:
if not isinstance(cpu_period, int):
raise host_config_type_error('cpu_period', cpu_period, 'int')
if version_lt(version, '1.19'):
raise host_config_version_error('cpu_period', '1.19')
self['CpuPeriod'] = cpu_period
if cpu_shares:
if version_lt(version, '1.18'):
raise host_config_version_error('cpu_shares', '1.18')
if not isinstance(cpu_shares, int):
raise host_config_type_error('cpu_shares', cpu_shares, 'int')
self['CpuShares'] = cpu_shares
if cpuset_cpus:
if version_lt(version, '1.18'):
raise host_config_version_error('cpuset_cpus', '1.18')
self['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
if version_lt(version, '1.19'):
raise host_config_version_error('cpuset_mems', '1.19')
if not isinstance(cpuset_mems, str):
raise host_config_type_error(
'cpuset_mems', cpuset_mems, 'str'
)
self['CpusetMems'] = cpuset_mems
if blkio_weight:
if not isinstance(blkio_weight, int):
raise host_config_type_error(
'blkio_weight', blkio_weight, 'int'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight', '1.22')
self["BlkioWeight"] = blkio_weight
if blkio_weight_device:
if not isinstance(blkio_weight_device, list):
raise host_config_type_error(
'blkio_weight_device', blkio_weight_device, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('blkio_weight_device', '1.22')
self["BlkioWeightDevice"] = blkio_weight_device
if device_read_bps:
if not isinstance(device_read_bps, list):
raise host_config_type_error(
'device_read_bps', device_read_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_bps', '1.22')
self["BlkioDeviceReadBps"] = device_read_bps
if device_write_bps:
if not isinstance(device_write_bps, list):
raise host_config_type_error(
'device_write_bps', device_write_bps, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_bps', '1.22')
self["BlkioDeviceWriteBps"] = device_write_bps
if device_read_iops:
if not isinstance(device_read_iops, list):
raise host_config_type_error(
'device_read_iops', device_read_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_read_iops', '1.22')
self["BlkioDeviceReadIOps"] = device_read_iops
if device_write_iops:
if not isinstance(device_write_iops, list):
raise host_config_type_error(
'device_write_iops', device_write_iops, 'list'
)
if version_lt(version, '1.22'):
raise host_config_version_error('device_write_iops', '1.22')
self["BlkioDeviceWriteIOps"] = device_write_iops
if tmpfs:
if version_lt(version, '1.22'):
raise host_config_version_error('tmpfs', '1.22')
self["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
if userns_mode:
if version_lt(version, '1.23'):
raise host_config_version_error('userns_mode', '1.23')
if userns_mode != "host":
raise host_config_value_error("userns_mode", userns_mode)
self['UsernsMode'] = userns_mode
if pids_limit:
if not isinstance(pids_limit, int):
raise host_config_type_error('pids_limit', pids_limit, 'int')
if version_lt(version, '1.23'):
raise host_config_version_error('pids_limit', '1.23')
self["PidsLimit"] = pids_limit
if isolation:
if not isinstance(isolation, six.string_types):
raise host_config_type_error('isolation', isolation, 'string')
if version_lt(version, '1.24'):
raise host_config_version_error('isolation', '1.24')
self['Isolation'] = isolation
if auto_remove:
if version_lt(version, '1.25'):
raise host_config_version_error('auto_remove', '1.25')
self['AutoRemove'] = auto_remove
if storage_opt is not None:
if version_lt(version, '1.24'):
raise host_config_version_error('storage_opt', '1.24')
self['StorageOpt'] = storage_opt
if init is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init', '1.25')
self['Init'] = init
if init_path is not None:
if version_lt(version, '1.25'):
raise host_config_version_error('init_path', '1.25')
if version_gte(version, '1.29'):
# https://github.com/moby/moby/pull/32470
raise host_config_version_error('init_path', '1.29', False)
self['InitPath'] = init_path
if volume_driver is not None:
if version_lt(version, '1.21'):
raise host_config_version_error('volume_driver', '1.21')
self['VolumeDriver'] = volume_driver
if cpu_count:
if not isinstance(cpu_count, int):
raise host_config_type_error('cpu_count', cpu_count, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_count', '1.25')
self['CpuCount'] = cpu_count
if cpu_percent:
if not isinstance(cpu_percent, int):
raise host_config_type_error('cpu_percent', cpu_percent, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('cpu_percent', '1.25')
self['CpuPercent'] = cpu_percent
if nano_cpus:
if not isinstance(nano_cpus, six.integer_types):
raise host_config_type_error('nano_cpus', nano_cpus, 'int')
if version_lt(version, '1.25'):
raise host_config_version_error('nano_cpus', '1.25')
self['NanoCpus'] = nano_cpus
if runtime:
if version_lt(version, '1.25'):
raise host_config_version_error('runtime', '1.25')
self['Runtime'] = runtime
def host_config_type_error(param, param_value, expected):
error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
return TypeError(error_msg.format(param, expected, type(param_value)))
def host_config_version_error(param, version, less_than=True):
operator = '<' if less_than else '>'
error_msg = '{0} param is not supported in API versions {1} {2}'
return errors.InvalidVersion(error_msg.format(param, operator, version))
def host_config_value_error(param, param_value):
error_msg = 'Invalid value for {0} param: {1}'
return ValueError(error_msg.format(param, param_value))
class ContainerConfig(dict):
def __init__(
self, version, image, command, hostname=None, user=None, detach=False,
stdin_open=False, tty=False, mem_limit=None, ports=None, dns=None,
environment=None, volumes=None, volumes_from=None,
network_disabled=False, entrypoint=None, cpu_shares=None,
working_dir=None, domainname=None, memswap_limit=None, cpuset=None,
host_config=None, mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None, healthcheck=None,
stop_timeout=None, runtime=None
):
if version_gte(version, '1.10'):
message = ('{0!r} parameter has no effect on create_container().'
' It has been moved to host_config')
if dns is not None:
raise errors.InvalidVersion(message.format('dns'))
if volumes_from is not None:
raise errors.InvalidVersion(message.format('volumes_from'))
if version_lt(version, '1.18'):
if labels is not None:
raise errors.InvalidVersion(
'labels were only introduced in API version 1.18'
)
else:
if cpuset is not None or cpu_shares is not None:
warnings.warn(
'The cpuset_cpus and cpu_shares options have been moved to'
' host_config in API version 1.18, and will be removed',
DeprecationWarning
)
if version_lt(version, '1.19'):
if volume_driver is not None:
raise errors.InvalidVersion(
'Volume drivers were only introduced in API version 1.19'
)
mem_limit = mem_limit if mem_limit is not None else 0
memswap_limit = memswap_limit if memswap_limit is not None else 0
else:
if mem_limit is not None:
raise errors.InvalidVersion(
'mem_limit has been moved to host_config in API version'
' 1.19'
)
if memswap_limit is not None:
raise errors.InvalidVersion(
'memswap_limit has been moved to host_config in API '
'version 1.19'
)
if version_lt(version, '1.21'):
if stop_signal is not None:
raise errors.InvalidVersion(
'stop_signal was only introduced in API version 1.21'
)
else:
if volume_driver is not None:
warnings.warn(
'The volume_driver option has been moved to'
' host_config in API version 1.21, and will be removed',
DeprecationWarning
)
if stop_timeout is not None and version_lt(version, '1.25'):
raise errors.InvalidVersion(
'stop_timeout was only introduced in API version 1.25'
)
if healthcheck is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'Health options were only introduced in API version 1.24'
)
if version_lt(version, '1.29') and 'StartPeriod' in healthcheck:
raise errors.InvalidVersion(
'healthcheck start period was introduced in API '
'version 1.29'
)
if isinstance(command, six.string_types):
command = split_command(command)
if isinstance(entrypoint, six.string_types):
entrypoint = split_command(entrypoint)
if isinstance(environment, dict):
environment = format_environment(environment)
if isinstance(labels, list):
labels = dict((lbl, six.text_type('')) for lbl in labels)
if mem_limit is not None:
mem_limit = parse_bytes(mem_limit)
if memswap_limit is not None:
memswap_limit = parse_bytes(memswap_limit)
if isinstance(ports, list):
exposed_ports = {}
for port_definition in ports:
port = port_definition
proto = 'tcp'
if isinstance(port_definition, tuple):
if len(port_definition) == 2:
proto = port_definition[1]
port = port_definition[0]
exposed_ports['{0}/{1}'.format(port, proto)] = {}
ports = exposed_ports
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
if isinstance(volumes, list):
volumes_dict = {}
for vol in volumes:
volumes_dict[vol] = {}
volumes = volumes_dict
if volumes_from:
if not isinstance(volumes_from, six.string_types):
volumes_from = ','.join(volumes_from)
else:
# Force None, an empty list or dict causes client.start to fail
volumes_from = None
if healthcheck and isinstance(healthcheck, dict):
healthcheck = Healthcheck(**healthcheck)
attach_stdin = False
attach_stdout = False
attach_stderr = False
stdin_once = False
if not detach:
attach_stdout = True
attach_stderr = True
if stdin_open:
attach_stdin = True
stdin_once = True
self.update({
'Hostname': hostname,
'Domainname': domainname,
'ExposedPorts': ports,
'User': six.text_type(user) if user else None,
'Tty': tty,
'OpenStdin': stdin_open,
'StdinOnce': stdin_once,
'Memory': mem_limit,
'AttachStdin': attach_stdin,
'AttachStdout': attach_stdout,
'AttachStderr': attach_stderr,
'Env': environment,
'Cmd': command,
'Dns': dns,
'Image': image,
'Volumes': volumes,
'VolumesFrom': volumes_from,
'NetworkDisabled': network_disabled,
'Entrypoint': entrypoint,
'CpuShares': cpu_shares,
'Cpuset': cpuset,
'CpusetCpus': cpuset,
'WorkingDir': working_dir,
'MemorySwap': memswap_limit,
'HostConfig': host_config,
'NetworkingConfig': networking_config,
'MacAddress': mac_address,
'Labels': labels,
'VolumeDriver': volume_driver,
'StopSignal': stop_signal,
'Healthcheck': healthcheck,
'StopTimeout': stop_timeout,
'Runtime': runtime
})
docker-2.5.1/docker/types/services.py 0000664 0001750 0001750 00000042543 13142163435 020744 0 ustar joffrey joffrey 0000000 0000000 import six
from .. import errors
from ..constants import IS_WINDOWS_PLATFORM
from ..utils import check_resource, format_environment, split_command
class TaskTemplate(dict):
"""
    Describes the task specification to be used when creating or updating a
service.
Args:
container_spec (ContainerSpec): Container settings for containers
started as part of this task.
log_driver (DriverConfig): Log configuration for containers created as
part of the service.
resources (Resources): Resource requirements which apply to each
individual container created as part of the service.
restart_policy (RestartPolicy): Specification for the restart policy
which applies to containers created as part of this service.
placement (Placement): Placement instructions for the scheduler.
If a list is passed instead, it is assumed to be a list of
constraints as part of a :py:class:`Placement` object.
force_update (int): A counter that triggers an update even if no
relevant parameters have been changed.
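    Example:
        A minimal sketch, assuming ``container_spec`` is a previously built
        :py:class:`~docker.types.ContainerSpec`:

        >>> task_template = TaskTemplate(
            container_spec=container_spec,
            restart_policy=RestartPolicy(condition='on-failure')
        )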
"""
def __init__(self, container_spec, resources=None, restart_policy=None,
placement=None, log_driver=None, force_update=None):
self['ContainerSpec'] = container_spec
if resources:
self['Resources'] = resources
if restart_policy:
self['RestartPolicy'] = restart_policy
if placement:
if isinstance(placement, list):
placement = Placement(constraints=placement)
self['Placement'] = placement
if log_driver:
self['LogDriver'] = log_driver
if force_update is not None:
if not isinstance(force_update, int):
raise TypeError('force_update must be an integer')
self['ForceUpdate'] = force_update
@property
def container_spec(self):
return self.get('ContainerSpec')
@property
def resources(self):
return self.get('Resources')
@property
def restart_policy(self):
return self.get('RestartPolicy')
@property
def placement(self):
return self.get('Placement')
class ContainerSpec(dict):
"""
Describes the behavior of containers that are part of a task, and is used
when declaring a :py:class:`~docker.types.TaskTemplate`.
Args:
image (string): The image name to use for the container.
command (string or list): The command to be run in the image.
args (:py:class:`list`): Arguments to the command.
hostname (string): The hostname to set on the container.
env (dict): Environment variables.
        workdir (string): The working directory for commands to run in.
user (string): The user inside the container.
labels (dict): A map of labels to associate with the service.
mounts (:py:class:`list`): A list of specifications for mounts to be
added to containers created as part of the service. See the
:py:class:`~docker.types.Mount` class for details.
stop_grace_period (int): Amount of time to wait for the container to
terminate before forcefully killing it.
secrets (list of py:class:`SecretReference`): List of secrets to be
made available inside the containers.
tty (boolean): Whether a pseudo-TTY should be allocated.
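    Example:
        An illustrative sketch; the image name, command and environment
        values are placeholders:

        >>> container_spec = ContainerSpec(
            image='busybox', command='sleep 300',
            env={'FOO': 'bar'}, tty=True
        )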
"""
def __init__(self, image, command=None, args=None, hostname=None, env=None,
workdir=None, user=None, labels=None, mounts=None,
stop_grace_period=None, secrets=None, tty=None):
self['Image'] = image
if isinstance(command, six.string_types):
command = split_command(command)
self['Command'] = command
self['Args'] = args
if hostname is not None:
self['Hostname'] = hostname
if env is not None:
if isinstance(env, dict):
self['Env'] = format_environment(env)
else:
self['Env'] = env
if workdir is not None:
self['Dir'] = workdir
if user is not None:
self['User'] = user
if labels is not None:
self['Labels'] = labels
if mounts is not None:
parsed_mounts = []
for mount in mounts:
if isinstance(mount, six.string_types):
parsed_mounts.append(Mount.parse_mount_string(mount))
else:
# If mount already parsed
parsed_mounts.append(mount)
self['Mounts'] = parsed_mounts
if stop_grace_period is not None:
self['StopGracePeriod'] = stop_grace_period
if secrets is not None:
if not isinstance(secrets, list):
raise TypeError('secrets must be a list')
self['Secrets'] = secrets
if tty is not None:
self['TTY'] = tty
class Mount(dict):
"""
Describes a mounted folder's configuration inside a container. A list of
:py:class:`Mount` would be used as part of a
:py:class:`~docker.types.ContainerSpec`.
Args:
target (string): Container path.
source (string): Mount source (e.g. a volume name or a host path).
type (string): The mount type (``bind`` or ``volume``).
Default: ``volume``.
read_only (bool): Whether the mount should be read-only.
propagation (string): A propagation mode with the value ``[r]private``,
``[r]shared``, or ``[r]slave``. Only valid for the ``bind`` type.
        no_copy (bool): If ``True``, the volume is not populated with data
            copied from the container's ``target`` path. Default: ``False``.
            Only valid for the ``volume`` type.
labels (dict): User-defined name and labels for the volume. Only valid
for the ``volume`` type.
driver_config (DriverConfig): Volume driver configuration. Only valid
for the ``volume`` type.
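    Example:
        Two illustrative sketches; the volume name and host paths are
        placeholders. The second form relies on ``parse_mount_string``
        inferring the ``bind`` type from the leading ``/``:

        >>> volume_mount = Mount(target='/data', source='data', type='volume')
        >>> bind_mount = Mount.parse_mount_string('/opt/src:/src:ro')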
"""
def __init__(self, target, source, type='volume', read_only=False,
propagation=None, no_copy=False, labels=None,
driver_config=None):
self['Target'] = target
self['Source'] = source
if type not in ('bind', 'volume'):
raise errors.InvalidArgument(
'Only acceptable mount types are `bind` and `volume`.'
)
self['Type'] = type
self['ReadOnly'] = read_only
if type == 'bind':
if propagation is not None:
self['BindOptions'] = {
'Propagation': propagation
}
if any([labels, driver_config, no_copy]):
raise errors.InvalidArgument(
'Mount type is binding but volume options have been '
'provided.'
)
else:
volume_opts = {}
if no_copy:
volume_opts['NoCopy'] = True
if labels:
volume_opts['Labels'] = labels
if driver_config:
volume_opts['DriverConfig'] = driver_config
if volume_opts:
self['VolumeOptions'] = volume_opts
if propagation:
raise errors.InvalidArgument(
'Mount type is volume but `propagation` argument has been '
'provided.'
)
@classmethod
def parse_mount_string(cls, string):
parts = string.split(':')
if len(parts) > 3:
raise errors.InvalidArgument(
'Invalid mount format "{0}"'.format(string)
)
if len(parts) == 1:
return cls(target=parts[0], source=None)
else:
target = parts[1]
source = parts[0]
mount_type = 'volume'
if source.startswith('/') or (
IS_WINDOWS_PLATFORM and source[0].isalpha() and
source[1] == ':'
):
# FIXME: That windows condition will fail earlier since we
# split on ':'. We should look into doing a smarter split
# if we detect we are on Windows.
mount_type = 'bind'
read_only = not (len(parts) == 2 or parts[2] == 'rw')
return cls(target, source, read_only=read_only, type=mount_type)
class Resources(dict):
"""
Configures resource allocation for containers when made part of a
:py:class:`~docker.types.ContainerSpec`.
Args:
cpu_limit (int): CPU limit in units of 10^9 CPU shares.
mem_limit (int): Memory limit in Bytes.
cpu_reservation (int): CPU reservation in units of 10^9 CPU shares.
mem_reservation (int): Memory reservation in Bytes.
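    Example:
        An illustrative sketch limiting each container to half a CPU and
        64MiB of memory (the values are placeholders):

        >>> resources = Resources(
            cpu_limit=int(0.5 * 10 ** 9), mem_limit=64 * 1024 ** 2
        )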
"""
def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
mem_reservation=None):
limits = {}
reservation = {}
if cpu_limit is not None:
limits['NanoCPUs'] = cpu_limit
if mem_limit is not None:
limits['MemoryBytes'] = mem_limit
if cpu_reservation is not None:
reservation['NanoCPUs'] = cpu_reservation
if mem_reservation is not None:
reservation['MemoryBytes'] = mem_reservation
if limits:
self['Limits'] = limits
if reservation:
self['Reservations'] = reservation
class UpdateConfig(dict):
"""
Used to specify the way container updates should be performed by a service.
Args:
parallelism (int): Maximum number of tasks to be updated in one
iteration (0 means unlimited parallelism). Default: 0.
delay (int): Amount of time between updates.
failure_action (string): Action to take if an updated task fails to
run, or stops running during the update. Acceptable values are
``continue`` and ``pause``. Default: ``continue``
monitor (int): Amount of time to monitor each updated task for
failures, in nanoseconds.
max_failure_ratio (float): The fraction of tasks that may fail during
an update before the failure action is invoked, specified as a
floating point number between 0 and 1. Default: 0
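    Example:
        An illustrative sketch (the values are placeholders):

        >>> update_config = UpdateConfig(
            parallelism=2, failure_action='pause', max_failure_ratio=0.1
        )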
"""
def __init__(self, parallelism=0, delay=None, failure_action='continue',
monitor=None, max_failure_ratio=None):
self['Parallelism'] = parallelism
if delay is not None:
self['Delay'] = delay
if failure_action not in ('pause', 'continue'):
raise errors.InvalidArgument(
'failure_action must be either `pause` or `continue`.'
)
self['FailureAction'] = failure_action
if monitor is not None:
if not isinstance(monitor, int):
raise TypeError('monitor must be an integer')
self['Monitor'] = monitor
if max_failure_ratio is not None:
if not isinstance(max_failure_ratio, (float, int)):
raise TypeError('max_failure_ratio must be a float')
if max_failure_ratio > 1 or max_failure_ratio < 0:
raise errors.InvalidArgument(
'max_failure_ratio must be a number between 0 and 1'
)
self['MaxFailureRatio'] = max_failure_ratio
class RestartConditionTypesEnum(object):
_values = (
'none',
'on-failure',
'any',
)
NONE, ON_FAILURE, ANY = _values
class RestartPolicy(dict):
"""
Used when creating a :py:class:`~docker.types.ContainerSpec`,
dictates whether a container should restart after stopping or failing.
Args:
condition (string): Condition for restart (``none``, ``on-failure``,
            or ``any``). Default: ``none``.
delay (int): Delay between restart attempts. Default: 0
max_attempts (int): Maximum attempts to restart a given container
before giving up. Default value is 0, which is ignored.
window (int): Time window used to evaluate the restart policy. Default
value is 0, which is unbounded.
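    Example:
        An illustrative sketch using the enum exposed as
        ``RestartPolicy.condition_types``:

        >>> restart_policy = RestartPolicy(
            condition=RestartPolicy.condition_types.ON_FAILURE,
            max_attempts=3
        )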
"""
condition_types = RestartConditionTypesEnum
def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
'Invalid RestartPolicy condition {0}'.format(condition)
)
self['Condition'] = condition
self['Delay'] = delay
self['MaxAttempts'] = max_attempts
self['Window'] = window
class DriverConfig(dict):
"""
    Indicates which driver to use, as well as its configuration. Can be used
    as ``log_driver`` in a :py:class:`~docker.types.TaskTemplate`, and for
    the ``driver_config`` in a volume :py:class:`~docker.types.Mount`.
Args:
name (string): Name of the driver to use.
options (dict): Driver-specific options. Default: ``None``.
"""
def __init__(self, name, options=None):
self['Name'] = name
if options:
self['Options'] = options
class EndpointSpec(dict):
"""
Describes properties to access and load-balance a service.
Args:
mode (string): The mode of resolution to use for internal load
balancing between tasks (``'vip'`` or ``'dnsrr'``). Defaults to
``'vip'`` if not provided.
        ports (dict): Exposed ports that this service is accessible on from
            the outside, in the form of ``{ published_port: target_port }``
            or ``{ published_port: (target_port, protocol) }``. Ports can
            only be provided if the ``vip`` resolution mode is used.
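    Example:
        An illustrative sketch publishing container port 80 on external
        port 8080 (the port numbers are placeholders):

        >>> endpoint_spec = EndpointSpec(ports={8080: 80})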
"""
def __init__(self, mode=None, ports=None):
if ports:
self['Ports'] = convert_service_ports(ports)
if mode:
self['Mode'] = mode
def convert_service_ports(ports):
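    # Normalize a ``{published_port: target_port}`` mapping into the list of
    # port specs the Engine API expects; a list is assumed to already be in
    # that form and is passed through unchanged.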
if isinstance(ports, list):
return ports
if not isinstance(ports, dict):
raise TypeError(
'Invalid type for ports, expected dict or list'
)
result = []
for k, v in six.iteritems(ports):
port_spec = {
'Protocol': 'tcp',
'PublishedPort': k
}
if isinstance(v, tuple):
port_spec['TargetPort'] = v[0]
if len(v) == 2:
port_spec['Protocol'] = v[1]
else:
port_spec['TargetPort'] = v
result.append(port_spec)
return result
class ServiceMode(dict):
"""
    Indicates whether a service should be deployed as a replicated or global
    service, along with its associated parameters.
Args:
mode (string): Can be either ``replicated`` or ``global``
replicas (int): Number of replicas. For replicated services only.
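    Example:
        An illustrative sketch:

        >>> mode = ServiceMode('replicated', replicas=3)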
"""
def __init__(self, mode, replicas=None):
if mode not in ('replicated', 'global'):
raise errors.InvalidArgument(
'mode must be either "replicated" or "global"'
)
if mode != 'replicated' and replicas is not None:
raise errors.InvalidArgument(
'replicas can only be used for replicated mode'
)
self[mode] = {}
if replicas is not None:
self[mode]['Replicas'] = replicas
@property
def mode(self):
if 'global' in self:
return 'global'
return 'replicated'
@property
def replicas(self):
if self.mode != 'replicated':
return None
return self['replicated'].get('Replicas')
class SecretReference(dict):
"""
Secret reference to be used as part of a :py:class:`ContainerSpec`.
Describes how a secret is made accessible inside the service's
containers.
Args:
secret_id (string): Secret's ID
secret_name (string): Secret's name as defined at its creation.
filename (string): Name of the file containing the secret. Defaults
to the secret's name if not specified.
uid (string): UID of the secret file's owner. Default: 0
gid (string): GID of the secret file's group. Default: 0
mode (int): File access mode inside the container. Default: 0o444
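    Example:
        An illustrative sketch; the ID and name are placeholders for the
        values returned when the secret was created:

        >>> secret_ref = SecretReference(
            secret_id='22pfk4wsqi', secret_name='app_secret'
        )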
"""
@check_resource('secret_id')
def __init__(self, secret_id, secret_name, filename=None, uid=None,
gid=None, mode=0o444):
self['SecretName'] = secret_name
self['SecretID'] = secret_id
self['File'] = {
'Name': filename or secret_name,
'UID': uid or '0',
'GID': gid or '0',
'Mode': mode
}
class Placement(dict):
"""
Placement constraints to be used as part of a :py:class:`TaskTemplate`
Args:
constraints (list): A list of constraints
preferences (list): Preferences provide a way to make the
scheduler aware of factors such as topology. They are provided
in order from highest to lowest precedence.
platforms (list): A list of platforms expressed as ``(arch, os)``
tuples
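    Example:
        An illustrative sketch using the engine's constraint syntax:

        >>> placement = Placement(constraints=['node.role == manager'])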
"""
def __init__(self, constraints=None, preferences=None, platforms=None):
if constraints is not None:
self['Constraints'] = constraints
if preferences is not None:
self['Preferences'] = preferences
if platforms:
self['Platforms'] = []
for plat in platforms:
self['Platforms'].append({
'Architecture': plat[0], 'OS': plat[1]
})
docker-2.5.1/docker/types/networks.py 0000664 0001750 0001750 00000006765 13106703752 021005 0 ustar joffrey joffrey 0000000 0000000 from .. import errors
from ..utils import normalize_links, version_lt
class EndpointConfig(dict):
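    """
    Network endpoint configuration for a container, requiring API version
    1.22 or above.

    Args:
        aliases (:py:class:`list`): Network-scoped aliases for the
            container.
        links (:py:class:`list`): Links to other containers.
        ipv4_address (str): IPv4 address to assign on this network.
        ipv6_address (str): IPv6 address to assign on this network.
        link_local_ips (:py:class:`list`): Link-local IP addresses
            (API version 1.24 and above).
    """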
def __init__(self, version, aliases=None, links=None, ipv4_address=None,
ipv6_address=None, link_local_ips=None):
if version_lt(version, '1.22'):
raise errors.InvalidVersion(
'Endpoint config is not supported for API version < 1.22'
)
if aliases:
self["Aliases"] = aliases
if links:
self["Links"] = normalize_links(links)
ipam_config = {}
if ipv4_address:
ipam_config['IPv4Address'] = ipv4_address
if ipv6_address:
ipam_config['IPv6Address'] = ipv6_address
if link_local_ips is not None:
if version_lt(version, '1.24'):
raise errors.InvalidVersion(
'link_local_ips is not supported for API version < 1.24'
)
ipam_config['LinkLocalIPs'] = link_local_ips
if ipam_config:
self['IPAMConfig'] = ipam_config
class NetworkingConfig(dict):
def __init__(self, endpoints_config=None):
if endpoints_config:
self["EndpointsConfig"] = endpoints_config
class IPAMConfig(dict):
"""
Create an IPAM (IP Address Management) config dictionary to be used with
:py:meth:`~docker.api.network.NetworkApiMixin.create_network`.
Args:
driver (str): The IPAM driver to use. Defaults to ``default``.
pool_configs (:py:class:`list`): A list of pool configurations
(:py:class:`~docker.types.IPAMPool`). Defaults to empty list.
options (dict): Driver options as a key-value dictionary.
Defaults to `None`.
Example:
>>> ipam_config = docker.types.IPAMConfig(driver='default')
>>> network = client.create_network('network1', ipam=ipam_config)
"""
def __init__(self, driver='default', pool_configs=None, options=None):
self.update({
'Driver': driver,
'Config': pool_configs or []
})
if options:
if not isinstance(options, dict):
raise TypeError('IPAMConfig options must be a dictionary')
self['Options'] = options
class IPAMPool(dict):
"""
Create an IPAM pool config dictionary to be added to the
``pool_configs`` parameter of
:py:class:`~docker.types.IPAMConfig`.
Args:
subnet (str): Custom subnet for this IPAM pool using the CIDR
notation. Defaults to ``None``.
iprange (str): Custom IP range for endpoints in this IPAM pool using
the CIDR notation. Defaults to ``None``.
gateway (str): Custom IP address for the pool's gateway.
aux_addresses (dict): A dictionary of ``key -> ip_address``
relationships specifying auxiliary addresses that need to be
allocated by the IPAM driver.
Example:
>>> ipam_pool = docker.types.IPAMPool(
subnet='124.42.0.0/16',
iprange='124.42.0.0/24',
gateway='124.42.0.254',
aux_addresses={
'reserved1': '124.42.1.1'
}
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool])
"""
def __init__(self, subnet=None, iprange=None, gateway=None,
aux_addresses=None):
self.update({
'Subnet': subnet,
'IPRange': iprange,
'Gateway': gateway,
'AuxiliaryAddresses': aux_addresses
})
docker-2.5.1/docker/types/swarm.py 0000664 0001750 0001750 00000003147 13023617644 020253 0 ustar joffrey joffrey 0000000 0000000 class SwarmSpec(dict):
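    """
    Describes a swarm's configuration, grouping orchestration, raft,
    dispatcher and CA settings into the structure expected by the Engine
    API. Only the sections for which at least one argument is supplied are
    included in the resulting dict.
    """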
def __init__(self, task_history_retention_limit=None,
snapshot_interval=None, keep_old_snapshots=None,
log_entries_for_slow_followers=None, heartbeat_tick=None,
election_tick=None, dispatcher_heartbeat_period=None,
node_cert_expiry=None, external_ca=None, name=None):
if task_history_retention_limit is not None:
self['Orchestration'] = {
'TaskHistoryRetentionLimit': task_history_retention_limit
}
if any([snapshot_interval,
keep_old_snapshots,
log_entries_for_slow_followers,
heartbeat_tick,
election_tick]):
self['Raft'] = {
'SnapshotInterval': snapshot_interval,
'KeepOldSnapshots': keep_old_snapshots,
'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
'HeartbeatTick': heartbeat_tick,
'ElectionTick': election_tick
}
if dispatcher_heartbeat_period:
self['Dispatcher'] = {
'HeartbeatPeriod': dispatcher_heartbeat_period
}
if node_cert_expiry or external_ca:
self['CAConfig'] = {
'NodeCertExpiry': node_cert_expiry,
'ExternalCA': external_ca
}
if name is not None:
self['Name'] = name
class SwarmExternalCA(dict):
def __init__(self, url, protocol=None, options=None):
self['URL'] = url
self['Protocol'] = protocol
self['Options'] = options
docker-2.5.1/docker/types/base.py 0000664 0001750 0001750 00000000202 13021666666 020027 0 ustar joffrey joffrey 0000000 0000000 import six
class DictType(dict):
def __init__(self, init):
for k, v in six.iteritems(init):
self[k] = v
docker-2.5.1/docker/types/__init__.py 0000664 0001750 0001750 00000000650 13124577310 020652 0 ustar joffrey joffrey 0000000 0000000 # flake8: noqa
from .containers import ContainerConfig, HostConfig, LogConfig, Ulimit
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ContainerSpec, DriverConfig, EndpointSpec, Mount, Placement, Resources,
RestartPolicy, SecretReference, ServiceMode, TaskTemplate, UpdateConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
docker-2.5.1/docker/transport/ 0000775 0001750 0001750 00000000000 13147142650 017430 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/transport/ssladapter.py 0000664 0001750 0001750 00000004750 13142163435 022151 0 ustar joffrey joffrey 0000000 0000000 """ Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
import sys
from distutils.version import StrictVersion
from requests.adapters import HTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
PoolManager = urllib3.poolmanager.PoolManager
# Monkey-patching match_hostname with a version that supports
# IP-address checking. Not necessary for Python 3.5 and above
if sys.version_info < (3, 5):
from backports.ssl_match_hostname import match_hostname
urllib3.connection.match_hostname = match_hostname
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
'assert_hostname',
'ssl_version']
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args, **kwargs):
"""
Ensure assert_hostname is set correctly on our pool
We already take care of a normal poolmanager via init_poolmanager
But we still need to take care of when there is a proxy poolmanager
"""
conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
def can_override_ssl_version(self):
urllib_ver = urllib3.__version__.split('-')[0]
if urllib_ver is None:
return False
if urllib_ver == 'dev':
return True
return StrictVersion(urllib_ver) > StrictVersion('1.5')
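# Hedged usage sketch (illustrative addition, not part of the upstream
# module): pin a TLS protocol version on a plain requests Session by mounting
# SSLAdapter for https:// URLs. The target URL is a placeholder.
def _example_ssl_adapter():
    import ssl
    import requests
    session = requests.Session()
    session.mount('https://', SSLAdapter(ssl_version=ssl.PROTOCOL_TLSv1_2))
    return session.get('https://example.com/')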
docker-2.5.1/docker/transport/npipesocket.py 0000664 0001750 0001750 00000013341 13106703727 022333 0 ustar joffrey joffrey 0000000 0000000 import functools
import io
import six
import win32file
import win32pipe
cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
RETRY_WAIT_TIMEOUT = 10000
def check_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self._closed:
raise RuntimeError(
                'Cannot reuse socket after connection was closed.'
)
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket(object):
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._closed = False
def accept(self):
raise NotImplementedError()
def bind(self, address):
raise NotImplementedError()
def close(self):
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address):
win32pipe.WaitNamedPipe(address, self._timeout)
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
0
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == cERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
return self.connect(address)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address):
return self.connect(address)
@check_closed
def detach(self):
self._closed = True
return self._handle
@check_closed
def dup(self):
return NpipeSocket(self._handle)
@check_closed
def fileno(self):
return int(self._handle)
def getpeername(self):
return self._address
def getsockname(self):
return self._address
def getsockopt(self, level, optname, buflen=None):
raise NotImplementedError()
def ioctl(self, control, option):
raise NotImplementedError()
def listen(self, backlog):
raise NotImplementedError()
def makefile(self, mode=None, bufsize=None):
if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
def recv(self, bufsize, flags=0):
err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
def recvfrom(self, bufsize, flags=0):
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(self, buf, nbytes=0, flags=0):
return self.recv_into(buf, nbytes, flags), self._address
@check_closed
def recv_into(self, buf, nbytes=0):
if six.PY2:
return self._recv_into_py2(buf, nbytes)
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
err, data = win32file.ReadFile(
self._handle,
readbuf[:nbytes] if nbytes else readbuf
)
return len(data)
def _recv_into_py2(self, buf, nbytes):
err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
n = len(data)
buf[:n] = data
return n
@check_closed
def send(self, string, flags=0):
err, nbytes = win32file.WriteFile(self._handle, string)
return nbytes
@check_closed
def sendall(self, string, flags=0):
return self.send(string, flags)
@check_closed
def sendto(self, string, address):
self.connect(address)
return self.send(string)
def setblocking(self, flag):
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value):
if value is None:
# Blocking mode
self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
elif value == 0:
# Non-blocking mode
self._timeout = win32pipe.NMPWAIT_NO_WAIT
else:
# Timeout mode - Value converted to milliseconds
self._timeout = value * 1000
def gettimeout(self):
return self._timeout
def setsockopt(self, level, optname, value):
raise NotImplementedError()
@check_closed
def shutdown(self, how):
return self.close()
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket):
self.sock = npipe_socket
def close(self):
super(NpipeFileIOBase, self).close()
self.sock = None
def fileno(self):
return self.sock.fileno()
def isatty(self):
return False
def readable(self):
return True
def readinto(self, buf):
return self.sock.recv_into(buf)
def seekable(self):
return False
def writable(self):
return False
docker-2.5.1/docker/transport/unixconn.py 0000664 0001750 0001750 00000005526 13142163435 021652 0 ustar joffrey joffrey 0000000 0000000 import six
import requests.adapters
import socket
from .. import constants
if six.PY3:
import http.client as httplib
else:
import httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, base_url, unix_socket, timeout=60):
super(UnixHTTPConnection, self).__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super(UnixHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(
self.base_url, self.socket_path, self.timeout
)
class UnixAdapter(requests.adapters.HTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
'timeout']
def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(UnixAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
def close(self):
self.pools.clear()
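# Hedged usage sketch (illustrative addition, not part of the upstream
# module): query the Engine API over the conventional UNIX socket with a bare
# requests Session; the socket path may differ on your host.
def _example_unix_adapter():
    import requests
    session = requests.Session()
    session.mount(
        'http+unix://', UnixAdapter('http+unix://var/run/docker.sock')
    )
    return session.get('http+unix://localhost/version').json()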
docker-2.5.1/docker/transport/__init__.py 0000664 0001750 0001750 00000000307 13106703737 021545 0 ustar joffrey joffrey 0000000 0000000 # flake8: noqa
from .unixconn import UnixAdapter
from .ssladapter import SSLAdapter
try:
from .npipeconn import NpipeAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
docker-2.5.1/docker/transport/npipeconn.py 0000664 0001750 0001750 00000006647 13142163435 022007 0 ustar joffrey joffrey 0000000 0000000 import six
import requests.adapters
from .. import constants
from .npipesocket import NpipeSocket
if six.PY3:
import http.client as httplib
else:
import httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, npipe_path, timeout=60):
super(NpipeHTTPConnection, self).__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self):
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super(NpipeHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self):
return NpipeHTTPConnection(
self.npipe_path, self.timeout
)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except six.moves.queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
pass # Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeAdapter(requests.adapters.HTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
'timeout']
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(NpipeAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, as is the case when using a named pipe.
        # Since proxies are an irrelevant notion in the case of named pipes
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
return request.path_url
def close(self):
self.pools.clear()
docker-2.5.1/docker/tls.py 0000664 0001750 0001750 00000006140 13106703745 016554 0 ustar joffrey joffrey 0000000 0000000 import os
import ssl
from . import errors
from .transport import SSLAdapter
class TLSConfig(object):
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be ``False`` or a path to a CA cert
file.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
cert = None
ca_cert = None
verify = None
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
ssl_version=None, assert_hostname=None,
assert_fingerprint=None):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving tls_verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
# TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
# ways: https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
                    'client_cert must be a tuple of'
                    ' (client certificate, key file)'
)
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
                    'Paths to a certificate and key file must be provided'
                    ' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `tls_ca_cert`.'
)
def configure_client(self, client):
"""
Configure a client with these TLS options.
"""
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert:
client.verify = self.ca_cert
else:
client.verify = self.verify
if self.cert:
client.cert = self.cert
client.mount('https://', SSLAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
))
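# Hedged usage sketch (illustrative addition, not part of the upstream
# module): wire a client certificate and a custom CA into an API client. All
# paths and the daemon address are placeholders.
def _example_tls_config():
    import docker
    tls_config = TLSConfig(
        client_cert=('/path/to/cert.pem', '/path/to/key.pem'),
        ca_cert='/path/to/ca.pem',
        verify=True,
    )
    return docker.APIClient(base_url='https://192.0.2.10:2376', tls=tls_config)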
docker-2.5.1/docker/utils/ 0000775 0001750 0001750 00000000000 13147142650 016534 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/utils/ports.py 0000664 0001750 0001750 00000005354 13147140407 020262 0 ustar joffrey joffrey 0000000 0000000 import re
PORT_SPEC = re.compile(
    r"^"  # Match full string
    r"("  # External part
    r"((?P<host>[a-fA-F\d.:]+):)?"  # Address
    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
    r")?"
    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
    r"(?P<proto>/(udp|tcp))?"  # Protocol
    r"$"  # Match full string
)
def add_port_mapping(port_bindings, internal_port, external):
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
def add_port(port_bindings, internal_port_range, external_range):
if external_range is None:
for internal_port in internal_port_range:
add_port_mapping(port_bindings, internal_port, None)
else:
ports = zip(internal_port_range, external_range)
for internal_port, external_port in ports:
add_port_mapping(port_bindings, internal_port, external_port)
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port_range, external_range = split_port(port)
add_port(port_bindings, internal_port_range, external_range)
return port_bindings
def _raise_invalid_port(port):
raise ValueError('Invalid port "%s", should be '
'[[remote_ip:]remote_port[-remote_port]:]'
'port[/protocol]' % port)
def port_range(start, end, proto, randomly_available_port=False):
if not start:
return start
if not end:
return [start + proto]
if randomly_available_port:
return ['{}-{}'.format(start, end) + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
def split_port(port):
if hasattr(port, 'legacy_repr'):
# This is the worst hack, but it prevents a bug in Compose 1.14.0
# https://github.com/docker/docker-py/issues/1668
# TODO: remove once fixed in Compose stable
port = port.legacy_repr()
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
_raise_invalid_port(port)
parts = match.groupdict()
host = parts['host']
proto = parts['proto'] or ''
internal = port_range(parts['int'], parts['int_end'], proto)
external = port_range(
parts['ext'], parts['ext_end'], '', len(internal) == 1)
if host is None:
if external is not None and len(internal) != len(external):
raise ValueError('Port ranges don\'t match in length')
return internal, external
else:
if not external:
external = [None] * len(internal)
elif len(internal) != len(external):
raise ValueError('Port ranges don\'t match in length')
return internal, [(host, ext_port) for ext_port in external]
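# Hedged sketch (illustrative addition, not part of the upstream module): a
# few representative port specs and the structures they are split into, plus
# build_port_bindings combining several specs into one mapping.
def _example_split_port():
    assert split_port('8080:80/tcp') == (['80/tcp'], ['8080'])
    assert split_port('127.0.0.1:8080:80') == (['80'], [('127.0.0.1', '8080')])
    assert split_port('8000-8001:80-81') == (['80', '81'], ['8000', '8001'])
    return build_port_bindings(['1000:1000', '127.0.0.1:2000:2000'])
    # -> {'1000': ['1000'], '2000': [('127.0.0.1', '2000')]}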
docker-2.5.1/docker/utils/utils.py 0000664 0001750 0001750 00000037070 13106703755 020261 0 ustar joffrey joffrey 0000000 0000000 import base64
import io
import os
import os.path
import json
import shlex
import tarfile
import tempfile
import warnings
from distutils.version import StrictVersion
from datetime import datetime
import requests
import six
from .. import constants
from .. import errors
from .. import tls
if six.PY2:
from urllib import splitnport
else:
from urllib.parse import splitnport
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
def create_ipam_pool(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_pool has been removed. Please use a '
'docker.types.IPAMPool object instead.'
)
def create_ipam_config(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_config has been removed. Please use a '
'docker.types.IPAMConfig object instead.'
)
def mkbuildcontext(dockerfile):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
if six.PY3:
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
t.close()
f.seek(0)
return f
def decode_json_header(header):
data = base64.b64decode(header)
if six.PY3:
data = data.decode('utf-8')
return json.loads(data)
def build_file_list(root):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(
longpath.replace(root, '', 1).lstrip('/')
)
return files
def create_archive(root, files=None, fileobj=None, gzip=False):
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
for path in files:
i = t.gettarinfo(os.path.join(root, path), arcname=path)
if i is None:
# This happens when we encounter a socket file. We can safely
# ignore it and proceed.
continue
if constants.IS_WINDOWS_PLATFORM:
# Windows doesn't keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
try:
# We open the file object in binary mode for Windows support.
with open(os.path.join(root, path), 'rb') as f:
t.addfile(i, f)
except IOError:
# When we encounter a directory the file object is set to None.
t.addfile(i, None)
t.close()
fileobj.seek(0)
return fileobj
def compare_version(v1, v2):
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1
def version_lt(v1, v2):
return compare_version(v1, v2) > 0
def version_gte(v1, v2):
return not version_lt(v1, v2)
def ping_registry(url):
warnings.warn(
'The `ping_registry` method is deprecated and will be removed.',
DeprecationWarning
)
return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
def ping(url, valid_4xx_statuses=None):
try:
res = requests.get(url, timeout=3)
except Exception:
return False
else:
        # We don't send auth headers yet,
# and a v2 registry will respond with status 401
return (
res.status_code < 400 or
(valid_4xx_statuses and res.status_code in valid_4xx_statuses)
)
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], six.string_types):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding
if result['HostPort'] is None:
result['HostPort'] = ''
else:
result['HostPort'] = str(result['HostPort'])
return result
def convert_port_bindings(port_bindings):
result = {}
for k, v in six.iteritems(port_bindings):
key = str(k)
if '/' not in key:
key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
def convert_volume_binds(binds):
if isinstance(binds, list):
return binds
result = []
for k, v in binds.items():
if isinstance(k, six.binary_type):
k = k.decode('utf-8')
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
'Binding cannot contain both "ro" and "mode": {}'
.format(repr(v))
)
bind = v['bind']
if isinstance(bind, six.binary_type):
bind = bind.decode('utf-8')
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
mode = v['mode']
else:
mode = 'rw'
result.append(
six.text_type('{0}:{1}:{2}').format(k, bind, mode)
)
else:
if isinstance(v, six.binary_type):
v = v.decode('utf-8')
result.append(
six.text_type('{0}:{1}:rw').format(k, v)
)
return result
def convert_tmpfs_mounts(tmpfs):
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
'Expected tmpfs value to be either a list or a dict, found: {}'
.format(type(tmpfs).__name__)
)
result = {}
for mount in tmpfs:
if isinstance(mount, six.string_types):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
"Expected item in tmpfs list to be a string, found: {}"
.format(type(mount).__name__)
)
result[name] = options
return result
def convert_service_networks(networks):
if not networks:
return networks
if not isinstance(networks, list):
raise TypeError('networks parameter must be a list.')
result = []
for n in networks:
if isinstance(n, six.string_types):
n = {'Target': n}
result.append(n)
return result
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
# fd:// protocol unsupported (for obvious reasons)
# Added support for http and https
# Protocol translation: tcp -> http, unix -> http+unix
def parse_host(addr, is_win32=False, tls=False):
proto = "http+unix"
port = None
path = ''
if not addr and is_win32:
addr = DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
if addr.startswith('http://'):
addr = addr.replace('http://', 'tcp://')
if addr.startswith('http+unix://'):
addr = addr.replace('http+unix://', 'unix://')
if addr == 'tcp://':
raise errors.DockerException(
"Invalid bind address format: {0}".format(addr)
)
elif addr.startswith('unix://'):
addr = addr[7:]
elif addr.startswith('tcp://'):
proto = 'http{0}'.format('s' if tls else '')
addr = addr[6:]
elif addr.startswith('https://'):
proto = "https"
addr = addr[8:]
elif addr.startswith('npipe://'):
proto = 'npipe'
addr = addr[8:]
elif addr.startswith('fd://'):
raise errors.DockerException("fd protocol is not implemented")
else:
if "://" in addr:
raise errors.DockerException(
"Invalid bind address protocol: {0}".format(addr)
)
proto = "https" if tls else "http"
if proto in ("http", "https"):
address_parts = addr.split('/', 1)
host = address_parts[0]
if len(address_parts) == 2:
path = '/' + address_parts[1]
host, port = splitnport(host)
if port is None:
raise errors.DockerException(
"Invalid port: {0}".format(addr)
)
if not host:
host = DEFAULT_HTTP_HOST
else:
host = addr
if proto in ("http", "https") and port == -1:
raise errors.DockerException(
"Bind address needs a port: {0}".format(addr))
if proto == "http+unix" or proto == 'npipe':
return "{0}://{1}".format(proto, host).rstrip('/')
return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
def parse_devices(devices):
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, six.string_types):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
)
device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = 'rwm'
device_list.append({
'PathOnHost': path_on_host,
'PathInContainer': path_in_container,
'CgroupPermissions': permissions
})
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment:
environment = os.environ
host = environment.get('DOCKER_HOST')
# empty string for cert path is the same as unset.
cert_path = environment.get('DOCKER_CERT_PATH') or None
# empty string for tls verify counts as "false".
    # Any other value counts as true; leaving it unset counts as false.
tls_verify = environment.get('DOCKER_TLS_VERIFY')
if tls_verify == '':
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = (
host.replace('tcp://', 'https://') if enable_tls else host
)
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = tls.TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
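# Hedged sketch (illustrative addition, not part of the upstream module):
# build a client from the same DOCKER_HOST / DOCKER_TLS_VERIFY /
# DOCKER_CERT_PATH variables the docker CLI honors.
def _example_kwargs_from_env():
    import docker
    return docker.APIClient(**kwargs_from_env())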
def convert_filters(filters):
result = {}
for k, v in six.iteritems(filters):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = v
return json.dumps(result)
def datetime_to_timestamp(dt):
"""Convert a UTC datetime to a Unix timestamp"""
delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s):
if isinstance(s, six.integer_types + (float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = int(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
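# Hedged sketch (illustrative addition, not part of the upstream module):
# suffixes are case-insensitive and an optional trailing "b" is ignored.
def _example_parse_bytes():
    assert parse_bytes('512') == 512
    assert parse_bytes('64m') == 64 * 1024 * 1024
    assert parse_bytes('1gb') == 1024 ** 3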
def normalize_links(links):
if isinstance(links, dict):
links = six.iteritems(links)
return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, 'r') as f:
for line in f:
if line[0] == '#':
continue
line = line.strip()
if not line:
continue
parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment
def split_command(command):
if six.PY2 and not isinstance(command, six.binary_type):
command = command.encode('utf-8')
return shlex.split(command)
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in six.iteritems(environment)]
def create_host_config(self, *args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_host_config has been removed. Please use a '
'docker.types.HostConfig object instead.'
)
docker-2.5.1/docker/utils/build.py 0000664 0001750 0001750 00000011763 13145377337 020227 0 ustar joffrey joffrey 0000000 0000000 import os
from ..constants import IS_WINDOWS_PLATFORM
from .fnmatch import fnmatch
from .utils import create_archive
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
root = os.path.abspath(path)
exclude = exclude or []
return create_archive(
files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
root=root, fileobj=fileobj, gzip=gzip
)
def exclude_paths(root, patterns, dockerfile=None):
"""
Given a root directory path and a list of .dockerignore patterns, return
an iterator of all paths (both regular files and directories) in the root
directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = 'Dockerfile'
patterns = [p.lstrip('/') for p in patterns]
exceptions = [p for p in patterns if p.startswith('!')]
include_patterns = [p[1:] for p in exceptions]
include_patterns += [dockerfile, '.dockerignore']
exclude_patterns = list(set(patterns) - set(exceptions))
paths = get_paths(root, exclude_patterns, include_patterns,
has_exceptions=len(exceptions) > 0)
return set(paths).union(
# If the Dockerfile is in a subdirectory that is excluded, get_paths
# will not descend into it and the file will be skipped. This ensures
# it doesn't happen.
set([dockerfile.replace('/', os.path.sep)])
if os.path.exists(os.path.join(root, dockerfile)) else set()
)
def should_include(path, exclude_patterns, include_patterns):
"""
Given a path, a list of exclude patterns, and a list of inclusion patterns:
1. Returns True if the path doesn't match any exclusion pattern
2. Returns False if the path matches an exclusion pattern and doesn't match
an inclusion pattern
    3. Returns True if the path matches an exclusion pattern and matches an
inclusion pattern
"""
for pattern in exclude_patterns:
if match_path(path, pattern):
for pattern in include_patterns:
if match_path(path, pattern):
return True
return False
return True
def should_check_directory(directory_path, exclude_patterns, include_patterns):
"""
Given a directory path, a list of exclude patterns, and a list of inclusion
patterns:
1. Returns True if the directory path should be included according to
should_include.
2. Returns True if the directory path is the prefix for an inclusion
pattern
3. Returns False otherwise
"""
    # To account for exception rules, check directories if their path is a
    # prefix to an inclusion pattern. This logic conforms with the current
# docker logic (2016-10-27):
# https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671
def normalize_path(path):
return path.replace(os.path.sep, '/')
path_with_slash = normalize_path(directory_path) + '/'
possible_child_patterns = [
pattern for pattern in map(normalize_path, include_patterns)
if (pattern + '/').startswith(path_with_slash)
]
directory_included = should_include(
directory_path, exclude_patterns, include_patterns
)
return directory_included or len(possible_child_patterns) > 0
def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
paths = []
for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
parent = os.path.relpath(parent, root)
if parent == '.':
parent = ''
# Remove excluded patterns from the list of directories to traverse
# by mutating the dirs we're iterating over.
# This looks strange, but is considered the correct way to skip
# traversal. See https://docs.python.org/2/library/os.html#os.walk
dirs[:] = [
d for d in dirs if should_check_directory(
os.path.join(parent, d), exclude_patterns, include_patterns
)
]
for path in dirs:
if should_include(os.path.join(parent, path),
exclude_patterns, include_patterns):
paths.append(os.path.join(parent, path))
for path in files:
if should_include(os.path.join(parent, path),
exclude_patterns, include_patterns):
paths.append(os.path.join(parent, path))
return paths
def match_path(path, pattern):
pattern = pattern.rstrip('/' + os.path.sep)
if pattern:
pattern = os.path.relpath(pattern)
pattern_components = pattern.split(os.path.sep)
if len(pattern_components) == 1 and IS_WINDOWS_PLATFORM:
pattern_components = pattern.split('/')
if '**' not in pattern:
path_components = path.split(os.path.sep)[:len(pattern_components)]
else:
path_components = path.split(os.path.sep)
return fnmatch('/'.join(path_components), '/'.join(pattern_components))
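# Hedged sketch (illustrative addition, not part of the upstream module):
# compute which paths a build would upload under .dockerignore-style patterns
# and produce the tar archive the daemon receives. The context path is a
# placeholder.
def _example_exclude_paths():
    patterns = ['*.pyc', 'logs', '!logs/keep.log']
    paths = sorted(exclude_paths('/tmp/ctx', patterns))
    return tar('/tmp/ctx', exclude=patterns), paths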
docker-2.5.1/docker/utils/json_stream.py 0000664 0001750 0001750 00000004402 13035555261 021434 0 ustar joffrey joffrey 0000000 0000000 from __future__ import absolute_import
from __future__ import unicode_literals
import json
import json.decoder
import six
from ..errors import StreamParseError
json_decoder = json.JSONDecoder()
def stream_as_text(stream):
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
for data in stream:
if not isinstance(data, six.text_type):
data = data.decode('utf-8', 'replace')
yield data
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer, separator=u'\n'):
index = buffer.find(six.text_type(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = six.text_type('')
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
raise StreamParseError(e)
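# Hedged sketch (illustrative addition, not part of the upstream module):
# chunks may split or merge JSON documents arbitrarily; json_stream
# reassembles them into one object per iteration.
def _example_json_stream():
    chunks = [b'{"status": "Pul', b'ling"}\n{"status":', b' "Downloading"}\n']
    return [obj['status'] for obj in json_stream(chunks)]
    # -> ['Pulling', 'Downloading']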
docker-2.5.1/docker/utils/decorators.py 0000664 0001750 0001750 00000003004 13145161534 021250 0 ustar joffrey joffrey 0000000 0000000 import functools
from .. import errors
from . import utils
def check_resource(resource_name):
def decorator(f):
@functools.wraps(f)
def wrapped(self, resource_id=None, *args, **kwargs):
if resource_id is None and kwargs.get(resource_name):
resource_id = kwargs.pop(resource_name)
if isinstance(resource_id, dict):
resource_id = resource_id.get('Id', resource_id.get('ID'))
if not resource_id:
raise errors.NullResource(
'Resource ID was not provided'
)
return f(self, resource_id, *args, **kwargs)
return wrapped
return decorator
def minimum_version(version):
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
'{0} is not available for version < {1}'.format(
f.__name__, version
)
)
return f(self, *args, **kwargs)
return wrapper
return decorator
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._auth_configs:
if not kwargs.get('headers'):
kwargs['headers'] = self._auth_configs['HttpHeaders']
else:
kwargs['headers'].update(self._auth_configs['HttpHeaders'])
return f(self, *args, **kwargs)
return inner
docker-2.5.1/docker/utils/fnmatch.py 0000664 0001750 0001750 00000006344 13147142632 020535 0 ustar joffrey joffrey 0000000 0000000 """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = {}
_MAXCACHE = 100
def _purge():
"""Clear the pattern cache"""
_cache.clear()
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = name.lower()
pat = pat.lower()
return fnmatchcase(name, pat)
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
try:
re_pat = _cache[pat]
except KeyError:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = '^'
while i < n:
c = pat[i]
i = i + 1
if c == '*':
if i < n and pat[i] == '*':
# is some flavor of "**"
i = i + 1
# Treat **/ as ** so eat the "/"
if i < n and pat[i] == '/':
i = i + 1
if i >= n:
# is "**EOF" - to align with .gitignore just accept all
res = res + '.*'
else:
# is "**"
# Note that this allows for any # of /'s (even 0) because
# the .* will eat everything, even /'s
res = res + '(.*/)?'
else:
# is "*" so map it to anything but "/"
res = res + '[^/]*'
elif c == '?':
# "?" is any char except "/"
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '$'
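# Hedged sketch (illustrative addition, not part of the upstream module):
# unlike the stdlib version, "*" stops at "/" while "**" crosses directory
# boundaries, in line with .dockerignore semantics.
def _example_fnmatch():
    assert fnmatch('docs/readme.md', 'docs/*')
    assert not fnmatch('docs/api/readme.md', 'docs/*')
    assert fnmatch('docs/api/readme.md', 'docs/**')
    assert fnmatch('a/b/c.pyc', '**/*.pyc')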
docker-2.5.1/docker/utils/__init__.py 0000664 0001750 0001750 00000001131 13145161542 020640 0 ustar joffrey joffrey 0000000 0000000 # flake8: noqa
from .build import tar, exclude_paths
from .decorators import check_resource, minimum_version, update_headers
from .utils import (
compare_version, convert_port_bindings, convert_volume_binds,
mkbuildcontext, parse_repository_tag, parse_host,
kwargs_from_env, convert_filters, datetime_to_timestamp,
create_host_config, parse_bytes, ping_registry, parse_env_file, version_lt,
version_gte, decode_json_header, split_command, create_ipam_config,
create_ipam_pool, parse_devices, normalize_links, convert_service_networks,
format_environment, create_archive
)
docker-2.5.1/docker/utils/socket.py 0000664 0001750 0001750 00000004317 13145377337 020415 0 ustar joffrey joffrey 0000000 0000000 import errno
import os
import select
import struct
import six
try:
from ..transport import NpipeSocket
except ImportError:
NpipeSocket = type(None)
class SocketError(Exception):
pass
def read(socket, n=4096):
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
# wait for data to become available
if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
return os.read(socket.fileno(), n)
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = six.binary_type()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_size(socket):
"""
Returns the size of the next frame of data waiting to be read from socket,
according to the protocol defined here:
https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return 0
_, actual = struct.unpack('>BxxxL', data)
return actual
def frames_iter(socket):
"""
Returns a generator of frames read from socket
"""
while True:
n = next_frame_size(socket)
if n == 0:
break
while n > 0:
result = read(socket, n)
if result is None:
continue
data_length = len(result)
if data_length == 0:
# We have reached EOF
return
n -= data_length
yield result
def socket_raw_iter(socket):
"""
Returns a generator of data read from the socket.
This is used for non-multiplexed streams.
"""
while True:
result = read(socket)
if len(result) == 0:
# We have reached EOF
return
yield result
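# Hedged sketch (illustrative addition, not part of the upstream module): the
# 8-byte multiplexed-stream header is one stream-type byte, three padding
# bytes and a big-endian 32-bit payload length. Uses a POSIX socketpair as a
# stand-in for an attach socket.
def _example_next_frame_size():
    import socket as stdlib_socket
    a, b = stdlib_socket.socketpair()  # POSIX only
    a.sendall(struct.pack('>BxxxL', 1, 5) + b'hello')  # stdout frame, 5 bytes
    assert next_frame_size(b) == 5
    assert read_exactly(b, 5) == b'hello'
    a.close()
    b.close()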
docker-2.5.1/docker/errors.py 0000664 0001750 0001750 00000010146 13147140503 017257 0 ustar joffrey joffrey 0000000 0000000 import requests
class DockerException(Exception):
"""
A base class from which all other exceptions inherit.
If you want to catch all errors that the Docker SDK might raise,
catch this base exception.
"""
def create_api_error_from_http_exception(e):
"""
Create a suitable APIError from requests.exceptions.HTTPError.
"""
response = e.response
try:
explanation = response.json()['message']
except ValueError:
explanation = response.content.strip()
cls = APIError
if response.status_code == 404:
if explanation and ('No such image' in str(explanation) or
'not found: does not exist or no pull access'
in str(explanation) or
'repository does not exist' in str(explanation)):
cls = ImageNotFound
else:
cls = NotFound
raise cls(e, response=response, explanation=explanation)
class APIError(requests.exceptions.HTTPError, DockerException):
"""
An HTTP error from the API.
"""
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
message = super(APIError, self).__str__()
if self.is_client_error():
message = '{0} Client Error: {1}'.format(
self.response.status_code, self.response.reason)
elif self.is_server_error():
message = '{0} Server Error: {1}'.format(
self.response.status_code, self.response.reason)
if self.explanation:
message = '{0} ("{1}")'.format(message, self.explanation)
return message
@property
def status_code(self):
if self.response is not None:
return self.response.status_code
def is_client_error(self):
if self.status_code is None:
return False
return 400 <= self.status_code < 500
def is_server_error(self):
if self.status_code is None:
return False
return 500 <= self.status_code < 600
class NotFound(APIError):
pass
class ImageNotFound(NotFound):
pass
class InvalidVersion(DockerException):
pass
class InvalidRepository(DockerException):
pass
class InvalidConfigFile(DockerException):
pass
class InvalidArgument(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
class TLSParameterError(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
        return self.msg + (". TLS configurations should map to the Docker "
                           "CLI client configurations. See "
"https://docs.docker.com/engine/articles/https/ "
"for API details.")
class NullResource(DockerException, ValueError):
pass
class ContainerError(DockerException):
"""
Represents a container that has exited with a non-zero exit code.
"""
def __init__(self, container, exit_status, command, image, stderr):
self.container = container
self.exit_status = exit_status
self.command = command
self.image = image
self.stderr = stderr
err = ": {}".format(stderr) if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
super(ContainerError, self).__init__(msg)
class StreamParseError(RuntimeError):
def __init__(self, reason):
self.msg = reason
class BuildError(Exception):
pass
def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = ["'{}'".format(k) for k in sorted(kwargs)]
text = ["{}() ".format(name)]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
docker-2.5.1/docker/auth.py 0000664 0001750 0001750 00000025110 13145377337 016720 0 ustar joffrey joffrey 0000000 0000000 import base64
import json
import logging
import os
import dockerpycreds
import six
from . import errors
from .constants import IS_WINDOWS_PLATFORM
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
'Repository name cannot contain a scheme ({0})'.format(repo_name)
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
'Invalid index name ({0}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME
return index_name
def get_config_header(client, registry):
log.debug('Looking for auth config')
if not client._auth_configs:
log.debug(
"No auth config in memory - loading from filesystem"
)
client._auth_configs = load_config()
authcfg = resolve_authconfig(client._auth_configs, registry)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
        # auth_config needs to be a dict in the format used by
        # auth.py: username, password, serveraddress, email
return encode_header(authcfg)
log.debug('No auth config found')
return None
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts)
def get_credential_store(authconfig, registry):
if not registry or registry == INDEX_NAME:
registry = 'https://index.docker.io/v1/'
return authconfig.get('credHelpers', {}).get(registry) or authconfig.get(
'credsStore'
)
def resolve_authconfig(authconfig, registry=None):
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the config
with full URLs are stripped down to hostnames before checking for a match.
Returns None if no match was found.
"""
if 'credHelpers' in authconfig or 'credsStore' in authconfig:
store_name = get_credential_store(authconfig, registry)
if store_name is not None:
log.debug(
'Using credentials store "{0}"'.format(store_name)
)
return _resolve_authconfig_credstore(
authconfig, registry, store_name
)
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for {0}".format(repr(registry)))
if registry in authconfig:
log.debug("Found {0}".format(repr(registry)))
return authconfig[registry]
for key, config in six.iteritems(authconfig):
if resolve_index_name(key) == registry:
log.debug("Found {0}".format(repr(key)))
return config
log.debug("No entry found")
return None
def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
if not registry or registry == INDEX_NAME:
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
log.debug("Looking for auth entry for {0}".format(repr(registry)))
store = dockerpycreds.Store(credstore_name)
try:
data = store.get(registry)
res = {
'ServerAddress': registry,
}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({
'Username': data['Username'],
'Password': data['Secret'],
})
return res
except dockerpycreds.CredentialsNotFound as e:
log.debug('No entry found')
return None
except dockerpycreds.StoreError as e:
raise errors.DockerException(
'Credentials store error: {0}'.format(repr(e))
)
def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
def decode_auth(auth):
if isinstance(auth, six.string_types):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
return base64.urlsafe_b64encode(auth_json)
def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in six.iteritems(entries):
if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(registry)
)
# We sometimes fall back to parsing the whole config as if it was
# the auth config by itself, for legacy purposes. In that case, we
# fail silently and return an empty conf if any of the keys is not
# formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(registry)
)
return {}
if 'identitytoken' in entry:
log.debug('Found an IdentityToken entry for registry {0}'.format(
registry
))
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token, skip.
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
                'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
def find_config_file(config_path=None):
paths = list(filter(None, [
config_path, # 1
config_path_from_environment(), # 2
os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
log.debug("Trying paths: {0}".format(repr(paths)))
for path in paths:
if os.path.exists(path):
log.debug("Found file at path: {0}".format(path))
return path
log.debug("No config file found")
return None
def config_path_from_environment():
config_dir = os.environ.get('DOCKER_CONFIG')
if not config_dir:
return None
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
def home_dir():
"""
Get the user's home directory, using the same logic as the Docker Engine
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
"""
if IS_WINDOWS_PLATFORM:
return os.environ.get('USERPROFILE', '')
else:
return os.path.expanduser('~')
def load_config(config_path=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment variable >
~/.docker/config.json > ~/.dockercfg
"""
config_file = find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file) as f:
data = json.load(f)
res = {}
if data.get('auths'):
log.debug("Found 'auths' section")
res.update(parse_auth(data['auths'], raise_on_error=True))
if data.get('HttpHeaders'):
log.debug("Found 'HttpHeaders' section")
res.update({'HttpHeaders': data['HttpHeaders']})
if data.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': data['credsStore']})
if data.get('credHelpers'):
log.debug("Found 'credHelpers' section")
res.update({'credHelpers': data['credHelpers']})
if res:
return res
else:
log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
f.seek(0)
return parse_auth(json.load(f))
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file) as f:
for line in f.readlines():
data.append(line.strip().split(' = ')[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile(
'Invalid or empty configuration file!'
)
username, password = decode_auth(data[0])
return {
INDEX_NAME: {
'username': username,
'password': password,
'email': data[1],
'serveraddress': INDEX_URL,
}
}
except Exception as e:
log.debug(e)
pass
log.debug("All parsing attempts failed - returning empty config")
return {}
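# Hedged usage sketch (illustrative addition, not part of the upstream
# module): load the active config file and resolve the credentials that would
# be sent when pulling from a private registry. The registry is a placeholder.
def _example_load_config():
    auth_configs = load_config()
    return resolve_authconfig(auth_configs, registry='registry.example.com')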
docker-2.5.1/docker/api/ 0000775 0001750 0001750 00000000000 13147142650 016145 5 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/api/volume.py 0000664 0001750 0001750 00000012125 13106703746 020033 0 ustar joffrey joffrey 0000000 0000000 from .. import errors
from .. import utils
class VolumeApiMixin(object):
@utils.minimum_version('1.21')
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(dict): Dictionary with list of volume objects as value of the
``Volumes`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> cli.volumes()
{u'Volumes': [{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'},
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/baz/_data',
u'Name': u'baz'}]}
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/volumes')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.21')
def create_volume(self, name=None, driver=None, driver_opts=None,
labels=None):
"""
Create and register a named volume
Args:
name (str): Name of the volume
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
Returns:
(dict): The created volume reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> volume = cli.create_volume(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
>>> print(volume)
{u'Driver': u'local',
u'Labels': {u'key': u'value'},
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar',
u'Scope': u'local'}
"""
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'DriverOpts': driver_opts,
}
if labels is not None:
if utils.compare_version('1.23', self._version) < 0:
raise errors.InvalidVersion(
'volume labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
return self._result(self._post_json(url, data=data), True)
@utils.minimum_version('1.21')
def inspect_volume(self, name):
"""
Retrieve volume info by name.
Args:
name (str): volume name
Returns:
(dict): Volume information dictionary
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> cli.inspect_volume('foobar')
{u'Driver': u'local',
u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data',
u'Name': u'foobar'}
"""
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def prune_volumes(self, filters=None):
"""
Delete unused volumes
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted volume names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
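Example (illustrative; the returned names and byte count depend on
the daemon's state):
    >>> cli.prune_volumes()
    {u'VolumesDeleted': [u'foobar'], u'SpaceReclaimed': 32768}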
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/volumes/prune')
return self._result(self._post(url, params=params), True)
@utils.minimum_version('1.21')
def remove_volume(self, name, force=False):
"""
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
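Example (illustrative volume name):
    >>> cli.remove_volume('foobar', force=True)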
"""
params = {}
if force:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'force removal was introduced in API 1.25'
)
params = {'force': force}
url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp)
docker-2.5.1/docker/api/build.py 0000664 0001750 0001750 00000030520 13147140743 017617 0 ustar joffrey joffrey 0000000 0000000 import json
import logging
import os
import re
from .. import auth
from .. import constants
from .. import errors
from .. import utils
log = logging.getLogger(__name__)
class BuildApiMixin(object):
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
squash=None):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
containing a Dockerfile) or a remote URL. ``fileobj`` must be a
readable file-like object to a Dockerfile.
If you have a tar file for the Docker build context (including a
Dockerfile) already, pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is compressed
also, set ``encoding`` to the correct value (e.g. ``gzip``).
Example:
>>> from io import BytesIO
>>> from docker import APIClient
>>> dockerfile = '''
... # Shared Volume
... FROM busybox:buildroot-2014.02
... VOLUME /data
... CMD ["/bin/sh"]
... '''
>>> f = BytesIO(dockerfile.encode('utf-8'))
>>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
>>> response = [line for line in cli.build(
... fileobj=f, rm=True, tag='yourname/volume'
... )]
>>> response
['{"stream":" ---\\u003e a9eb17255234\\n"}',
'{"stream":"Step 1 : VOLUME /data\\n"}',
'{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
'{"stream":" ---\\u003e 713bca62012e\\n"}',
'{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
'{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
'{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
'{"stream":" ---\\u003e 032b8b2855fc\\n"}',
'{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
'{"stream":"Successfully built 032b8b2855fc\\n"}']
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
stream (bool): *Deprecated for API version > 1.8 (always True)*.
Return a blocking generator you can iterate over to retrieve
build output as it happens
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
decode (bool): If set to ``True``, the returned stream will be
decoded into dicts on the fly. Default ``False``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (list): A list of images used for build cache
resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting images layers into a
single layer.
Returns:
A generator for the build output.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
"""
remote = context = None
headers = {}
container_limits = container_limits or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
if gzip and encoding is not None:
raise errors.DockerException(
'Can not use custom encoding if gzip is enabled'
)
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
'Invalid container_limits key {0}'.format(key)
)
if custom_context:
if not fileobj:
raise TypeError("You must specify fileobj with custom_context")
context = fileobj
elif fileobj is not None:
context = utils.mkbuildcontext(fileobj)
elif path.startswith(('http://', 'https://',
'git://', 'github.com/', 'git@')):
remote = path
elif not os.path.isdir(path):
raise TypeError("You must specify a directory to build in path")
else:
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore, 'r') as f:
exclude = list(filter(bool, f.read().splitlines()))
context = utils.tar(
path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
)
encoding = 'gzip' if gzip else encoding
if utils.compare_version('1.8', self._version) >= 0:
stream = True
if dockerfile and utils.compare_version('1.17', self._version) < 0:
raise errors.InvalidVersion(
'dockerfile was only introduced in API version 1.17'
)
if utils.compare_version('1.19', self._version) < 0:
pull = 1 if pull else 0
u = self._url('/build')
params = {
't': tag,
'remote': remote,
'q': quiet,
'nocache': nocache,
'rm': rm,
'forcerm': forcerm,
'pull': pull,
'dockerfile': dockerfile,
}
params.update(container_limits)
if buildargs:
if utils.version_gte(self._version, '1.21'):
params.update({'buildargs': json.dumps(buildargs)})
else:
raise errors.InvalidVersion(
'buildargs was only introduced in API version 1.21'
)
if shmsize:
if utils.version_gte(self._version, '1.22'):
params.update({'shmsize': shmsize})
else:
raise errors.InvalidVersion(
'shmsize was only introduced in API version 1.22'
)
if labels:
if utils.version_gte(self._version, '1.23'):
params.update({'labels': json.dumps(labels)})
else:
raise errors.InvalidVersion(
'labels was only introduced in API version 1.23'
)
if cache_from:
if utils.version_gte(self._version, '1.25'):
params.update({'cachefrom': json.dumps(cache_from)})
else:
raise errors.InvalidVersion(
'cache_from was only introduced in API version 1.25'
)
if target:
if utils.version_gte(self._version, '1.29'):
params.update({'target': target})
else:
raise errors.InvalidVersion(
'target was only introduced in API version 1.29'
)
if network_mode:
if utils.version_gte(self._version, '1.25'):
params.update({'networkmode': network_mode})
else:
raise errors.InvalidVersion(
'network_mode was only introduced in API version 1.25'
)
if squash:
if utils.version_gte(self._version, '1.25'):
params.update({'squash': squash})
else:
raise errors.InvalidVersion(
'squash was only introduced in API version 1.25'
)
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
headers['Content-Encoding'] = encoding
if utils.compare_version('1.9', self._version) >= 0:
self._set_auth_headers(headers)
response = self._post(
u,
data=context,
params=params,
headers=headers,
stream=stream,
timeout=timeout,
)
if context is not None and not custom_context:
context.close()
if stream:
return self._stream_helper(response, decode=decode)
else:
output = self._result(response)
srch = r'Successfully built ([0-9a-f]+)'
match = re.search(srch, output)
if not match:
return None, output
return match.group(1), output
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs:
log.debug("No auth config in memory - loading from filesystem")
self._auth_configs = auth.load_config()
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
auth_data = {}
if self._auth_configs.get('credsStore'):
# Using a credentials store, we need to retrieve the
# credentials for each registry listed in the config.json file
# Matches CLI behavior: https://github.com/docker/docker/blob/
# 67b85f9d26f1b0b2b240f2d794748fac0f45243c/cliconfig/
# credentials/native_store.go#L68-L83
for registry in self._auth_configs.keys():
if registry == 'credsStore' or registry == 'HttpHeaders':
continue
auth_data[registry] = auth.resolve_authconfig(
self._auth_configs, registry
)
else:
auth_data = self._auth_configs.copy()
# See https://github.com/docker/docker-py/issues/1683
if auth.INDEX_NAME in auth_data:
auth_data[auth.INDEX_URL] = auth_data[auth.INDEX_NAME]
log.debug(
'Sending auth config ({0})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
if utils.compare_version('1.19', self._version) >= 0:
headers['X-Registry-Config'] = auth.encode_header(
auth_data
)
else:
headers['X-Registry-Config'] = auth.encode_header({
'configs': auth_data
})
else:
log.debug('No auth config found')
docker-2.5.1/docker/api/exec_api.py 0000664 0001750 0001750 00000012104 13145377337 020304 0 ustar joffrey joffrey 0000000 0000000 import six
from .. import errors
from .. import utils
class ExecApiMixin(object):
@utils.minimum_version('1.15')
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
environment=None):
"""
Sets up an exec instance in a running container.
Args:
container (str): Target container where exec instance will be
created
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
Returns:
(dict): A dictionary with an exec ``Id`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
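Example (illustrative; assumes a running container named
``mycontainer``):
    >>> exec_id = cli.exec_create('mycontainer', 'echo hello')
    >>> cli.exec_start(exec_id)
    'hello\n'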
"""
if privileged and utils.version_lt(self._version, '1.19'):
raise errors.InvalidVersion(
'Privileged exec is not supported in API < 1.19'
)
if user and utils.version_lt(self._version, '1.19'):
raise errors.InvalidVersion(
'User-specific exec is not supported in API < 1.19'
)
if environment is not None and utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Setting environment for exec is not supported in API < 1.25'
)
if isinstance(cmd, six.string_types):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
environment = utils.format_environment(environment)
data = {
'Container': container,
'User': user,
'Privileged': privileged,
'Tty': tty,
'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
'Cmd': cmd,
'Env': environment,
}
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
@utils.minimum_version('1.16')
def exec_inspect(self, exec_id):
"""
Return low-level information about an exec command.
Args:
exec_id (str): ID of the exec instance
Returns:
(dict): Dictionary of values returned by the endpoint.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
@utils.minimum_version('1.15')
def exec_resize(self, exec_id, height=None, width=None):
"""
Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.minimum_version('1.15')
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False):
"""
Start a previously set up exec instance.
Args:
exec_id (str): ID of the exec instance
detach (bool): If true, detach from the exec command.
Default: False
tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
    read/write operations. Default: False
Returns:
(generator or str): If ``stream=True``, a generator yielding
response chunks. A string containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# we want opened socket if socket == True
data = {
'Tty': tty,
'Detach': detach
}
headers = {} if detach else {
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
res = self._post_json(
self._url('/exec/{0}/start', exec_id),
headers=headers,
data=data,
stream=True
)
if detach:
return self._result(res)
if socket:
return self._get_raw_response_socket(res)
return self._read_from_socket(res, stream, tty)
docker-2.5.1/docker/api/service.py 0000664 0001750 0001750 00000031200 13145377337 020165 0 ustar joffrey joffrey 0000000 0000000 import warnings
from .. import auth, errors, utils
from ..types import ServiceMode
def _check_api_features(version, task_template, update_config):
if update_config is not None:
if utils.version_lt(version, '1.25'):
if 'MaxFailureRatio' in update_config:
raise errors.InvalidVersion(
'UpdateConfig.max_failure_ratio is not supported in'
' API version < 1.25'
)
if 'Monitor' in update_config:
raise errors.InvalidVersion(
'UpdateConfig.monitor is not supported in'
' API version < 1.25'
)
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
raise errors.InvalidVersion(
'force_update is not supported in API version < 1.25'
)
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
if task_template['Placement'].get('Platforms'):
raise errors.InvalidVersion(
'Placement.platforms is not supported in'
' API version < 1.30'
)
if utils.version_lt(version, '1.27'):
if task_template['Placement'].get('Preferences'):
raise errors.InvalidVersion(
'Placement.preferences is not supported in'
' API version < 1.27'
)
if task_template.get('ContainerSpec', {}).get('TTY'):
if utils.version_lt(version, '1.25'):
raise errors.InvalidVersion(
'ContainerSpec.TTY is not supported in API version < 1.25'
)
class ServiceApiMixin(object):
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
endpoint_spec=None
):
"""
Create a service.
Args:
task_template (TaskTemplate): Specification of the task to start as
part of the new service.
name (string): User-defined name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs to attach
the service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
Returns:
A dictionary containing an ``ID`` key for the newly created
service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
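Example (a minimal sketch; the image and service name are
placeholders):
    >>> container_spec = docker.types.ContainerSpec(
    ...     image='nginx:latest')
    >>> task_template = docker.types.TaskTemplate(container_spec)
    >>> cli.create_service(task_template, name='web')
    {u'ID': u'...'}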
"""
if endpoint_config is not None:
warnings.warn(
'endpoint_config has been renamed to endpoint_spec.',
DeprecationWarning
)
endpoint_spec = endpoint_config
_check_api_features(self._version, task_template, update_config)
url = self._url('/services/create')
headers = {}
image = task_template.get('ContainerSpec', {}).get('Image', None)
if image is None:
raise errors.DockerException(
'Missing mandatory Image key in ContainerSpec'
)
if mode and not isinstance(mode, dict):
mode = ServiceMode(mode)
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
data = {
'Name': name,
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
'Networks': utils.convert_service_networks(networks),
'EndpointSpec': endpoint_spec
}
if update_config is not None:
data['UpdateConfig'] = update_config
return self._result(
self._post_json(url, data=data, headers=headers), True
)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def inspect_service(self, service):
"""
Return information about a service.
Args:
service (str): Service name or ID
Returns:
(dict): Information about the service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource('task')
def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def remove_service(self, service):
"""
Stop and remove a service.
Args:
service (str): Service name or ID
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
return True
@utils.minimum_version('1.24')
def services(self, filters=None):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id`` and ``name``. Default: ``None``.
Returns:
A list of dictionaries containing data about each service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/services')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.25')
@utils.check_resource('service')
def service_logs(self, service, details=False, follow=False, stdout=False,
stderr=False, since=0, timestamps=False, tail='all',
is_tty=None):
"""
Get log stream for a service.
Note: This endpoint works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
service (str): ID or name of the service
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
since (int): UNIX timestamp for the logs starting point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
is_tty (bool): Whether the service's :py:class:`ContainerSpec`
enables the TTY option. If omitted, the method will query
the Engine for the information, causing an additional
roundtrip.
Returns (generator): Logs for the service.
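Example (illustrative service name):
    >>> for line in cli.service_logs('web', stdout=True, follow=True):
    ...     print(line)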
"""
params = {
'details': details,
'follow': follow,
'stdout': stdout,
'stderr': stderr,
'since': since,
'timestamps': timestamps,
'tail': tail
}
url = self._url('/services/{0}/logs', service)
res = self._get(url, params=params, stream=True)
if is_tty is None:
is_tty = self.inspect_service(
service
)['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
return self._get_result_tty(True, res, is_tty)
@utils.minimum_version('1.24')
def tasks(self, filters=None):
"""
Retrieve a list of tasks.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``service``, ``node``,
``label`` and ``desired-state``.
Returns:
(:py:class:`list`): List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/tasks')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
endpoint_spec=None):
"""
Update a service.
Args:
service (string): A service identifier (either its name or service
ID).
version (int): The version number of the service object being
updated. This is required to avoid conflicting writes.
task_template (TaskTemplate): Specification of the updated task to
start as part of the service.
name (string): New name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
networks (:py:class:`list`): List of network names or IDs to attach
the service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
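Example (an illustrative sketch of a version-checked rename):
    >>> spec = cli.inspect_service('web')
    >>> version = spec['Version']['Index']
    >>> cli.update_service('web', version, name='web-renamed')
    True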
"""
if endpoint_config is not None:
warnings.warn(
'endpoint_config has been renamed to endpoint_spec.',
DeprecationWarning
)
endpoint_spec = endpoint_config
_check_api_features(self._version, task_template, update_config)
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
if name is not None:
data['Name'] = name
if labels is not None:
data['Labels'] = labels
if mode is not None:
if not isinstance(mode, dict):
mode = ServiceMode(mode)
data['Mode'] = mode
if task_template is not None:
image = task_template.get('ContainerSpec', {}).get('Image', None)
if image is not None:
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
data['TaskTemplate'] = task_template
if update_config is not None:
data['UpdateConfig'] = update_config
if networks is not None:
data['Networks'] = utils.convert_service_networks(networks)
if endpoint_spec is not None:
data['EndpointSpec'] = endpoint_spec
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
self._raise_for_status(resp)
return True
docker-2.5.1/docker/api/container.py 0000664 0001750 0001750 00000136162 13147142632 020512 0 ustar joffrey joffrey 0000000 0000000 import six
import warnings
from datetime import datetime
from .. import errors
from .. import utils
from ..types import (
ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig
)
class ContainerApiMixin(object):
@utils.check_resource('container')
def attach(self, container, stdout=True, stderr=True,
stream=False, logs=False):
"""
Attach to a container.
The ``.logs()`` function is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
container (str): The container to attach to.
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
Returns:
By default, the container's output as a single string.
If ``stream=True``, an iterator of output strings.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
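Example (illustrative container name):
    >>> for chunk in cli.attach('mycontainer', stream=True, logs=True):
    ...     print(chunk)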
"""
params = {
'logs': logs and 1 or 0,
'stdout': stdout and 1 or 0,
'stderr': stderr and 1 or 0,
'stream': stream and 1 or 0
}
headers = {
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
u = self._url("/containers/{0}/attach", container)
response = self._post(u, headers=headers, params=params, stream=True)
return self._read_from_socket(
response, stream, self._check_is_tty(container)
)
@utils.check_resource('container')
def attach_socket(self, container, params=None, ws=False):
"""
Like ``attach``, but returns the underlying socket-like object for the
HTTP request.
Args:
container (str): The container to attach to.
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if params is None:
params = {
'stdout': 1,
'stderr': 1,
'stream': 1
}
if ws:
return self._attach_websocket(container, params)
headers = {
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
u = self._url("/containers/{0}/attach", container)
return self._get_raw_response_socket(
self.post(
u, None, params=self._attach_params(params), stream=True,
headers=headers
)
)
@utils.check_resource('container')
def commit(self, container, repository=None, tag=None, message=None,
author=None, changes=None, conf=None):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
container (str): The container to commit (name or ID)
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
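Example (illustrative identifiers):
    >>> cli.commit('3fa6895b5132', repository='yourname/app', tag='v1')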
"""
params = {
'container': container,
'repo': repository,
'tag': tag,
'comment': message,
'author': author,
'changes': changes
}
u = self._url("/commit")
return self._result(self._post_json(u, data=conf, params=params),
json=True)
def containers(self, quiet=False, all=False, trunc=False, latest=False,
since=None, before=None, limit=-1, size=False,
filters=None):
"""
List containers. Similar to the ``docker ps`` command.
Args:
quiet (bool): Only display numeric Ids
all (bool): Show all containers. Only running containers are shown
by default
trunc (bool): Truncate output
latest (bool): Show only the latest created container, include
non-running ones.
since (str): Show only containers created since Id or Name, include
non-running ones
before (str): Show only container created before Id or Name,
include non-running ones
limit (int): Show `limit` last created containers, include
non-running ones
size (bool): Display sizes
filters (dict): Filters to be processed on the image list.
Available filters:
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- `label` (str): format either ``"key"`` or ``"key=value"``
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
``<image-name>[:tag]``, ``<image-id>``, or
``<image@digest>``.
- `before` (str): Only containers created before a particular
container. Give the container name or id.
- `since` (str): Only containers created after a particular
container. Give container name or id.
A comprehensive list can be found in the documentation for
`docker ps
`_.
Returns:
A list of dicts, one per container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
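Example (illustrative):
    >>> cli.containers(all=True, filters={'status': 'exited'})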
"""
params = {
'limit': 1 if latest else limit,
'all': 1 if all else 0,
'size': 1 if size else 0,
'trunc_cmd': 1 if trunc else 0,
'since': since,
'before': before
}
if filters:
params['filters'] = utils.convert_filters(filters)
u = self._url("/containers/json")
res = self._result(self._get(u, params=params), True)
if quiet:
return [{'Id': x['Id']} for x in res]
if trunc:
for x in res:
x['Id'] = x['Id'][:12]
return res
@utils.check_resource('container')
def copy(self, container, resource):
"""
Identical to the ``docker cp`` command. Get files/folders from the
container.
**Deprecated for API version >= 1.20.** Use
:py:meth:`~ContainerApiMixin.get_archive` instead.
Args:
container (str): The container to copy from
resource (str): The path within the container
Returns:
The contents of the file as a string
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if utils.version_gte(self._version, '1.20'):
warnings.warn(
'APIClient.copy() is deprecated for API version >= 1.20, '
'please use get_archive() instead',
DeprecationWarning
)
res = self._post_json(
self._url("/containers/{0}/copy", container),
data={"Resource": resource},
stream=True
)
self._raise_for_status(res)
return res.raw
def create_container(self, image, command=None, hostname=None, user=None,
detach=False, stdin_open=False, tty=False,
mem_limit=None, ports=None, environment=None,
dns=None, volumes=None, volumes_from=None,
network_disabled=False, name=None, entrypoint=None,
cpu_shares=None, working_dir=None, domainname=None,
memswap_limit=None, cpuset=None, host_config=None,
mac_address=None, labels=None, volume_driver=None,
stop_signal=None, networking_config=None,
healthcheck=None, stop_timeout=None, runtime=None):
"""
Creates a container. Parameters are similar to those for the ``docker
run`` command except it doesn't support the attach options (``-a``).
The arguments that are passed directly to this function are
host-independent configuration options. Host-specific configuration
is passed with the `host_config` argument. You'll normally want to
use this method in combination with the :py:meth:`create_host_config`
method to generate ``host_config``.
**Port bindings**
Port binding is done in two parts: first, provide a list of ports to
open inside the container with the ``ports`` parameter, then declare
bindings with the ``host_config`` parameter. For example:
.. code-block:: python
container_id = cli.create_container(
'busybox', 'ls', ports=[1111, 2222],
host_config=cli.create_host_config(port_bindings={
1111: 4567,
2222: None
})
)
You can limit the host address on which the port will be exposed like
such:
.. code-block:: python
cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)})
Or without host port assignment:
.. code-block:: python
cli.create_host_config(port_bindings={1111: ('127.0.0.1',)})
If you wish to use UDP instead of TCP (default), you need to declare
ports as such in both the config and host config:
.. code-block:: python
container_id = cli.create_container(
'busybox', 'ls', ports=[(1111, 'udp'), 2222],
host_config=cli.create_host_config(port_bindings={
'1111/udp': 4567, 2222: None
})
)
To bind multiple host ports to a single container port, use the
following syntax:
.. code-block:: python
cli.create_host_config(port_bindings={
1111: [1234, 4567]
})
You can also bind multiple IPs to a single container port:
.. code-block:: python
cli.create_host_config(port_bindings={
1111: [
('192.168.0.100', 1234),
('192.168.0.101', 1234)
]
})
**Using volumes**
Volume declaration is done in two parts. Provide a list of
paths to use as mountpoints inside the container with the
``volumes`` parameter, and declare mappings from paths on the host
in the ``host_config`` section.
.. code-block:: python
container_id = cli.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
host_config=cli.create_host_config(binds={
'/home/user1/': {
'bind': '/mnt/vol2',
'mode': 'rw',
},
'/var/www': {
'bind': '/mnt/vol1',
'mode': 'ro',
}
})
)
You can alternatively specify binds as a list. This code is equivalent
to the example above:
.. code-block:: python
container_id = cli.create_container(
'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'],
host_config=cli.create_host_config(binds=[
'/home/user1/:/mnt/vol2',
'/var/www:/mnt/vol1:ro',
])
)
**Networking**
You can specify networks to connect the container to by using the
``networking_config`` parameter. At the time of creation, you can
only connect a container to a single network, but you
can create more connections by using
:py:meth:`~connect_container_to_network`.
For example:
.. code-block:: python
networking_config = docker_client.create_networking_config({
'network1': docker_client.create_endpoint_config(
ipv4_address='172.28.0.124',
aliases=['foo', 'bar'],
links=['container2']
)
})
ctnr = docker_client.create_container(
img, command, networking_config=networking_config
)
Args:
image (str): The image to run
command (str or list): The command to be run in the container
hostname (str): Optional hostname for the container
user (str or int): Username or UID
detach (bool): Detached mode: run container in the background and
return container ID
stdin_open (bool): Keep STDIN open even if not attached
tty (bool): Allocate a pseudo-TTY
mem_limit (float or str): Memory limit. Accepts float values (which
represent the memory limit of the created container in bytes)
or a string with a units identification char (``100000b``,
``1000k``, ``128m``, ``1g``). If a string is specified without
a units character, bytes are assumed as an intended unit.
ports (list of ints): A list of port numbers
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
dns (:py:class:`list`): DNS name servers. Deprecated since API
version 1.10. Use ``host_config`` instead.
volumes (str or list): List of paths inside the container to use
as volumes.
volumes_from (:py:class:`list`): List of container names or Ids to
get volumes from.
network_disabled (bool): Disable networking
name (str): A name for the container
entrypoint (str or list): An entrypoint
working_dir (str): Path to the working directory
domainname (str): The domain name to use for the container
memswap_limit (int): Maximum amount of memory + swap a
    container is allowed to consume.
host_config (dict): A dictionary created with
:py:meth:`create_host_config`.
mac_address (str): The Mac Address to assign the container
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
volume_driver (str): The name of a volume driver/plugin.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
stop_timeout (int): Timeout to stop the container, in seconds.
Default: 10
networking_config (dict): A networking configuration generated
by :py:meth:`create_networking_config`.
runtime (str): Runtime to use with this container.
healthcheck (dict): Specify a test to perform to check that the
container is healthy.
Returns:
A dictionary with the created container's ``Id`` key and a
``Warnings`` key.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(volumes, six.string_types):
volumes = [volumes, ]
if host_config and utils.compare_version('1.15', self._version) < 0:
raise errors.InvalidVersion(
'host_config is not supported in API < 1.15'
)
config = self.create_container_config(
image, command, hostname, user, detach, stdin_open, tty, mem_limit,
ports, dns, environment, volumes, volumes_from,
network_disabled, entrypoint, cpu_shares, working_dir, domainname,
memswap_limit, cpuset, host_config, mac_address, labels,
volume_driver, stop_signal, networking_config, healthcheck,
stop_timeout, runtime
)
return self.create_container_from_config(config, name)
def create_container_config(self, *args, **kwargs):
return ContainerConfig(self._version, *args, **kwargs)
def create_container_from_config(self, config, name=None):
u = self._url("/containers/create")
params = {
'name': name
}
res = self._post_json(u, data=config, params=params)
return self._result(res, True)
def create_host_config(self, *args, **kwargs):
"""
Create a dictionary for the ``host_config`` argument to
:py:meth:`create_container`.
Args:
auto_remove (bool): enable auto-removal of the container on daemon
side when the container's process exits.
binds (dict): Volumes to bind. See :py:meth:`create_container`
for more information.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
value between 10 and 1000.
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a
device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (:py:class:`list`): Expose host devices to the container,
as a list of strings in the form
``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file
dns_search (:py:class:`list`): DNS search domains.
extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
init (bool): Run an init inside the container that forwards
signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
links (dict or list of tuples): Either a dictionary mapping name
to alias or as a list of ``(name, alias)`` tuples.
log_config (dict): Logging configuration, as a dictionary with
keys:
- ``type`` The logging driver name.
- ``config`` A dictionary of configuration for the logging
driver.
lxc_conf (dict): LXC config.
mem_limit (float or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
network_mode (str): One of:
- ``bridge`` Create a new network stack for the container on
    the bridge network.
- ``none`` No networking for this container.
- ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
pid_mode (str): If set to ``host``, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
port_bindings (dict): See :py:meth:`create_container`
for more information.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
only.
restart_policy (dict): Restart the container when it exits.
Configured as a dictionary with keys:
- ``Name`` Either ``on-failure`` or ``always``.
- ``MaximumRetryCount`` Number of times to restart the
container on failure.
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
For example:
.. code-block:: python
{
'/mnt/vol2': '',
'/mnt/vol1': 'size=3G,uid=1000'
}
ulimits (:py:class:`list`): Ulimits to set inside the container,
as a list of dicts.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
runtime (str): Runtime to use with this container.
Returns:
(dict) A dictionary which can be passed to the ``host_config``
argument to :py:meth:`create_container`.
Example:
>>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'],
volumes_from=['nostalgic_newton'])
{'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True,
'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False}
"""
if not kwargs:
kwargs = {}
if 'version' in kwargs:
raise TypeError(
"create_host_config() got an unexpected "
"keyword argument 'version'"
)
kwargs['version'] = self._version
return HostConfig(*args, **kwargs)
def create_networking_config(self, *args, **kwargs):
"""
Create a networking config dictionary to be used as the
``networking_config`` parameter in :py:meth:`create_container`.
Args:
endpoints_config (dict): A dictionary mapping network names to
endpoint configurations generated by
:py:meth:`create_endpoint_config`.
Returns:
(dict) A networking config.
Example:
>>> docker_client.create_network('network1')
>>> networking_config = docker_client.create_networking_config({
'network1': docker_client.create_endpoint_config()
})
>>> container = docker_client.create_container(
img, command, networking_config=networking_config
)
"""
return NetworkingConfig(*args, **kwargs)
def create_endpoint_config(self, *args, **kwargs):
"""
Create an endpoint config dictionary to be used with
:py:meth:`create_networking_config`.
Args:
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
Returns:
(dict) An endpoint config.
Example:
>>> endpoint_config = client.create_endpoint_config(
aliases=['web', 'app'],
links=['app_db'],
ipv4_address='132.65.0.123'
)
"""
return EndpointConfig(self._version, *args, **kwargs)
@utils.check_resource('container')
def diff(self, container):
"""
Inspect changes on a container's filesystem.
Args:
container (str): The container to diff
Returns:
(list): A list of dictionaries describing the changed paths,
each with ``Path`` and ``Kind`` keys.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/containers/{0}/changes", container)), True
)
@utils.check_resource('container')
def export(self, container):
"""
Export the contents of a filesystem as a tar archive.
Args:
container (str): The container to export
Returns:
(str): The filesystem tar archive
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
res = self._get(
self._url("/containers/{0}/export", container), stream=True
)
self._raise_for_status(res)
return res.raw
@utils.check_resource('container')
@utils.minimum_version('1.20')
def get_archive(self, container, path):
"""
Retrieve a file or folder from a container in the form of a tar
archive.
Args:
container (str): The container where the file is located
path (str): Path to the file or folder to retrieve
Returns:
(tuple): First element is a raw tar data stream. Second element is
a dict containing ``stat`` information on the specified ``path``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
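Example (illustrative; writes the returned tar stream to disk):
    >>> bits, stat = cli.get_archive('mycontainer', '/etc/hosts')
    >>> with open('hosts.tar', 'wb') as f:
    ...     for chunk in bits:
    ...         f.write(chunk)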
"""
params = {
'path': path
}
url = self._url('/containers/{0}/archive', container)
res = self._get(url, params=params, stream=True)
self._raise_for_status(res)
encoded_stat = res.headers.get('x-docker-container-path-stat')
return (
res.raw,
utils.decode_json_header(encoded_stat) if encoded_stat else None
)
@utils.check_resource('container')
def inspect_container(self, container):
"""
Identical to the `docker inspect` command, but only for containers.
Args:
container (str): The container to inspect
Returns:
(dict): Similar to the output of `docker inspect`, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/containers/{0}/json", container)), True
)
@utils.check_resource('container')
def kill(self, container, signal=None):
"""
Kill a container or send a signal to a container.
Args:
container (str): The container to kill
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/containers/{0}/kill", container)
params = {}
if signal is not None:
if not isinstance(signal, six.string_types):
signal = int(signal)
params['signal'] = signal
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource('container')
def logs(self, container, stdout=True, stderr=True, stream=False,
timestamps=False, tail='all', since=None, follow=None):
"""
Get logs from a container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
container (str): The container to get logs from
stdout (bool): Get ``STDOUT``
stderr (bool): Get ``STDERR``
stream (bool): Stream the response
timestamps (bool): Show timestamps
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output
Returns:
(generator or str)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
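Example (illustrative):
    >>> cli.logs('mycontainer', tail=10, timestamps=True)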
"""
if utils.compare_version('1.11', self._version) >= 0:
if follow is None:
follow = stream
params = {'stderr': stderr and 1 or 0,
'stdout': stdout and 1 or 0,
'timestamps': timestamps and 1 or 0,
'follow': follow and 1 or 0,
}
if utils.compare_version('1.13', self._version) >= 0:
if tail != 'all' and (not isinstance(tail, int) or tail < 0):
tail = 'all'
params['tail'] = tail
if since is not None:
if utils.compare_version('1.19', self._version) < 0:
raise errors.InvalidVersion(
'since is not supported in API < 1.19'
)
else:
if isinstance(since, datetime):
params['since'] = utils.datetime_to_timestamp(since)
elif (isinstance(since, int) and since > 0):
params['since'] = since
else:
raise errors.InvalidArgument(
'since value should be datetime or int, not {}'.
format(type(since))
)
url = self._url("/containers/{0}/logs", container)
res = self._get(url, params=params, stream=stream)
return self._get_result(container, stream, res)
return self.attach(
container,
stdout=stdout,
stderr=stderr,
stream=stream,
logs=True
)
@utils.check_resource('container')
def pause(self, container):
"""
Pauses all processes within a container.
Args:
container (str): The container to pause
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/containers/{0}/pause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.check_resource('container')
def port(self, container, private_port):
"""
Lookup the public-facing port that is NAT-ed to ``private_port``.
Identical to the ``docker port`` command.
Args:
container (str): The container to look up
private_port (int): The private port to inspect
Returns:
(list of dict): The mapping for the host ports
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
.. code-block:: bash
$ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30
7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b
.. code-block:: python
>>> cli.port('7174d6347063', 80)
[{'HostIp': '0.0.0.0', 'HostPort': '80'}]
"""
res = self._get(self._url("/containers/{0}/json", container))
self._raise_for_status(res)
json_ = res.json()
private_port = str(private_port)
h_ports = None
# Port settings is None when the container is running with
# network_mode=host.
port_settings = json_.get('NetworkSettings', {}).get('Ports')
if port_settings is None:
return None
if '/' in private_port:
return port_settings.get(private_port)
h_ports = port_settings.get(private_port + '/tcp')
if h_ports is None:
h_ports = port_settings.get(private_port + '/udp')
return h_ports
@utils.check_resource('container')
@utils.minimum_version('1.20')
def put_archive(self, container, path, data):
"""
Insert a file or folder in an existing container using a tar archive as
source.
Args:
container (str): The container where the file(s) will be extracted
path (str): Path inside the container where the file(s) will be
extracted. Must exist.
data (bytes): tar data to be extracted
Returns:
(bool): True if the call succeeds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
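Example (a sketch; assumes ``files.tar`` exists locally and ``/tmp``
exists in the container):
    >>> with open('files.tar', 'rb') as f:
    ...     cli.put_archive('mycontainer', '/tmp', f.read())
    True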
"""
params = {'path': path}
url = self._url('/containers/{0}/archive', container)
res = self._put(url, params=params, data=data)
self._raise_for_status(res)
return res.status_code == 200
@utils.minimum_version('1.25')
def prune_containers(self, filters=None):
"""
Delete stopped containers
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted container IDs and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/containers/prune')
return self._result(self._post(url, params=params), True)
@utils.check_resource('container')
def remove_container(self, container, v=False, link=False, force=False):
"""
Remove a container. Similar to the ``docker rm`` command.
Args:
container (str): The container to remove
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'v': v, 'link': link, 'force': force}
res = self._delete(
self._url("/containers/{0}", container), params=params
)
self._raise_for_status(res)
@utils.minimum_version('1.17')
@utils.check_resource('container')
def rename(self, container, name):
"""
Rename a container. Similar to the ``docker rename`` command.
Args:
container (str): ID of the container to rename
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/containers/{0}/rename", container)
params = {'name': name}
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource('container')
def resize(self, container, height, width):
"""
Resize the tty session.
Args:
container (str or dict): The container to resize
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'h': height, 'w': width}
url = self._url("/containers/{0}/resize", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource('container')
def restart(self, container, timeout=10):
"""
Restart a container. Similar to the ``docker restart`` command.
Args:
container (str or dict): The container to restart. If a dict, the
``Id`` key is used.
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'t': timeout}
url = self._url("/containers/{0}/restart", container)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource('container')
def start(self, container, *args, **kwargs):
"""
Start a container. Similar to the ``docker start`` command, but
doesn't support attach options.
**Deprecation warning:** Passing configuration options in ``start`` is
no longer supported. Users are expected to provide host config options
in the ``host_config`` parameter of
:py:meth:`~ContainerApiMixin.create_container`.
Args:
container (str): The container to start
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
:py:class:`docker.errors.DeprecatedMethod`
If any argument besides ``container`` are provided.
Example:
>>> container = cli.create_container(
... image='busybox:latest',
... command='/bin/sleep 30')
>>> cli.start(container=container.get('Id'))
"""
if args or kwargs:
raise errors.DeprecatedMethod(
'Providing configuration in the start() method is no longer '
'supported. Use the host_config param in create_container '
'instead.'
)
url = self._url("/containers/{0}/start", container)
res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.17')
@utils.check_resource('container')
def stats(self, container, decode=None, stream=True):
"""
Stream statistics for a specific container. Similar to the
``docker stats`` command.
Args:
container (str): The container to stream statistics from
decode (bool): If set to true, stream will be decoded into dicts
on the fly. False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
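        Example:
            A sketch of consuming the stream; the container ID is
            hypothetical and the keys follow the Engine API stats object:
            >>> for stat in cli.stats('45e6d2de7c54', decode=True):
            ...     print(stat['cpu_stats']['cpu_usage']['total_usage'])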
"""
url = self._url("/containers/{0}/stats", container)
if stream:
return self._stream_helper(self._get(url, stream=True),
decode=decode)
else:
return self._result(self._get(url, params={'stream': False}),
json=True)
@utils.check_resource('container')
def stop(self, container, timeout=10):
"""
Stops a container. Similar to the ``docker stop`` command.
Args:
container (str): The container to stop
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'t': timeout}
url = self._url("/containers/{0}/stop", container)
res = self._post(url, params=params,
timeout=(timeout + (self.timeout or 0)))
self._raise_for_status(res)
@utils.check_resource('container')
def top(self, container, ps_args=None):
"""
Display the running processes of a container.
Args:
container (str): The container to inspect
            ps_args (str): Optional arguments passed to ``ps`` (e.g. ``aux``)
Returns:
            (dict): The output of ``ps`` inside the container, as a dict
                with ``Titles`` and ``Processes`` keys
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
u = self._url("/containers/{0}/top", container)
params = {}
if ps_args is not None:
params['ps_args'] = ps_args
return self._result(self._get(u, params=params), True)
@utils.check_resource('container')
def unpause(self, container):
"""
Unpause all processes within a container.
Args:
container (str): The container to unpause
"""
url = self._url('/containers/{0}/unpause', container)
res = self._post(url)
self._raise_for_status(res)
@utils.minimum_version('1.22')
@utils.check_resource('container')
def update_container(
self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
mem_reservation=None, memswap_limit=None, kernel_memory=None,
restart_policy=None
):
"""
        Update the resource configuration of a container. Similar to the
        ``docker update`` command.
Args:
            container (str): The container to update
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
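        Example:
            A sketch; the container ID is hypothetical and the restart
            policy dict follows the Engine API ``RestartPolicy`` shape:
            >>> cli.update_container(
            ...     '45e6d2de7c54', mem_limit='1g',
            ...     restart_policy={'Name': 'on-failure',
            ...                     'MaximumRetryCount': 5}
            ... )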
"""
url = self._url('/containers/{0}/update', container)
data = {}
if blkio_weight:
data['BlkioWeight'] = blkio_weight
if cpu_period:
data['CpuPeriod'] = cpu_period
if cpu_shares:
data['CpuShares'] = cpu_shares
if cpu_quota:
data['CpuQuota'] = cpu_quota
if cpuset_cpus:
data['CpusetCpus'] = cpuset_cpus
if cpuset_mems:
data['CpusetMems'] = cpuset_mems
if mem_limit:
data['Memory'] = utils.parse_bytes(mem_limit)
if mem_reservation:
data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
if memswap_limit:
data['MemorySwap'] = utils.parse_bytes(memswap_limit)
if kernel_memory:
data['KernelMemory'] = utils.parse_bytes(kernel_memory)
if restart_policy:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'restart policy update is not supported '
'for API version < 1.23'
)
data['RestartPolicy'] = restart_policy
res = self._post_json(url, data=data)
return self._result(res, True)
@utils.check_resource('container')
def wait(self, container, timeout=None):
"""
Block until a container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
container (str or dict): The container to wait on. If a dict, the
``Id`` key is used.
timeout (int): Request timeout
Returns:
(int): The exit code of the container. Returns ``-1`` if the API
responds without a ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
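        Example:
            A sketch chaining create, start and wait; assumes ``cli`` is a
            connected APIClient:
            >>> container = cli.create_container(
            ...     image='busybox:latest', command='/bin/sleep 2')
            >>> cli.start(container=container.get('Id'))
            >>> cli.wait(container.get('Id'), timeout=30)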
"""
url = self._url("/containers/{0}/wait", container)
res = self._post(url, timeout=timeout)
self._raise_for_status(res)
json_ = res.json()
if 'StatusCode' in json_:
return json_['StatusCode']
return -1
docker-2.5.1/docker/api/secret.py 0000664 0001750 0001750 00000004653 13124577310 020014 0 ustar joffrey joffrey 0000000 0000000 import base64
import six
from .. import utils
class SecretApiMixin(object):
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None):
"""
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
Returns (dict): ID of the newly created secret
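        Example:
            A minimal sketch; assumes ``cli`` is an APIClient connected to
            a swarm manager, and the secret name and value are
            illustrative:
            >>> cli.create_secret('db_password', b's3cr3t',
            ...                   labels={'env': 'staging'})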
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_secret(self, id):
"""
Retrieve secret metadata
Args:
            id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_secret(self, id):
"""
Remove a secret
Args:
id (string): Full ID of the secret to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def secrets(self, filters=None):
"""
List secrets
Args:
filters (dict): A map of filters to process on the secrets
list. Available filters: ``names``
Returns (list): A list of secrets
"""
url = self._url('/secrets')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
docker-2.5.1/docker/api/image.py 0000664 0001750 0001750 00000044517 13147140552 017613 0 ustar joffrey joffrey 0000000 0000000 import logging
import os
import warnings
import six
from .. import auth, errors, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
log = logging.getLogger(__name__)
class ImageApiMixin(object):
@utils.check_resource('image')
def get_image(self, image):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
Returns:
(urllib3.response.HTTPResponse object): The response from the
daemon.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.get_image("fedora:latest")
>>> f = open('/tmp/fedora-latest.tar', 'w')
>>> f.write(image.data)
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
self._raise_for_status(res)
return res.raw
@utils.check_resource('image')
def history(self, image):
"""
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
(str): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, viz=False,
filters=None):
"""
List images. Similar to the ``docker images`` command.
Args:
name (str): Only show images belonging to the repository ``name``
quiet (bool): Only return numeric IDs as a list.
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- ``label`` (str): format either ``key`` or ``key=value``
Returns:
            (list): A list of image IDs if ``quiet`` is set, otherwise a
                list of dicts, one per image.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
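        Example:
            A sketch; assumes ``cli`` is a connected APIClient:
            >>> cli.images(filters={'dangling': True})
            >>> cli.images(name='busybox', quiet=True)  # IDs only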
"""
if viz:
if utils.compare_version('1.7', self._version) >= 0:
raise Exception('Viz output is not supported in API >= 1.7!')
return self._result(self._get(self._url("images/viz")))
params = {
'filter': name,
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
"""
Import an image. Similar to the ``docker import`` command.
If ``src`` is a string or unicode string, it will first be treated as a
path to a tarball on the local system. If there is an error reading
from that file, ``src`` will be treated as a URL instead to fetch the
image from. You can also pass an open file handle as ``src``, in which
case the data will be read from that file.
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
be taken as the name of an existing image to import from.
Args:
src (str or file): Path to tarfile, URL, or file-like object
repository (str): The repository to create
tag (str): The tag to apply
image (str): Use another image like the ``FROM`` Dockerfile
parameter
"""
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
)
u = self._url('/images/create')
params = _import_image_params(
repository, tag, image,
src=(src if isinstance(src, six.string_types) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
if image or params.get('fromSrc') != '-': # from image or URL
return self._result(
self._post(u, data=None, params=params)
)
elif isinstance(src, six.string_types): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
u, data=f, params=params, headers=headers, timeout=None
)
)
else: # from raw data
if stream_src:
headers['Transfer-Encoding'] = 'chunked'
return self._result(
self._post(u, data=src, params=params, headers=headers)
)
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
allows importing in-memory bytes data.
Args:
data (bytes collection): Bytes collection containing valid tar data
repository (str): The repository to create
tag (str): The tag to apply
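        Example:
            A sketch; the tarball path and repository name are
            hypothetical:
            >>> with open('/tmp/rootfs.tar', 'rb') as f:
            ...     data = f.read()
            >>> cli.import_image_from_data(data, repository='myorg/rootfs',
            ...                            tag='latest')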
"""
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
)
headers = {'Content-Type': 'application/tar'}
return self._result(
self._post(
u, data=data, params=params, headers=headers, timeout=None
)
)
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist.
"""
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
def import_image_from_stream(self, stream, repository=None, tag=None,
changes=None):
return self.import_image(
src=stream, stream_src=True, repository=repository, tag=tag,
changes=changes
)
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a URL.
Args:
url (str): A URL pointing to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
@utils.check_resource('image')
def insert(self, image, url, path):
if utils.compare_version('1.12', self._version) >= 0:
raise errors.DeprecatedMethod(
'insert is not available for API version >=1.12'
)
api_url = self._url("/images/{0}/insert", image)
params = {
'url': url,
'path': path
}
return self._result(self._post(api_url, params=params))
@utils.check_resource('image')
def inspect_image(self, image):
"""
Get detailed information about an image. Similar to the ``docker
        inspect`` command, but only for images.
Args:
            image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
def load_image(self, data, quiet=None):
"""
Load an image that was previously saved using
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
save``). Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
quiet (boolean): Suppress progress details in response.
Returns:
(generator): Progress output as JSON objects. Only available for
API version >= 1.23
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
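        Example:
            A sketch that round-trips an image through ``get_image`` and
            ``load_image``; the image name is illustrative:
            >>> image = cli.get_image('busybox:latest')
            >>> for line in cli.load_image(image.data):
            ...     print(line)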
"""
params = {}
if quiet is not None:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'quiet is not supported in API version < 1.23'
)
params['quiet'] = quiet
res = self._post(
self._url("/images/load"), data=data, params=params, stream=True
)
if utils.version_gte(self._version, '1.23'):
return self._stream_helper(res, decode=True)
self._raise_for_status(res)
@utils.minimum_version('1.25')
def prune_images(self, filters=None):
"""
Delete unused images
Args:
filters (dict): Filters to process on the prune list.
Available filters:
- dangling (bool): When set to true (or 1), prune only
unused and untagged images.
Returns:
(dict): A dict containing a list of deleted image IDs and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
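        Example:
            A minimal sketch; assumes ``cli`` is a connected APIClient
            against API >= 1.25:
            >>> cli.prune_images(filters={'dangling': True})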
"""
url = self._url("/images/prune")
params = {}
if filters is not None:
params['filters'] = utils.convert_filters(filters)
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
stream (bool): Stream the output as a generator
insecure_registry (bool): Use an insecure registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
this request. ``auth_config`` should contain the ``username``
                and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``
Returns:
(generator or str): The output
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.pull('busybox', stream=True):
... print(json.dumps(json.loads(line), indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
"id": "e72ac664f4f0"
}
{
"status": "Pulling image (latest) from busybox, endpoint: ...",
"progressDetail": {},
"id": "e72ac664f4f0"
}
"""
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
def push(self, repository, tag=None, stream=False,
insecure_registry=False, auth_config=None, decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
insecure_registry (bool): Use ``http://`` to connect to the
registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.api.daemon.DaemonApiMixin.login` has set for
this request. ``auth_config`` should contain the ``username``
                and ``password`` keys to be valid.
            decode (bool): Decode the JSON data from the server into dicts.
                Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for line in cli.push('yourname/app', stream=True):
            ...     print(line)
{"status":"Pushing repository yourname/app (1 tags)"}
{"status":"Pushing","progressDetail":{},"id":"511136ea3c5a"}
{"status":"Image already pushed, skipping","progressDetail":{},
"id":"511136ea3c5a"}
...
"""
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
DeprecationWarning
)
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if utils.compare_version('1.5', self._version) >= 0:
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
@utils.check_resource('image')
def remove_image(self, image, force=False, noprune=False):
"""
Remove an image. Similar to the ``docker rmi`` command.
Args:
image (str): The image to remove
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
"""
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
def search(self, term):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
Returns:
(list of dicts): The response of the search.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/search"), params={'term': term}),
True
)
@utils.check_resource('image')
def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
            >>> client.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
            ...            force=True)
"""
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
def is_file(src):
try:
return (
isinstance(src, six.string_types) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
return False
def _import_image_params(repo, tag, image=None, src=None,
changes=None):
params = {
'repo': repo,
'tag': tag,
}
if image:
params['fromImage'] = image
elif src and not is_file(src):
params['fromSrc'] = src
else:
params['fromSrc'] = '-'
if changes:
params['changes'] = changes
return params
docker-2.5.1/docker/api/network.py 0000664 0001750 0001750 00000023266 13147140431 020214 0 ustar joffrey joffrey 0000000 0000000 from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
from .. import utils
class NetworkApiMixin(object):
@minimum_version('1.21')
def networks(self, names=None, ids=None, filters=None):
"""
        List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
                - ``driver=[<driver-name>]`` Matches a network's driver.
                - ``label=[<key>]`` or ``label=[<key>=<value>]``.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
            (list): A list of network objects (dicts).
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
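        Example:
            A sketch; the filter values are illustrative:
            >>> client.networks(names=['bridge'])
            >>> client.networks(filters={'driver': 'overlay'})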
"""
if filters is None:
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None,
ingress=None):
"""
        Create a network. Similar to the ``docker network create``
        command.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
            >>> ipam_pool = docker.types.IPAMPool(
            ...     subnet='192.168.52.0/24',
            ...     gateway='192.168.52.254'
            ... )
            >>> ipam_config = docker.types.IPAMConfig(
            ...     pool_configs=[ipam_pool]
            ... )
            >>> docker_client.create_network("network1", driver="bridge",
            ...                              ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'Options': options,
'IPAM': ipam,
'CheckDuplicate': check_duplicate,
}
if labels is not None:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'network labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'enable_ipv6 was introduced in API 1.23'
)
data['EnableIPv6'] = True
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
'supported in API version < 1.22')
data['Internal'] = True
if attachable is not None:
if version_lt(self._version, '1.24'):
raise InvalidVersion(
'attachable is not supported in API version < 1.24'
)
data['Attachable'] = attachable
if ingress is not None:
if version_lt(self._version, '1.29'):
raise InvalidVersion(
'ingress is not supported in API version < 1.29'
)
data['Ingress'] = ingress
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@minimum_version('1.25')
def prune_networks(self, filters=None):
"""
Delete unused networks
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted network names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/networks/prune')
return self._result(self._post(url, params=params), True)
@minimum_version('1.21')
@check_resource('net_id')
def remove_network(self, net_id):
"""
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@minimum_version('1.21')
@check_resource('net_id')
def inspect_network(self, net_id, verbose=None):
"""
Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
"""
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
url = self._url("/networks/{0}", net_id)
res = self._get(url, params=params)
return self._result(res, json=True)
@check_resource('container')
@minimum_version('1.21')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
"""
Connect a container to a network.
Args:
container (str): container-id/name to be connected to the network
net_id (str): network id
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local
(IPv4/IPv6) addresses.
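        Example:
            A sketch; the container and network IDs are hypothetical:
            >>> client.connect_container_to_network(
            ...     container='45e6d2de7c54', net_id='network1',
            ...     aliases=['web'], ipv4_address='192.168.52.2'
            ... )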
"""
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips
),
}
url = self._url("/networks/{0}/connect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
@check_resource('container')
@minimum_version('1.21')
def disconnect_container_from_network(self, container, net_id,
force=False):
"""
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False``
"""
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
docker-2.5.1/docker/api/swarm.py 0000664 0001750 0001750 00000027675 13124577310 017671 0 ustar joffrey joffrey 0000000 0000000 import logging
from six.moves import http_client
from .. import types
from .. import utils
log = logging.getLogger(__name__)
class SwarmApiMixin(object):
def create_swarm_spec(self, *args, **kwargs):
"""
Create a ``docker.types.SwarmSpec`` instance that can be used as the
``swarm_spec`` argument in
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
Args:
            task_history_retention_limit (int): Maximum number of task
                history entries stored.
            snapshot_interval (int): Number of log entries between
                snapshots.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
            heartbeat_tick (int): Number of ticks (in seconds) between
                each heartbeat.
            election_tick (int): Number of ticks (in seconds) needed
                without a leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_ca (dict): Configuration for forwarding signing requests
to an external certificate authority. Use
``docker.types.SwarmExternalCA``.
name (string): Swarm's name
Returns:
``docker.types.SwarmSpec`` instance.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
            >>> spec = client.create_swarm_spec(
            ...     snapshot_interval=5000,
            ...     log_entries_for_slow_followers=1200
            ... )
            >>> client.init_swarm(
            ...     advertise_addr='eth0', listen_addr='0.0.0.0:5000',
            ...     force_new_cluster=False, swarm_spec=spec
            ... )
"""
return types.SwarmSpec(*args, **kwargs)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
Args:
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
``advertise_addr`` is not specified, it will be automatically
detected when possible. Default: None
listen_addr (string): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: '0.0.0.0:2377'
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def inspect_swarm(self):
"""
Retrieve low-level information about the current swarm.
Returns:
A dictionary containing data about the swarm.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
"""
Retrieve low-level information about a swarm node
Args:
node_id (string): ID of the node to be inspected.
Returns:
A dictionary containing data about this node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr=None,
advertise_addr=None):
"""
Make this Engine join a swarm that has already been created.
Args:
remote_addrs (:py:class:`list`): Addresses of one or more manager
nodes already participating in the Swarm to join.
join_token (string): Secret token for joining this Swarm.
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
Endpoint (VTEP). Default: ``None``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
AdvertiseAddr is not specified, it will be automatically
detected when possible. Default: ``None``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
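        Example:
            A sketch; the manager address and join token are placeholders
            (real tokens come from the manager, e.g. via ``docker swarm
            join-token worker``):
            >>> client.join_swarm(
            ...     remote_addrs=['192.168.1.10:2377'],
            ...     join_token='SWMTKN-1-abcdef',
            ...     listen_addr='0.0.0.0:2377'
            ... )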
"""
data = {
"RemoteAddrs": remote_addrs,
"ListenAddr": listen_addr,
"JoinToken": join_token,
"AdvertiseAddr": advertise_addr,
}
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
"""
Leave a swarm.
Args:
force (bool): Leave the swarm even if this node is a manager.
Default: ``False``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
# FIXME: Temporary workaround for 1.13.0-rc bug
# https://github.com/docker/docker/issues/29192
if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of dictionaries containing data about each swarm node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def remove_node(self, node_id, force=False):
"""
Remove a node from the swarm.
Args:
node_id (string): ID of the node to be removed.
force (bool): Force remove an active node. Default: `False`
Raises:
:py:class:`docker.errors.NotFound`
If the node referenced doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
"""
url = self._url('/nodes/{0}', node_id)
params = {
'force': force
}
res = self._delete(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
"""
Update the Node's configuration
Args:
node_id (string): ID of the node to be updated.
version (int): The version number of the node object being
updated. This is required to avoid conflicting writes.
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
            >>> node_spec = {'Availability': 'active',
            ...              'Name': 'node-name',
            ...              'Role': 'manager',
            ...              'Labels': {'foo': 'bar'}}
            >>> client.update_node(node_id='24ifsmvkjbyhk', version=8,
            ...                    node_spec=node_spec)
"""
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
rotate_manager_token=False):
"""
Update the Swarm's configuration
Args:
version (int): The version number of the swarm object being
updated. This is required to avoid conflicting writes.
swarm_spec (dict): Configuration settings to update. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
generate a valid configuration. Default: ``None``.
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
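        Example:
            A sketch that reads the current version and spec from
            :py:meth:`~docker.api.swarm.SwarmApiMixin.inspect_swarm`
            before updating:
            >>> swarm = client.inspect_swarm()
            >>> client.update_swarm(
            ...     version=swarm['Version']['Index'],
            ...     swarm_spec=swarm['Spec'],
            ...     rotate_worker_token=True
            ... )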
"""
url = self._url('/swarm/update')
response = self._post_json(url, data=swarm_spec, params={
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
})
self._raise_for_status(response)
return True
docker-2.5.1/docker/api/plugin.py 0000664 0001750 0001750 00000020557 13124577310 020026 0 ustar joffrey joffrey 0000000 0000000 import six
from .. import auth, utils
class PluginApiMixin(object):
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
"""
Configure a plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
options (dict): A key-value mapping of options
Returns:
``True`` if successful
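        Example:
            A minimal sketch; the plugin name and option are illustrative:
            >>> client.configure_plugin('vieux/sshfs:latest',
            ...                         {'DEBUG': '1'})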
"""
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
data = ['{0}={1}'.format(k, v) for k, v in six.iteritems(data)]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def create_plugin(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
``True`` if successful
"""
url = self._url('/plugins/create')
with utils.create_archive(root=plugin_data_dir, gzip=gzip) as archv:
res = self._post(url, params={'name': name}, data=archv)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def disable_plugin(self, name):
"""
Disable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/disable', name)
res = self._post(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def enable_plugin(self, name, timeout=0):
"""
Enable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
timeout (int): Operation timeout (in seconds). Default: 0
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/enable', name)
params = {'timeout': timeout}
res = self._post(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def inspect_plugin(self, name):
"""
Retrieve plugin metadata.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
Returns:
A dict containing plugin info
"""
url = self._url('/plugins/{0}/json', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def pull_plugin(self, remote, privileges, name=None):
"""
Pull and install a plugin. After the plugin is installed, it can be
enabled using :py:meth:`~enable_plugin`.
Args:
remote (string): Remote reference for the plugin to install.
The ``:latest`` tag is optional, and is the default if
omitted.
privileges (list): A list of privileges the user consents to
grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
name (string): Local name for the pulled plugin. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
An iterable object streaming the decoded API logs
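        Example:
            A sketch of the pull-then-enable flow; the plugin name is
            illustrative:
            >>> privileges = client.plugin_privileges('vieux/sshfs:latest')
            >>> for line in client.pull_plugin('vieux/sshfs:latest',
            ...                                privileges):
            ...     print(line)
            >>> client.enable_plugin('vieux/sshfs:latest')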
"""
url = self._url('/plugins/pull')
params = {
'remote': remote,
}
if name:
params['name'] = name
headers = {}
registry, repo_name = auth.resolve_repository_name(remote)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post_json(
url, params=params, headers=headers, data=privileges,
stream=True
)
self._raise_for_status(response)
return self._stream_helper(response, decode=True)
@utils.minimum_version('1.25')
def plugins(self):
"""
Retrieve a list of installed plugins.
Returns:
A list of dicts, one per plugin
"""
url = self._url('/plugins')
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def plugin_privileges(self, name):
"""
Retrieve list of privileges to be granted to a plugin.
Args:
name (string): Name of the remote plugin to examine. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
A list of dictionaries representing the plugin's
permissions
"""
params = {
'remote': name,
}
url = self._url('/plugins/privileges')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.25')
@utils.check_resource('name')
def push_plugin(self, name):
"""
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
"""
        url = self._url('/plugins/{0}/push', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True)
@utils.minimum_version('1.25')
@utils.check_resource('name')
def remove_plugin(self, name, force=False):
"""
Remove an installed plugin.
Args:
name (string): Name of the plugin to remove. The ``:latest``
tag is optional, and is the default if omitted.
force (bool): Disable the plugin before removing. This may
result in issues if the plugin is in use by a container.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}', name)
res = self._delete(url, params={'force': force})
self._raise_for_status(res)
return True
@utils.minimum_version('1.26')
@utils.check_resource('name')
def upgrade_plugin(self, name, remote, privileges):
"""
Upgrade an installed plugin.
Args:
name (string): Name of the plugin to upgrade. The ``:latest``
tag is optional and is the default if omitted.
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
privileges (list): A list of privileges the user consents to
grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
Returns:
An iterable object streaming the decoded API logs
"""
url = self._url('/plugins/{0}/upgrade', name)
params = {
'remote': remote,
}
headers = {}
registry, repo_name = auth.resolve_repository_name(remote)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post_json(
url, params=params, headers=headers, data=privileges,
stream=True
)
self._raise_for_status(response)
return self._stream_helper(response, decode=True)
docker-2.5.1/docker/api/client.py 0000664 0001750 0001750 00000040513 13145377337 020012 0 ustar joffrey joffrey 0000000 0000000 import json
import struct
import warnings
from functools import partial
import requests
import requests.exceptions
import six
import websocket
from .build import BuildApiMixin
from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .network import NetworkApiMixin
from .plugin import PluginApiMixin
from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
from .. import auth
from ..constants import (
DEFAULT_TIMEOUT_SECONDS, DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
DEFAULT_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES, DEFAULT_NUM_POOLS,
MINIMUM_DOCKER_API_VERSION
)
from ..errors import (
DockerException, TLSParameterError,
create_api_error_from_http_exception
)
from ..tls import TLSConfig
from ..transport import SSLAdapter, UnixAdapter
from ..utils import utils, check_resource, update_headers
from ..utils.socket import frames_iter, socket_raw_iter
from ..utils.json_stream import json_stream
try:
from ..transport import NpipeAdapter
except ImportError:
pass
class APIClient(
requests.Session,
BuildApiMixin,
ContainerApiMixin,
DaemonApiMixin,
ExecApiMixin,
ImageApiMixin,
NetworkApiMixin,
PluginApiMixin,
SecretApiMixin,
ServiceApiMixin,
SwarmApiMixin,
VolumeApiMixin):
"""
A low-level client for the Docker Engine API.
Example:
>>> import docker
>>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
>>> client.version()
{u'ApiVersion': u'1.24',
u'Arch': u'amd64',
u'BuildTime': u'2016-09-27T23:38:15.810178467+00:00',
u'Experimental': True,
u'GitCommit': u'45bed2c',
u'GoVersion': u'go1.6.3',
u'KernelVersion': u'4.4.22-moby',
u'Os': u'linux',
u'Version': u'1.12.2-rc1'}
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
            automatically detect the server's version. Default: ``1.30``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
'_version',
'base_url',
'timeout']
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=DEFAULT_NUM_POOLS):
super(APIClient, self).__init__()
if tls and not base_url:
raise TLSParameterError(
'If using TLS, the base_url argument must be provided.'
)
self.base_url = base_url
self.timeout = timeout
self.headers['User-Agent'] = user_agent
self._auth_configs = auth.load_config()
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixAdapter(
base_url, timeout, pool_connections=num_pools
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
self.base_url = 'http+docker://localunixsocket'
elif base_url.startswith('npipe://'):
if not IS_WINDOWS_PLATFORM:
raise DockerException(
'The npipe:// protocol is only supported on Windows'
)
try:
self._custom_adapter = NpipeAdapter(
base_url, timeout, pool_connections=num_pools
)
except NameError:
raise DockerException(
'Install pypiwin32 package to enable npipe:// support'
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
self._custom_adapter = SSLAdapter(pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None:
self._version = DEFAULT_DOCKER_API_VERSION
elif isinstance(version, six.string_types):
if version.lower() == 'auto':
self._version = self._retrieve_server_version()
else:
self._version = version
else:
raise DockerException(
'Version parameter must be a string or None. Found {0}'.format(
type(version).__name__
)
)
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
warnings.warn(
'The minimum API version supported is {}, but you are using '
'version {}. It is recommended you either upgrade Docker '
'Engine or use an older version of Docker SDK for '
'Python.'.format(MINIMUM_DOCKER_API_VERSION, self._version)
)
def _retrieve_server_version(self):
try:
return self.version(api_version=False)["ApiVersion"]
except KeyError:
raise DockerException(
'Invalid response from docker daemon: key "ApiVersion"'
' is missing.'
)
except Exception as e:
raise DockerException(
'Error while fetching server API version: {0}'.format(e)
)
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
kwargs.setdefault('timeout', self.timeout)
return kwargs
@update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
@update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
@update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
@update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, six.string_types):
raise ValueError(
'Expected a string but found {0} ({1}) '
'instead'.format(arg, type(arg))
)
quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
return '{0}/v{1}{2}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise create_api_error_from_http_exception(e)
def _result(self, response, json=False, binary=False):
assert not (json and binary)
self._raise_for_status(response)
if json:
return response.json()
if binary:
return response.content
return response.text
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
for k, v in six.iteritems(data):
if v is not None:
data2[k] = v
elif data is not None:
data2 = data
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self._post(url, data=json.dumps(data2), **kwargs)
def _attach_params(self, override=None):
return override or {
'stdout': 1,
'stderr': 1,
'stream': 1
}
@check_resource('container')
def _attach_websocket(self, container, params=None):
url = self._url("/containers/{0}/attach/ws", container)
req = requests.Request("POST", url, params=self._attach_params(params))
full_url = req.prepare().url
full_url = full_url.replace("http://", "ws://", 1)
full_url = full_url.replace("https://", "wss://", 1)
return self._create_websocket_connection(full_url)
def _create_websocket_connection(self, url):
return websocket.create_connection(url)
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
elif six.PY3:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
pass
return sock
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
if decode:
for chunk in json_stream(self._stream_helper(response, False)):
yield chunk
else:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
buf_length = len(buf)
walker = 0
while True:
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
_, length = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
stream."""
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
while True:
header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
if not header:
break
_, length = struct.unpack('>BxxxL', header)
if not length:
continue
data = response.raw.read(length)
if not data:
break
yield data
def _stream_raw_result_old(self, response):
''' Stream raw output for API versions below 1.6 '''
self._raise_for_status(response)
for line in response.iter_lines(chunk_size=1,
decode_unicode=True):
# filter out keep-alive new lines
if line:
yield line
def _stream_raw_result(self, response):
''' Stream result for TTY-enabled container above API 1.6 '''
self._raise_for_status(response)
for out in response.iter_content(chunk_size=1, decode_unicode=True):
yield out
def _read_from_socket(self, response, stream, tty=False):
socket = self._get_raw_response_socket(response)
gen = None
if tty is False:
gen = frames_iter(socket)
else:
gen = socket_raw_iter(socket)
if stream:
return gen
else:
return six.binary_type().join(gen)
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None)
@check_resource('container')
def _check_is_tty(self, container):
cont = self.inspect_container(container)
return cont['Config']['Tty']
def _get_result(self, container, stream, res):
return self._get_result_tty(stream, res, self._check_is_tty(container))
def _get_result_tty(self, stream, res, is_tty):
# Stream multi-plexing was only introduced in API v1.6. Anything
# before that needs old-style streaming.
if utils.compare_version('1.6', self._version) < 0:
return self._stream_raw_result_old(res)
# We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
self._raise_for_status(res)
sep = six.binary_type()
if stream:
return self._multiplexed_response_stream_helper(res)
else:
return sep.join(
[x for x in self._multiplexed_buffer_helper(res)]
)
def _unmount(self, *args):
for proto in args:
self.adapters.pop(proto)
def get_adapter(self, url):
try:
return super(APIClient, self).get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
else:
raise e
@property
def api_version(self):
return self._version
def reload_config(self, dockercfg_path=None):
"""
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)
Returns:
None
"""
self._auth_configs = auth.load_config(dockercfg_path)
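# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the frame layout
# consumed by _multiplexed_response_stream_helper above. Each frame starts
# with an 8-byte header -- one byte for the stream type (0=stdin, 1=stdout,
# 2=stderr), three padding bytes, and a big-endian uint32 payload length.
# The sample frame below is fabricated for demonstration only.
import struct

def _demux_frame_example(buf):
    """Split a single multiplexed frame into (stream_type, payload)."""
    stream_type, length = struct.unpack('>BxxxL', buf[:8])
    return stream_type, buf[8:8 + length]

_frame = struct.pack('>BxxxL', 1, 6) + b'hello\n'
assert _demux_frame_example(_frame) == (1, b'hello\n')
# ---------------------------------------------------------------------------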
# docker-2.5.1/docker/api/__init__.py
# flake8: noqa
from .client import APIClient
# docker-2.5.1/docker/api/daemon.py
import os
import warnings
from datetime import datetime
from .. import auth, utils
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
class DaemonApiMixin(object):
@utils.minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
(generator): A blocking generator you can iterate over to retrieve
events as they happen.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
            >>> for event in client.events():
            ...     print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
return self._stream_helper(
self._get(url, params=params, stream=True, timeout=None),
decode=decode
)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, insecure_registry=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
                otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if insecure_registry:
warnings.warn(
INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
DeprecationWarning
)
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(dockercfg_path)
elif not self._auth_configs:
self._auth_configs = auth.load_config()
authcfg = auth.resolve_authconfig(self._auth_configs, registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs[registry or auth.INDEX_NAME] = req_data
return self._result(response, json=True)
def ping(self):
"""
        Checks whether the server is responsive. An exception will be raised
        if it isn't responding.
        Returns:
            (bool): ``True`` if the server responded with ``OK``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
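# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Assumes a
# reachable Docker daemon on the default socket; wrapped in a function so
# nothing runs at import time.
def _daemon_api_example():
    from docker import APIClient
    cli = APIClient(base_url='unix://var/run/docker.sock')
    print(cli.version()['ApiVersion'])      # like `docker version`
    print(cli.info()['ServerVersion'])      # like `docker info`
    for event in cli.events(decode=True):   # blocking generator of events
        print(event)
        break
# ---------------------------------------------------------------------------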
# docker-2.5.1/docker/client.py
from .api.client import APIClient
from .constants import DEFAULT_TIMEOUT_SECONDS
from .models.containers import ContainerCollection
from .models.images import ImageCollection
from .models.networks import NetworkCollection
from .models.nodes import NodeCollection
from .models.plugins import PluginCollection
from .models.secrets import SecretCollection
from .models.services import ServiceCollection
from .models.swarm import Swarm
from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
class DockerClient(object):
"""
A client for communicating with a Docker server.
Example:
>>> import docker
>>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.26``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
"""
Return a client configured from environment variables.
The environment variables used are the same as those used by the
Docker command-line client. They are:
.. envvar:: DOCKER_HOST
The URL to the Docker host.
.. envvar:: DOCKER_TLS_VERIFY
Verify the host against a CA certificate.
.. envvar:: DOCKER_CERT_PATH
A path to a directory containing TLS certificates to use when
connecting to the Docker host.
Args:
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.26``
timeout (int): Default timeout for API calls, in seconds.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
Example:
>>> import docker
>>> client = docker.from_env()
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
version = kwargs.pop('version', None)
return cls(timeout=timeout, version=version,
**kwargs_from_env(**kwargs))
# Resources
@property
def containers(self):
"""
An object for managing containers on the server. See the
:doc:`containers documentation ` for full details.
"""
return ContainerCollection(client=self)
@property
def images(self):
"""
An object for managing images on the server. See the
:doc:`images documentation ` for full details.
"""
return ImageCollection(client=self)
@property
def networks(self):
"""
An object for managing networks on the server. See the
:doc:`networks documentation ` for full details.
"""
return NetworkCollection(client=self)
@property
def nodes(self):
"""
An object for managing nodes on the server. See the
:doc:`nodes documentation ` for full details.
"""
return NodeCollection(client=self)
@property
def plugins(self):
"""
An object for managing plugins on the server. See the
:doc:`plugins documentation ` for full details.
"""
return PluginCollection(client=self)
@property
def secrets(self):
"""
An object for managing secrets on the server. See the
:doc:`secrets documentation ` for full details.
"""
return SecretCollection(client=self)
@property
def services(self):
"""
An object for managing services on the server. See the
:doc:`services documentation ` for full details.
"""
return ServiceCollection(client=self)
@property
def swarm(self):
"""
An object for managing a swarm on the server. See the
:doc:`swarm documentation ` for full details.
"""
return Swarm(client=self)
@property
def volumes(self):
"""
An object for managing volumes on the server. See the
:doc:`volumes documentation ` for full details.
"""
return VolumeCollection(client=self)
# Top-level methods
def events(self, *args, **kwargs):
return self.api.events(*args, **kwargs)
events.__doc__ = APIClient.events.__doc__
def df(self):
return self.api.df()
df.__doc__ = APIClient.df.__doc__
def info(self, *args, **kwargs):
return self.api.info(*args, **kwargs)
info.__doc__ = APIClient.info.__doc__
def login(self, *args, **kwargs):
return self.api.login(*args, **kwargs)
login.__doc__ = APIClient.login.__doc__
def ping(self, *args, **kwargs):
return self.api.ping(*args, **kwargs)
ping.__doc__ = APIClient.ping.__doc__
def version(self, *args, **kwargs):
return self.api.version(*args, **kwargs)
version.__doc__ = APIClient.version.__doc__
def __getattr__(self, name):
s = ["'DockerClient' object has no attribute '{}'".format(name)]
        # If the user is calling a method that has moved to APIClient,
        # point them at the low-level API in the error message.
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
"object APIClient. See the low-level API section of the "
"documentation for more details.")
raise AttributeError(' '.join(s))
from_env = DockerClient.from_env
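# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). from_env()
# reads DOCKER_HOST / DOCKER_TLS_VERIFY / DOCKER_CERT_PATH; assumes a
# reachable daemon.
def _docker_client_example():
    import docker
    client = docker.from_env()
    print(client.ping())                               # True when responsive
    print([c.name for c in client.containers.list()])
    # The low-level API remains available on the `api` attribute:
    print(client.api.version()['ApiVersion'])
# ---------------------------------------------------------------------------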
# docker-2.5.1/docker/__init__.py
# flake8: noqa
from .api import APIClient
from .client import DockerClient, from_env
from .version import version, version_info
__version__ = version
__title__ = 'docker'
# docker-2.5.1/docker/version.py
version = "2.5.1"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
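# Illustrative check (not part of the original module): the expression above
# drops any pre-release suffix before splitting, e.g. "2.5.1-dev" -> (2, 5, 1).
assert tuple(int(d) for d in "2.5.1-dev".split("-")[0].split(".")) == (2, 5, 1)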
# docker-2.5.1/docker/models/containers.py
import copy
from ..api import APIClient
from ..errors import (ContainerError, ImageNotFound,
create_unexpected_kwargs_error)
from ..types import HostConfig
from ..utils import version_gte
from .images import Image
from .resource import Collection, Model
class Container(Model):
@property
def name(self):
"""
The name of the container.
"""
if self.attrs.get('Name') is not None:
return self.attrs['Name'].lstrip('/')
@property
def image(self):
"""
The image of the container.
"""
image_id = self.attrs['Image']
if image_id is None:
return None
return self.client.images.get(image_id.split(':')[1])
@property
def labels(self):
"""
The labels of a container as dictionary.
"""
result = self.attrs['Config'].get('Labels')
return result or {}
@property
def status(self):
"""
The status of the container. For example, ``running``, or ``exited``.
"""
return self.attrs['State']['Status']
def attach(self, **kwargs):
"""
Attach to this container.
:py:meth:`logs` is a wrapper around this method, which you can
use instead if you want to fetch/stream container output without first
retrieving the entire backlog.
Args:
stdout (bool): Include stdout.
stderr (bool): Include stderr.
stream (bool): Return container output progressively as an iterator
of strings, rather than a single string.
logs (bool): Include the container's previous output.
Returns:
By default, the container's output as a single string.
If ``stream=True``, an iterator of output strings.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach(self.id, **kwargs)
def attach_socket(self, **kwargs):
"""
Like :py:meth:`attach`, but returns the underlying socket-like object
for the HTTP request.
Args:
params (dict): Dictionary of request parameters (e.g. ``stdout``,
``stderr``, ``stream``).
ws (bool): Use websockets instead of raw HTTP.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.attach_socket(self.id, **kwargs)
def commit(self, repository=None, tag=None, **kwargs):
"""
Commit a container to an image. Similar to the ``docker commit``
command.
Args:
repository (str): The repository to push the image to
tag (str): The tag to push
message (str): A commit message
author (str): The name of the author
changes (str): Dockerfile instructions to apply while committing
conf (dict): The configuration for the container. See the
`Engine API documentation
`_
for full details.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.commit(self.id, repository=repository, tag=tag,
**kwargs)
return self.client.images.get(resp['Id'])
def diff(self):
"""
Inspect changes on a container's filesystem.
Returns:
            (list): A list of dictionaries, each describing a changed path
                with ``Path`` and ``Kind`` keys.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.diff(self.id)
def exec_run(self, cmd, stdout=True, stderr=True, stdin=False, tty=False,
privileged=False, user='', detach=False, stream=False,
socket=False, environment=None):
"""
Run a command inside this container. Similar to
``docker exec``.
Args:
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
detach (bool): If true, detach from the exec command.
Default: False
stream (bool): Stream response data. Default: False
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
Returns:
(generator or str): If ``stream=True``, a generator yielding
response chunks. A string containing response data otherwise.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.exec_create(
self.id, cmd, stdout=stdout, stderr=stderr, stdin=stdin, tty=tty,
privileged=privileged, user=user, environment=environment
)
return self.client.api.exec_start(
resp['Id'], detach=detach, tty=tty, stream=stream, socket=socket
)
def export(self):
"""
Export the contents of the container's filesystem as a tar archive.
Returns:
(str): The filesystem tar archive
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.export(self.id)
def get_archive(self, path):
"""
Retrieve a file or folder from the container in the form of a tar
archive.
Args:
path (str): Path to the file or folder to retrieve
Returns:
(tuple): First element is a raw tar data stream. Second element is
a dict containing ``stat`` information on the specified ``path``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.get_archive(self.id, path)
def kill(self, signal=None):
"""
Kill or send a signal to the container.
Args:
signal (str or int): The signal to send. Defaults to ``SIGKILL``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.kill(self.id, signal=signal)
def logs(self, **kwargs):
"""
Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``
stderr (bool): Get ``STDERR``
stream (bool): Stream the response
timestamps (bool): Show timestamps
tail (str or int): Output specified number of lines at the end of
                logs. Either an integer number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.logs(self.id, **kwargs)
def pause(self):
"""
Pauses all processes within this container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.pause(self.id)
def put_archive(self, path, data):
"""
Insert a file or folder in this container using a tar archive as
source.
Args:
path (str): Path inside the container where the file(s) will be
extracted. Must exist.
data (bytes): tar data to be extracted
Returns:
(bool): True if the call succeeds.
Raises:
:py:class:`~docker.errors.APIError` If an error occurs.
"""
return self.client.api.put_archive(self.id, path, data)
def remove(self, **kwargs):
"""
Remove this container. Similar to the ``docker rm`` command.
Args:
v (bool): Remove the volumes associated with the container
link (bool): Remove the specified link and not the underlying
container
force (bool): Force the removal of a running container (uses
``SIGKILL``)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_container(self.id, **kwargs)
def rename(self, name):
"""
Rename this container. Similar to the ``docker rename`` command.
Args:
name (str): New name for the container
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.rename(self.id, name)
def resize(self, height, width):
"""
Resize the tty session.
Args:
height (int): Height of tty session
width (int): Width of tty session
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.resize(self.id, height, width)
def restart(self, **kwargs):
"""
Restart this container. Similar to the ``docker restart`` command.
Args:
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.restart(self.id, **kwargs)
def start(self, **kwargs):
"""
Start this container. Similar to the ``docker start`` command, but
doesn't support attach options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.start(self.id, **kwargs)
def stats(self, **kwargs):
"""
Stream statistics for this container. Similar to the
``docker stats`` command.
Args:
decode (bool): If set to true, stream will be decoded into dicts
on the fly. False by default.
stream (bool): If set to false, only the current stats will be
returned instead of a stream. True by default.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.stats(self.id, **kwargs)
def stop(self, **kwargs):
"""
Stops a container. Similar to the ``docker stop`` command.
Args:
timeout (int): Timeout in seconds to wait for the container to
stop before sending a ``SIGKILL``. Default: 10
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.stop(self.id, **kwargs)
def top(self, **kwargs):
"""
Display the running processes of the container.
Args:
            ps_args (str): Optional arguments to pass to ``ps``
                (e.g. ``aux``)
        Returns:
            (dict): The output of ``ps`` inside the container, as a dict
                with ``Titles`` and ``Processes`` keys.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.top(self.id, **kwargs)
def unpause(self):
"""
Unpause all processes within the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.unpause(self.id)
def update(self, **kwargs):
"""
        Update the resource configuration of this container.
Args:
blkio_weight (int): Block IO (relative weight), between 10 and 1000
cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period
cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota
cpu_shares (int): CPU shares (relative weight)
cpuset_cpus (str): CPUs in which to allow execution
cpuset_mems (str): MEMs in which to allow execution
mem_limit (int or str): Memory limit
mem_reservation (int or str): Memory soft limit
memswap_limit (int or str): Total memory (memory + swap), -1 to
disable swap
kernel_memory (int or str): Kernel memory limit
restart_policy (dict): Restart policy dictionary
Returns:
(dict): Dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.update_container(self.id, **kwargs)
def wait(self, **kwargs):
"""
Block until the container stops, then return its exit code. Similar to
the ``docker wait`` command.
Args:
timeout (int): Request timeout
Returns:
(int): The exit code of the container. Returns ``-1`` if the API
responds without a ``StatusCode`` attribute.
Raises:
:py:class:`requests.exceptions.ReadTimeout`
If the timeout is exceeded.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.wait(self.id, **kwargs)
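# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The image and
# command names are assumptions; requires a reachable daemon.
def _container_example():
    import docker
    client = docker.from_env()
    container = client.containers.run('alpine', 'sleep 60', detach=True)
    print(container.status)                    # e.g. 'running'
    print(container.exec_run('echo hi'))       # run a command inside it
    for line in container.logs(stream=True):   # blocking log generator
        print(line)
        break
    container.stop()
    container.remove()
# ---------------------------------------------------------------------------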
class ContainerCollection(Collection):
model = Container
def run(self, image, command=None, stdout=True, stderr=False,
remove=False, **kwargs):
"""
Run a container. By default, it will wait for the container to finish
and return its logs, similar to ``docker run``.
If the ``detach`` argument is ``True``, it will start the container
and immediately return a :py:class:`Container` object, similar to
``docker run -d``.
Example:
Run a container and get its output:
>>> import docker
>>> client = docker.from_env()
>>> client.containers.run('alpine', 'echo hello world')
b'hello world\\n'
Run a container and detach:
>>> container = client.containers.run('bfirsh/reticulate-splines',
detach=True)
>>> container.logs()
'Reticulating spline 1...\\nReticulating spline 2...\\n'
Args:
image (str): The image to run.
command (str or list): The command to run in the container.
auto_remove (bool): enable auto-removal of the container on daemon
side when the container's process exits.
blkio_weight_device: Block IO weight (relative device weight) in
the form of: ``[{"Path": "device_path", "Weight": weight}]``.
blkio_weight: Block IO weight (relative weight), accepts a weight
value between 10 and 1000.
cap_add (list of str): Add kernel capabilities. For example,
``["SYS_ADMIN", "MKNOD"]``.
cap_drop (list of str): Drop kernel capabilities.
cpu_count (int): Number of usable CPUs (Windows only).
cpu_percent (int): Usable percentage of the available CPUs
(Windows only).
cpu_period (int): The length of a CPU period in microseconds.
cpu_quota (int): Microseconds of CPU time that the container can
get in a CPU period.
cpu_shares (int): CPU shares (relative weight).
cpuset_cpus (str): CPUs in which to allow execution (``0-3``,
``0,1``).
cpuset_mems (str): Memory nodes (MEMs) in which to allow execution
(``0-3``, ``0,1``). Only effective on NUMA systems.
detach (bool): Run container in the background and return a
:py:class:`Container` object.
device_read_bps: Limit read rate (bytes per second) from a device
in the form of: `[{"Path": "device_path", "Rate": rate}]`
device_read_iops: Limit read rate (IO per second) from a device.
device_write_bps: Limit write rate (bytes per second) from a
device.
device_write_iops: Limit write rate (IO per second) from a device.
devices (:py:class:`list`): Expose host devices to the container,
as a list of strings in the form
                ``<path_on_host>:<path_in_container>:<cgroup_permissions>``.
For example, ``/dev/sda:/dev/xvda:rwm`` allows the container
to have read-write access to the host's ``/dev/sda`` via a
node named ``/dev/xvda`` inside the container.
dns (:py:class:`list`): Set custom DNS servers.
dns_opt (:py:class:`list`): Additional options to be added to the
container's ``resolv.conf`` file.
dns_search (:py:class:`list`): DNS search domains.
domainname (str or list): Set custom DNS search domains.
entrypoint (str or list): The entrypoint for the container.
environment (dict or list): Environment variables to set inside
the container, as a dictionary or a list of strings in the
format ``["SOMEVARIABLE=xxx"]``.
            extra_hosts (dict): Additional hostnames to resolve inside the
container, as a mapping of hostname to IP address.
group_add (:py:class:`list`): List of additional group names and/or
IDs that the container process will run as.
healthcheck (dict): Specify a test to perform to check that the
container is healthy.
hostname (str): Optional hostname for the container.
init (bool): Run an init inside the container that forwards
signals and reaps processes
init_path (str): Path to the docker-init binary
ipc_mode (str): Set the IPC mode for the container.
isolation (str): Isolation technology to use. Default: `None`.
labels (dict or list): A dictionary of name-value labels (e.g.
``{"label1": "value1", "label2": "value2"}``) or a list of
names of labels to set with empty values (e.g.
``["label1", "label2"]``)
links (dict or list of tuples): Either a dictionary mapping name
to alias or as a list of ``(name, alias)`` tuples.
log_config (dict): Logging configuration, as a dictionary with
keys:
- ``type`` The logging driver name.
- ``config`` A dictionary of configuration for the logging
driver.
mac_address (str): MAC address to assign to the container.
mem_limit (int or str): Memory limit. Accepts float values
(which represent the memory limit of the created container in
bytes) or a string with a units identification char
(``100000b``, ``1000k``, ``128m``, ``1g``). If a string is
specified without a units character, bytes are assumed as an
intended unit.
mem_swappiness (int): Tune a container's memory swappiness
behavior. Accepts number between 0 and 100.
memswap_limit (str or int): Maximum amount of memory + swap a
container is allowed to consume.
name (str): The name for this container.
            nano_cpus (int): CPU quota in units of 1e-9 CPUs.
network (str): Name of the network this container will be connected
to at creation time. You can connect to additional networks
using :py:meth:`Network.connect`. Incompatible with
``network_mode``.
network_disabled (bool): Disable networking.
network_mode (str): One of:
                - ``bridge`` Create a new network stack for the container
                  on the bridge network.
                - ``none`` No networking for this container.
                - ``container:<name|id>`` Reuse another container's network
stack.
- ``host`` Use the host network stack.
Incompatible with ``network``.
oom_kill_disable (bool): Whether to disable OOM killer.
oom_score_adj (int): An integer value containing the score given
to the container in order to tune OOM killer preferences.
pid_mode (str): If set to ``host``, use the host PID namespace
inside the container.
pids_limit (int): Tune a container's pids limit. Set ``-1`` for
unlimited.
ports (dict): Ports to bind inside the container.
The keys of the dictionary are the ports to bind inside the
container, either as an integer or a string in the form
``port/protocol``, where the protocol is either ``tcp`` or
``udp``.
The values of the dictionary are the corresponding ports to
open on the host, which can be either:
- The port number, as an integer. For example,
``{'2222/tcp': 3333}`` will expose port 2222 inside the
container as port 3333 on the host.
- ``None``, to assign a random host port. For example,
``{'2222/tcp': None}``.
- A tuple of ``(address, port)`` if you want to specify the
host interface. For example,
``{'1111/tcp': ('127.0.0.1', 1111)}``.
- A list of integers, if you want to bind multiple host ports
to a single container port. For example,
``{'1111/tcp': [1234, 4567]}``.
privileged (bool): Give extended privileges to this container.
publish_all_ports (bool): Publish all ports to the host.
read_only (bool): Mount the container's root filesystem as read
only.
remove (bool): Remove the container when it has finished running.
Default: ``False``.
restart_policy (dict): Restart the container when it exits.
Configured as a dictionary with keys:
                - ``Name`` One of ``on-failure`` or ``always``.
- ``MaximumRetryCount`` Number of times to restart the
container on failure.
For example:
``{"Name": "on-failure", "MaximumRetryCount": 5}``
security_opt (:py:class:`list`): A list of string values to
customize labels for MLS systems, such as SELinux.
shm_size (str or int): Size of /dev/shm (e.g. ``1G``).
stdin_open (bool): Keep ``STDIN`` open even if not attached.
stdout (bool): Return logs from ``STDOUT`` when ``detach=False``.
Default: ``True``.
stderr (bool): Return logs from ``STDERR`` when ``detach=False``.
Default: ``False``.
stop_signal (str): The stop signal to use to stop the container
(e.g. ``SIGINT``).
storage_opt (dict): Storage driver options per container as a
key-value mapping.
sysctls (dict): Kernel parameters to set in the container.
tmpfs (dict): Temporary filesystems to mount, as a dictionary
mapping a path inside the container to options for that path.
For example:
.. code-block:: python
{
'/mnt/vol2': '',
'/mnt/vol1': 'size=3G,uid=1000'
}
tty (bool): Allocate a pseudo-TTY.
ulimits (:py:class:`list`): Ulimits to set inside the container, as
a list of dicts.
user (str or int): Username or UID to run commands as inside the
container.
userns_mode (str): Sets the user namespace mode for the container
when user namespace remapping option is enabled. Supported
values are: ``host``
volume_driver (str): The name of a volume driver/plugin.
volumes (dict or list): A dictionary to configure volumes mounted
inside the container. The key is either the host path or a
volume name, and the value is a dictionary with the keys:
- ``bind`` The path to mount the volume inside the container
- ``mode`` Either ``rw`` to mount the volume read/write, or
``ro`` to mount it read-only.
For example:
.. code-block:: python
{'/home/user1/': {'bind': '/mnt/vol2', 'mode': 'rw'},
'/var/www': {'bind': '/mnt/vol1', 'mode': 'ro'}}
volumes_from (:py:class:`list`): List of container names or IDs to
get volumes from.
working_dir (str): Path to the working directory.
runtime (str): Runtime to use with this container.
Returns:
The container logs, either ``STDOUT``, ``STDERR``, or both,
depending on the value of the ``stdout`` and ``stderr`` arguments.
            ``STDOUT`` and ``STDERR`` may be read only if either the
            ``json-file`` or ``journald`` logging driver is used. If neither
            of these drivers is in use, ``None`` is returned instead. See the
`Engine API documentation
`_
for full details.
If ``detach`` is ``True``, a :py:class:`Container` object is
returned instead.
Raises:
:py:class:`docker.errors.ContainerError`
If the container exits with a non-zero exit code and
``detach`` is ``False``.
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
detach = kwargs.pop("detach", False)
if detach and remove:
if version_gte(self.client.api._version, '1.25'):
kwargs["auto_remove"] = True
else:
raise RuntimeError("The options 'detach' and 'remove' cannot "
"be used together in api versions < 1.25.")
if kwargs.get('network') and kwargs.get('network_mode'):
raise RuntimeError(
'The options "network" and "network_mode" can not be used '
'together.'
)
try:
container = self.create(image=image, command=command,
detach=detach, **kwargs)
except ImageNotFound:
self.client.images.pull(image)
container = self.create(image=image, command=command,
detach=detach, **kwargs)
container.start()
if detach:
return container
exit_status = container.wait()
if exit_status != 0:
stdout = False
stderr = True
logging_driver = container.attrs['HostConfig']['LogConfig']['Type']
if logging_driver == 'json-file' or logging_driver == 'journald':
out = container.logs(stdout=stdout, stderr=stderr)
else:
out = None
if remove:
container.remove()
if exit_status != 0:
raise ContainerError(container, exit_status, command, image, out)
return out
def create(self, image, command=None, **kwargs):
"""
Create a container without starting it. Similar to ``docker create``.
Takes the same arguments as :py:meth:`run`, except for ``stdout``,
``stderr``, and ``remove``.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the specified image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(image, Image):
image = image.id
kwargs['image'] = image
kwargs['command'] = command
kwargs['version'] = self.client.api._version
create_kwargs = _create_container_args(kwargs)
resp = self.client.api.create_container(**create_kwargs)
return self.get(resp['Id'])
def get(self, container_id):
"""
Get a container by name or ID.
Args:
container_id (str): Container name or ID.
Returns:
A :py:class:`Container` object.
Raises:
:py:class:`docker.errors.NotFound`
If the container does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.inspect_container(container_id)
return self.prepare_model(resp)
def list(self, all=False, before=None, filters=None, limit=-1, since=None):
"""
List containers. Similar to the ``docker ps`` command.
Args:
all (bool): Show all containers. Only running containers are shown
by default
since (str): Show only containers created since Id or Name, include
non-running ones
            before (str): Show only containers created before Id or Name,
include non-running ones
limit (int): Show `limit` last created containers, include
non-running ones
filters (dict): Filters to be processed on the image list.
Available filters:
- `exited` (int): Only containers with specified exit code
- `status` (str): One of ``restarting``, ``running``,
``paused``, ``exited``
- `label` (str): format either ``"key"`` or ``"key=value"``
- `id` (str): The id of the container.
- `name` (str): The name of the container.
- `ancestor` (str): Filter by container ancestor. Format of
                    ``<image-name>[:tag]``, ``<image-id>``, or
                    ``<image@digest>``.
- `before` (str): Only containers created before a particular
container. Give the container name or id.
- `since` (str): Only containers created after a particular
container. Give container name or id.
A comprehensive list can be found in the documentation for
`docker ps
`_.
Returns:
(list of :py:class:`Container`)
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.containers(all=all, before=before,
filters=filters, limit=limit,
since=since)
return [self.get(r['Id']) for r in resp]
def prune(self, filters=None):
return self.client.api.prune_containers(filters=filters)
prune.__doc__ = APIClient.prune_containers.__doc__
# kwargs to copy straight from run to create
RUN_CREATE_KWARGS = [
'command',
'detach',
'domainname',
'entrypoint',
'environment',
'healthcheck',
'hostname',
'image',
'labels',
'mac_address',
'name',
'network_disabled',
'stdin_open',
'stop_signal',
'tty',
'user',
'volume_driver',
'working_dir',
]
# kwargs to copy straight from run to host_config
RUN_HOST_CONFIG_KWARGS = [
'auto_remove',
'blkio_weight_device',
'blkio_weight',
'cap_add',
'cap_drop',
'cgroup_parent',
'cpu_count',
'cpu_percent',
'cpu_period',
'cpu_quota',
'cpu_shares',
'cpuset_cpus',
'cpuset_mems',
'device_read_bps',
'device_read_iops',
'device_write_bps',
'device_write_iops',
'devices',
'dns_opt',
'dns_search',
'dns',
'extra_hosts',
'group_add',
'init',
'init_path',
'ipc_mode',
'isolation',
'kernel_memory',
'links',
'log_config',
'lxc_conf',
'mem_limit',
'mem_reservation',
'mem_swappiness',
'memswap_limit',
'nano_cpus',
'network_mode',
'oom_kill_disable',
'oom_score_adj',
'pid_mode',
'pids_limit',
'privileged',
'publish_all_ports',
'read_only',
'restart_policy',
'security_opt',
'shm_size',
'storage_opt',
'sysctls',
'tmpfs',
'ulimits',
'userns_mode',
'version',
'volumes_from',
'runtime'
]
def _create_container_args(kwargs):
"""
Convert arguments to create() to arguments to create_container().
"""
# Copy over kwargs which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in RUN_CREATE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
host_config_kwargs = {}
for key in copy.copy(kwargs):
if key in RUN_HOST_CONFIG_KWARGS:
host_config_kwargs[key] = kwargs.pop(key)
# Process kwargs which are split over both create and host_config
ports = kwargs.pop('ports', {})
if ports:
host_config_kwargs['port_bindings'] = ports
volumes = kwargs.pop('volumes', {})
if volumes:
host_config_kwargs['binds'] = volumes
network = kwargs.pop('network', None)
if network:
create_kwargs['networking_config'] = {network: None}
host_config_kwargs['network_mode'] = network
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error('run', kwargs)
create_kwargs['host_config'] = HostConfig(**host_config_kwargs)
# Fill in any kwargs which need processing by create_host_config first
port_bindings = create_kwargs['host_config'].get('PortBindings')
if port_bindings:
# sort to make consistent for tests
create_kwargs['ports'] = [tuple(p.split('/', 1))
for p in sorted(port_bindings.keys())]
binds = create_kwargs['host_config'].get('Binds')
if binds:
create_kwargs['volumes'] = [_host_volume_from_bind(v) for v in binds]
return create_kwargs
def _host_volume_from_bind(bind):
bits = bind.split(':')
if len(bits) == 1:
return bits[0]
elif len(bits) == 2 and bits[1] in ('ro', 'rw'):
return bits[0]
else:
return bits[1]
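# Illustrative checks (not part of the original module) for the bind-string
# handling above: given "host:container[:mode]" strings, the path that
# create_container() expects in its `volumes` list is extracted.
assert _host_volume_from_bind('/data') == '/data'
assert _host_volume_from_bind('/srv/app:ro') == '/srv/app'
assert _host_volume_from_bind('/srv/app:/mnt/vol:rw') == '/mnt/vol'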
# docker-2.5.1/docker/models/secrets.py
from ..api import APIClient
from .resource import Model, Collection
class Secret(Model):
"""A secret."""
id_attribute = 'ID'
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.name)
@property
def name(self):
return self.attrs['Spec']['Name']
def remove(self):
"""
Remove this secret.
Raises:
:py:class:`docker.errors.APIError`
If secret failed to remove.
"""
return self.client.api.remove_secret(self.id)
class SecretCollection(Collection):
"""Secrets on the Docker server."""
model = Secret
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
def get(self, secret_id):
"""
Get a secret.
Args:
secret_id (str): Secret ID.
Returns:
(:py:class:`Secret`): The secret.
Raises:
:py:class:`docker.errors.NotFound`
If the secret does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_secret(secret_id))
def list(self, **kwargs):
"""
List secrets. Similar to the ``docker secret ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Secret`): The secrets.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.secrets(**kwargs)
return [self.prepare_model(obj) for obj in resp]
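# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Requires a
# swarm-enabled daemon; the secret name and payload are assumptions.
def _secret_example():
    import docker
    client = docker.from_env()
    secret = client.secrets.create(name='db_password', data=b'hunter2')
    print(secret.id, secret.name)
    print([s.name for s in client.secrets.list()])
    secret.remove()
# ---------------------------------------------------------------------------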
# docker-2.5.1/docker/models/plugins.py
from .. import errors
from .resource import Collection, Model
class Plugin(Model):
"""
A plugin on the server.
"""
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, self.name)
@property
def name(self):
"""
The plugin's name.
"""
return self.attrs.get('Name')
@property
def enabled(self):
"""
Whether the plugin is enabled.
"""
return self.attrs.get('Enabled')
@property
def settings(self):
"""
A dictionary representing the plugin's configuration.
"""
return self.attrs.get('Settings')
def configure(self, options):
"""
Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.configure_plugin(self.name, options)
self.reload()
def disable(self):
"""
Disable the plugin.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.disable_plugin(self.name)
self.reload()
def enable(self, timeout=0):
"""
Enable the plugin.
Args:
timeout (int): Timeout in seconds. Default: 0
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.enable_plugin(self.name, timeout)
self.reload()
def push(self):
"""
Push the plugin to a remote registry.
Returns:
A dict iterator streaming the status of the upload.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.push_plugin(self.name)
def remove(self, force=False):
"""
Remove the plugin from the server.
Args:
force (bool): Remove even if the plugin is enabled.
Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_plugin(self.name, force=force)
def upgrade(self, remote=None):
"""
Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs
"""
if self.enabled:
raise errors.DockerError(
'Plugin must be disabled before upgrading.'
)
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
for d in self.client.api.upgrade_plugin(self.name, remote, privileges):
yield d
        self.reload()
class PluginCollection(Collection):
model = Plugin
def create(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
"""
self.client.api.create_plugin(name, plugin_data_dir, gzip)
return self.get(name)
def get(self, name):
"""
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_plugin(name))
def install(self, remote_name, local_name=None):
"""
Pull and install a plugin.
Args:
remote_name (string): Remote reference for the plugin to
install. The ``:latest`` tag is optional, and is the
default if omitted.
local_name (string): Local name for the pulled plugin.
The ``:latest`` tag is optional, and is the default if
omitted. Optional.
Returns:
(:py:class:`Plugin`): The installed plugin
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
privileges = self.client.api.plugin_privileges(remote_name)
it = self.client.api.pull_plugin(remote_name, privileges, local_name)
        for data in it:
            pass  # exhaust the pull progress stream before fetching the model
return self.get(local_name or remote_name)
def list(self):
"""
List plugins installed on the server.
Returns:
(list of :py:class:`Plugin`): The plugins.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.plugins()
return [self.prepare_model(r) for r in resp]
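# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The plugin
# reference is an assumption; install() grants the plugin's privileges.
def _plugin_example():
    import docker
    client = docker.from_env()
    plugin = client.plugins.install('vieux/sshfs:latest')
    plugin.enable(timeout=10)
    print(plugin.enabled, plugin.settings)
    plugin.disable()
    plugin.remove(force=True)
# ---------------------------------------------------------------------------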
# docker-2.5.1/docker/models/images.py
import re
import six
from ..api import APIClient
from ..errors import BuildError
from ..utils.json_stream import json_stream
from .resource import Collection, Model
class Image(Model):
"""
An image on the server.
"""
def __repr__(self):
return "<%s: '%s'>" % (self.__class__.__name__, "', '".join(self.tags))
@property
def labels(self):
"""
The labels of an image as dictionary.
"""
result = self.attrs['Config'].get('Labels')
return result or {}
@property
def short_id(self):
"""
The ID of the image truncated to 10 characters, plus the ``sha256:``
prefix.
"""
if self.id.startswith('sha256:'):
return self.id[:17]
return self.id[:10]
@property
def tags(self):
"""
The image's tags.
"""
tags = self.attrs.get('RepoTags')
if tags is None:
tags = []
        return [tag for tag in tags if tag != '<none>:<none>']
def history(self):
"""
Show the history of an image.
Returns:
(str): The history of the image.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.history(self.id)
def save(self):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Returns:
(urllib3.response.HTTPResponse object): The response from the
daemon.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.images.get("fedora:latest")
>>> resp = image.save()
            >>> f = open('/tmp/fedora-latest.tar', 'wb')
            >>> for chunk in resp.stream():
            ...     f.write(chunk)
            >>> f.close()
"""
return self.client.api.get_image(self.id)
def tag(self, repository, tag=None, **kwargs):
"""
Tag this image into a repository. Similar to the ``docker tag``
command.
Args:
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
(bool): ``True`` if successful
"""
return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
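# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The registry
# prefix is an assumption; save() streams the image tarball in binary chunks.
def _image_example():
    import docker
    client = docker.from_env()
    image = client.images.pull('alpine', tag='latest')
    print(image.short_id, image.tags)
    image.tag('localhost:5000/alpine', tag='mirror')
    with open('/tmp/alpine.tar', 'wb') as f:
        for chunk in image.save().stream():
            f.write(chunk)
# ---------------------------------------------------------------------------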
class ImageCollection(Collection):
model = Image
def build(self, **kwargs):
"""
Build an image and return it. Similar to the ``docker build``
command. Either ``path`` or ``fileobj`` must be set.
If you have a tar file for the Docker build context (including a
Dockerfile) already, pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is compressed
also, set ``encoding`` to the correct value (e.g ``gzip``).
If you want to get the raw output of the build, use the
:py:meth:`~docker.api.build.BuildApiMixin.build` method in the
low-level API.
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (list): A list of images used for build cache
resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
Returns:
(:py:class:`Image`): The built image.
Raises:
:py:class:`docker.errors.BuildError`
If there is an error during the build.
:py:class:`docker.errors.APIError`
If the server returns any other error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
"""
resp = self.client.api.build(**kwargs)
if isinstance(resp, six.string_types):
return self.get(resp)
last_event = None
image_id = None
for chunk in json_stream(resp):
if 'error' in chunk:
raise BuildError(chunk['error'])
if 'stream' in chunk:
match = re.search(
r'(^Successfully built |sha256:)([0-9a-f]+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
last_event = chunk
if image_id:
return self.get(image_id)
raise BuildError(last_event or 'Unknown')
def get(self, name):
"""
Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_image(name))
def list(self, name=None, all=False, filters=None):
"""
List images on the server.
Args:
name (str): Only show images belonging to the repository ``name``
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- ``label`` (str): format either ``key`` or ``key=value``
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.images(name=name, all=all, filters=filters)
return [self.get(r["Id"]) for r in resp]
def load(self, data):
"""
Load an image that was previously saved using
:py:meth:`~docker.models.images.Image.save` (or ``docker save``).
Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
Returns:
(generator): Progress output as JSON objects
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.load_image(data)
def pull(self, name, tag=None, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
low-level API.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
insecure_registry (bool): Use an insecure registry
auth_config (dict): Override the credentials that
:py:meth:`~docker.client.DockerClient.login` has set for
this request. ``auth_config`` should contain the ``username``
and ``password`` keys to be valid.
Returns:
(:py:class:`Image`): The image that has been pulled.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = client.images.pull('busybox')
"""
self.client.api.pull(name, tag=tag, **kwargs)
return self.get('{0}:{1}'.format(name, tag) if tag else name)
def push(self, repository, tag=None, **kwargs):
return self.client.api.push(repository, tag=tag, **kwargs)
push.__doc__ = APIClient.push.__doc__
def remove(self, *args, **kwargs):
self.client.api.remove_image(*args, **kwargs)
remove.__doc__ = APIClient.remove_image.__doc__
def search(self, *args, **kwargs):
return self.client.api.search(*args, **kwargs)
search.__doc__ = APIClient.search.__doc__
def prune(self, filters=None):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
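# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The build
# context path and tag are assumptions; build() returns the built Image or
# raises BuildError if the daemon reports an error in the stream.
def _build_example():
    import docker
    client = docker.from_env()
    image = client.images.build(path='.', tag='myapp:latest', rm=True)
    print(image.id)
# ---------------------------------------------------------------------------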
# docker-2.5.1/docker/models/services.py
import copy
from docker.errors import create_unexpected_kwargs_error
from docker.types import TaskTemplate, ContainerSpec
from .resource import Model, Collection
class Service(Model):
"""A service."""
id_attribute = 'ID'
@property
def name(self):
"""The service's name."""
return self.attrs['Spec']['Name']
@property
def version(self):
"""
The version number of the service. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def remove(self):
"""
Stop and remove the service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_service(self.id)
def tasks(self, filters=None):
"""
List the tasks in this service.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``node``,
``label``, and ``desired-state``.
Returns:
(:py:class:`list`): List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
filters['service'] = self.id
return self.client.api.tasks(filters=filters)
def update(self, **kwargs):
"""
Update a service's configuration. Similar to the ``docker service
update`` command.
Takes the same parameters as :py:meth:`~ServiceCollection.create`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# Image is required, so if it hasn't been set, use current image
if 'image' not in kwargs:
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']
create_kwargs = _get_create_service_kwargs('update', kwargs)
return self.client.api.update_service(
self.id,
self.version,
**create_kwargs
)
def logs(self, **kwargs):
"""
Get log stream for the service.
Note: This method works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
            since (int): UNIX timestamp for the logs starting point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
Returns (generator): Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
)
return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
class ServiceCollection(Collection):
"""Services on the Docker server."""
model = Service
def create(self, image, command=None, **kwargs):
"""
Create a service. Similar to the ``docker service create`` command.
Args:
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
constraints (list of str): Placement constraints.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
labels (dict): Labels to apply to the service.
log_driver (str): Log driver to use for containers.
log_driver_options (dict): Log driver options.
mode (ServiceMode): Scheduling mode for the service.
                Default: ``None``
mounts (list of str): Mounts for the containers, in the form
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
networks (list of str): List of network names or IDs to attach
the service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
secrets (list of :py:class:`docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
Returns:
(:py:class:`Service`) The created service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
kwargs['image'] = image
kwargs['command'] = command
create_kwargs = _get_create_service_kwargs('create', kwargs)
service_id = self.client.api.create_service(**create_kwargs)
return self.get(service_id)
def get(self, service_id):
"""
Get a service.
Args:
service_id (str): The ID of the service.
Returns:
(:py:class:`Service`): The service.
Raises:
:py:class:`docker.errors.NotFound`
If the service does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_service(service_id))
def list(self, **kwargs):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id`` and ``name``. Default: ``None``.
Returns:
(list of :py:class:`Service`): The services.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
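Example:
List services filtered by name (a sketch; the name is
illustrative):
>>> client.services.list(filters={'name': 'web'})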
"""
return [
self.prepare_model(s)
for s in self.client.api.services(**kwargs)
]
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'image',
'command',
'args',
'env',
'hostname',
'workdir',
'user',
'labels',
'mounts',
'stop_grace_period',
'secrets',
'tty'
]
# kwargs to copy straight over to TaskTemplate
TASK_TEMPLATE_KWARGS = [
'resources',
'restart_policy',
]
# kwargs to copy straight over to create_service
CREATE_SERVICE_KWARGS = [
'name',
'labels',
'mode',
'update_config',
'networks',
'endpoint_spec',
]
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in CREATE_SERVICE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
container_spec_kwargs = {}
for key in copy.copy(kwargs):
if key in CONTAINER_SPEC_KWARGS:
container_spec_kwargs[key] = kwargs.pop(key)
task_template_kwargs = {}
for key in copy.copy(kwargs):
if key in TASK_TEMPLATE_KWARGS:
task_template_kwargs[key] = kwargs.pop(key)
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
if 'constraints' in kwargs:
task_template_kwargs['placement'] = {
'Constraints': kwargs.pop('constraints')
}
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
'Name': kwargs.pop('log_driver'),
'Options': kwargs.pop('log_driver_options', {})
}
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error(func_name, kwargs)
container_spec = ContainerSpec(**container_spec_kwargs)
task_template_kwargs['container_spec'] = container_spec
create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
return create_kwargs
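# Hedged sketch of the partitioning performed above (all values are
# illustrative):
#
#   _get_create_service_kwargs('create', {
#       'image': 'nginx:alpine',               # -> ContainerSpec
#       'name': 'web',                         # -> create_service kwarg
#       'restart_policy': restart_policy,      # -> TaskTemplate
#       'constraints': ['node.role==worker'],  # -> TaskTemplate placement
#   })
#
# returns {'name': 'web', 'task_template': TaskTemplate(...)}, ready to be
# passed straight to APIClient.create_service.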
docker-2.5.1/docker/models/resource.py 0000664 0001750 0001750 00000005120 13040271005 021043 0 ustar joffrey joffrey 0000000 0000000
class Model(object):
"""
A base class for representing a single object on the server.
"""
id_attribute = 'Id'
def __init__(self, attrs=None, client=None, collection=None):
#: A client pointing at the server that this object is on.
self.client = client
#: The collection that this model is part of.
self.collection = collection
#: The raw representation of this object from the API
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.short_id)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash("%s:%s" % (self.__class__.__name__, self.id))
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs.get(self.id_attribute)
@property
def short_id(self):
"""
The ID of the object, truncated to 10 characters.
"""
return self.id[:10]
def reload(self):
"""
Load this object from the server again and update ``attrs`` with the
new data.
"""
new_model = self.collection.get(self.id)
self.attrs = new_model.attrs
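# Usage sketch (assumes a model previously fetched from a collection;
# the container ID is illustrative):
#   container = client.containers.get('45e6d2de7c54')
#   container.reload()  # refreshes container.attrs from the server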
class Collection(object):
"""
A base class for representing all objects of a particular type on the
server.
"""
#: The type of object this collection represents, set by subclasses
model = None
def __init__(self, client=None):
#: The client pointing at the server that this collection of objects
#: is on.
self.client = client
def __call__(self, *args, **kwargs):
raise TypeError(
"'{}' object is not callable. You might be trying to use the old "
"(pre-2.0) API - use docker.APIClient if so."
.format(self.__class__.__name__))
def list(self):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def create(self, attrs=None):
raise NotImplementedError
def prepare_model(self, attrs):
"""
Create a model from a set of attributes.
"""
if isinstance(attrs, Model):
attrs.client = self.client
attrs.collection = self
return attrs
elif isinstance(attrs, dict):
return self.model(attrs=attrs, client=self.client, collection=self)
else:
raise Exception("Can't create %s from %s" %
(self.model.__name__, attrs))
docker-2.5.1/docker/models/networks.py 0000664 0001750 0001750 00000014635 13124577310 021116 0 ustar joffrey joffrey 0000000 0000000 from ..api import APIClient
from .containers import Container
from .resource import Model, Collection
class Network(Model):
"""
A Docker network.
"""
@property
def name(self):
"""
The name of the network.
"""
return self.attrs.get('Name')
@property
def containers(self):
"""
The containers that are connected to the network, as a list of
:py:class:`~docker.models.containers.Container` objects.
"""
return [
self.client.containers.get(cid) for cid in
(self.attrs.get('Containers') or {}).keys()
]
def connect(self, container, *args, **kwargs):
"""
Connect a container to this network.
Args:
container (str): Container to connect to this network, as either
an ID, name, or :py:class:`~docker.models.containers.Container`
object.
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linkedto this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
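Example:
Connect a container and give it a network-scoped alias (a sketch;
the container name is illustrative):
>>> network.connect('my-container', aliases=['db'])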
"""
if isinstance(container, Container):
container = container.id
return self.client.api.connect_container_to_network(
container, self.id, *args, **kwargs
)
def disconnect(self, container, *args, **kwargs):
"""
Disconnect a container from this network.
Args:
container (str): Container to disconnect from this network, as
either an ID, name, or
:py:class:`~docker.models.containers.Container` object.
force (bool): Force the container to disconnect from a network.
Default: ``False``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(container, Container):
container = container.id
return self.client.api.disconnect_container_from_network(
container, self.id, *args, **kwargs
)
def remove(self):
"""
Remove this network.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_network(self.id)
class NetworkCollection(Collection):
"""
Networks on the Docker server.
"""
model = Network
def create(self, name, *args, **kwargs):
"""
Create a network. Similar to the ``docker network create`` command.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (dict): Optional custom IP scheme for the network.
Created with :py:class:`~docker.types.IPAMConfig`.
check_duplicate (bool): Request the daemon to check for networks
with the same name. Default: ``True``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(:py:class:`Network`): The network that was created.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.networks.create("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> client.networks.create(
"network1",
driver="bridge",
ipam=ipam_config
)
"""
resp = self.client.api.create_network(name, *args, **kwargs)
return self.get(resp['Id'])
def get(self, network_id):
"""
Get a network by its ID.
Args:
network_id (str): The ID of the network.
Returns:
(:py:class:`Network`) The network.
Raises:
:py:class:`docker.errors.NotFound`
If the network does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_network(network_id))
def list(self, *args, **kwargs):
"""
List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
Returns:
(list of :py:class:`Network`) The networks on the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.networks(*args, **kwargs)
return [self.prepare_model(item) for item in resp]
def prune(self, filters=None):
self.client.api.prune_networks(filters=filters)
prune.__doc__ = APIClient.prune_networks.__doc__
docker-2.5.1/docker/models/volumes.py 0000664 0001750 0001750 00000005440 13051443744 020730 0 ustar joffrey joffrey 0000000 0000000 from ..api import APIClient
from .resource import Model, Collection
class Volume(Model):
"""A volume."""
id_attribute = 'Name'
@property
def name(self):
"""The name of the volume."""
return self.attrs['Name']
def remove(self, force=False):
"""
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If the volume could not be removed.
"""
return self.client.api.remove_volume(self.id, force=force)
class VolumeCollection(Collection):
"""Volumes on the Docker server."""
model = Volume
def create(self, name=None, **kwargs):
"""
Create a volume.
Args:
name (str): Name of the volume. If not specified, the engine
generates a name.
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
Returns:
(:py:class:`Volume`): The volume created.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> volume = client.volumes.create(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
"""
obj = self.client.api.create_volume(name, **kwargs)
return self.prepare_model(obj)
def get(self, volume_id):
"""
Get a volume.
Args:
volume_id (str): Volume name.
Returns:
(:py:class:`Volume`): The volume.
Raises:
:py:class:`docker.errors.NotFound`
If the volume does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_volume(volume_id))
def list(self, **kwargs):
"""
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
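Example:
List dangling volumes (a sketch):
>>> client.volumes.list(filters={'dangling': True})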
"""
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']]
def prune(self, filters=None):
return self.client.api.prune_volumes(filters=filters)
prune.__doc__ = APIClient.prune_volumes.__doc__
docker-2.5.1/docker/models/swarm.py 0000664 0001750 0001750 00000013303 13051443744 020364 0 ustar joffrey joffrey 0000000 0000000 from docker.api import APIClient
from docker.errors import APIError
from docker.types import SwarmSpec
from .resource import Model
class Swarm(Model):
"""
The server's Swarm state. This is a singleton that must be reloaded to get
the current state of the Swarm.
"""
def __init__(self, *args, **kwargs):
super(Swarm, self).__init__(*args, **kwargs)
if self.client:
try:
self.reload()
except APIError as e:
# FIXME: https://github.com/docker/docker/issues/29192
if e.response.status_code not in (406, 503):
raise
@property
def version(self):
"""
The version number of the swarm. If this is not the same as the
version on the server, the :py:meth:`update` function will not work
and you will need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, **kwargs):
"""
Initialize a new swarm on this Engine.
Args:
advertise_addr (str): Externally reachable address advertised to
other nodes. This can either be an address/port combination in
the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used.
If not specified, it will be automatically detected when
possible.
listen_addr (str): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
task_history_retention_limit (int): Maximum number of task
history entries stored.
snapshot_interval (int): Number of log entries between snapshots.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
heartbeat_tick (int): Number of ticks (in seconds) between each
heartbeat.
election_tick (int): Number of ticks (in seconds) needed without a
leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_ca (dict): Configuration for forwarding signing requests
to an external certificate authority. Use
``docker.types.SwarmExternalCA``.
name (string): Swarm's name
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
force_new_cluster=False, snapshot_interval=5000,
log_entries_for_slow_followers=1200
)
"""
init_kwargs = {
'advertise_addr': advertise_addr,
'listen_addr': listen_addr,
'force_new_cluster': force_new_cluster
}
init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
self.client.api.init_swarm(**init_kwargs)
self.reload()
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
join.__doc__ = APIClient.join_swarm.__doc__
def leave(self, *args, **kwargs):
return self.client.api.leave_swarm(*args, **kwargs)
leave.__doc__ = APIClient.leave_swarm.__doc__
def reload(self):
"""
Inspect the swarm on the server and store the response in
:py:attr:`attrs`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.attrs = self.client.api.inspect_swarm()
def update(self, rotate_worker_token=False, rotate_manager_token=False,
**kwargs):
"""
Update the swarm's configuration.
It takes the same arguments as :py:meth:`init`, except
``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
addition, it takes these arguments:
Args:
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
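Example:
Rotate the worker join token (a sketch):
>>> client.swarm.update(rotate_worker_token=True)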
"""
# this seems to have to be set
if kwargs.get('node_cert_expiry') is None:
kwargs['node_cert_expiry'] = 7776000000000000
return self.client.api.update_swarm(
version=self.version,
swarm_spec=SwarmSpec(**kwargs),
rotate_worker_token=rotate_worker_token,
rotate_manager_token=rotate_manager_token
)
docker-2.5.1/docker/models/nodes.py 0000664 0001750 0001750 00000005560 13023617644 020352 0 ustar joffrey joffrey 0000000 0000000 from .resource import Model, Collection
class Node(Model):
"""A node in a swarm."""
id_attribute = 'ID'
@property
def version(self):
"""
The version number of the node. If this is not the same as the
version on the server, the :py:meth:`update` function will not work
and you will need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def update(self, node_spec):
"""
Update the node's configuration.
Args:
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> node.update(node_spec)
"""
return self.client.api.update_node(self.id, self.version, node_spec)
def remove(self, force=False):
"""
Remove this node from the swarm.
Args:
force (bool): Force remove an active node. Default: `False`
Returns:
`True` if the request was successful.
Raises:
:py:class:`docker.errors.NotFound`
If the node doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_node(self.id, force=force)
class NodeCollection(Collection):
"""Nodes on the Docker server."""
model = Node
def get(self, node_id):
"""
Get a node.
Args:
node_id (string): ID of the node to be inspected.
Returns:
A :py:class:`Node` object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_node(node_id))
def list(self, *args, **kwargs):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'})
"""
return [
self.prepare_model(n)
for n in self.client.api.nodes(*args, **kwargs)
]
docker-2.5.1/docker/models/__init__.py 0000664 0001750 0001750 00000000000 13023617644 020761 0 ustar joffrey joffrey 0000000 0000000 docker-2.5.1/docker/constants.py 0000664 0001750 0001750 00000001025 13145400651 017754 0 ustar joffrey joffrey 0000000 0000000 import sys
from .version import version
DEFAULT_DOCKER_API_VERSION = '1.30'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
DEFAULT_USER_AGENT = "docker-sdk-python/{0}".format(version)
DEFAULT_NUM_POOLS = 25