compose-1.29.2/.dockerignore:

*.egg-info
.coverage
.git
.github
.tox
build
binaries
coverage-html
docs/_site
*venv
.tox
**/__pycache__
*.pyc
Jenkinsfile

compose-1.29.2/.github/CODEOWNERS:

# GitHub code owners
# See https://help.github.com/articles/about-codeowners/
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.

* @aiordache @ndeloof @rumpl @ulyssessouza

compose-1.29.2/.github/ISSUE_TEMPLATE/bug_report.md:

---
name: Bug report
about: Report a bug encountered while using docker-compose
title: ''
labels: kind/bug
assignees: ''
---

## Description of the issue

## Context information (for bug reports)

**Output of `docker-compose version`**

```
(paste here)
```

**Output of `docker version`**

```
(paste here)
```

**Output of `docker-compose config`**
(Make sure to add the relevant `-f` and other flags)

```
(paste here)
```

## Steps to reproduce the issue

1.
2.
3.

### Observed result

### Expected result

### Stacktrace / full error message

```
(paste here)
```

## Additional information

OS version / distribution, `docker-compose` install method, etc.

compose-1.29.2/.github/ISSUE_TEMPLATE/feature_request.md:

---
name: Feature request
about: Suggest an idea to improve Compose
title: ''
labels: kind/feature
assignees: ''
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

compose-1.29.2/.github/ISSUE_TEMPLATE/question-about-using-compose.md:

---
name: Question about using Compose
about: This is not the appropriate channel
title: ''
labels: kind/question
assignees: ''
---

Please post on our forums: https://forums.docker.com for questions about using `docker-compose`.

Posts that are not a bug report or a feature/enhancement request will not be addressed on this issue tracker.

compose-1.29.2/.github/stale.yml:

# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 180

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 7

# Only issues or pull requests with all of these labels are checked for staleness. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - kind/feature

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: true

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.

# Comment to post when removing the stale label.
unmarkComment: >
  This issue has been automatically marked as not stale anymore due to the
  recent activity.

# Comment to post when closing a stale Issue or Pull Request.
closeComment: >
  This issue has been automatically closed because it has not had recent
  activity during the stale period.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30

# Limit to only `issues` or `pulls`
only: issues

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
# pulls:
#   daysUntilStale: 30
#   markComment: >
#     This pull request has been automatically marked as stale because it has not had
#     recent activity. It will be closed if no further activity occurs. Thank you
#     for your contributions.

# issues:
#   exemptLabels:
#     - confirmed

compose-1.29.2/.gitignore:

*.egg-info
*.pyc
*.swo
*.swp
.cache
.coverage*
.DS_Store
.idea
/.tox
/binaries
/build
/compose/GITSHA
/coverage-html
/dist
/docs/_site
/README.rst
/*venv

compose-1.29.2/.pre-commit-config.yaml:

- repo: git://github.com/pre-commit/pre-commit-hooks
  sha: 'v0.9.1'
  hooks:
    - id: check-added-large-files
    - id: check-docstring-first
    - id: check-merge-conflict
    - id: check-yaml
    - id: check-json
    - id: debug-statements
    - id: end-of-file-fixer
    - id: flake8
    - id: name-tests-test
      exclude: 'tests/(integration/testcases\.py|helpers\.py)'
    - id: requirements-txt-fixer
    - id: trailing-whitespace
- repo: git://github.com/asottile/reorder_python_imports
  sha: v1.3.4
  hooks:
    - id: reorder-python-imports
      language_version: 'python3.7'
      args:
        - --py3-plus
- repo: https://github.com/asottile/pyupgrade
  rev: v2.1.0
  hooks:
    - id: pyupgrade
      args:
        - --py3-plus

compose-1.29.2/CHANGELOG.md:

Change log
==========

1.29.2 (2021-05-10)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/59?closed=1)

### Miscellaneous

- Remove advertisement for `docker compose` in the `up` command to avoid annoyance
- Bump `py` to `1.10.0` in `requirements-indirect.txt`

1.29.1 (2021-04-13)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/58?closed=1)

### Bugs

- Fix for invalid handler warning on Windows builds
- Fix config hash to trigger container recreation on IPC mode updates
- Fix conversion map for `placement.max_replicas_per_node`
- Remove extra scan suggestion on build

1.29.0 (2021-04-06)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/56?closed=1)

### Features

- Add profile filter to `docker-compose config`
- Add a `depends_on` condition to wait for successful service completion
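
For illustration, a minimal sketch of the new completion condition (service and image names are hypothetical; the `service_completed_successfully` condition name follows the Compose specification):

```yaml
services:
  db-init:
    image: example/db-init   # hypothetical one-off job that runs and exits
  web:
    image: example/web       # hypothetical long-running service
    depends_on:
      db-init:
        condition: service_completed_successfully   # start web only after db-init exits with code 0
```
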
### Miscellaneous

- Add image scan message on build
- Update warning message for `--no-ansi` to mention `--ansi never` as alternative
- Bump docker-py to 5.0.0
- Bump PyYAML to 5.4.1
- Bump python-dotenv to 0.17.0

1.28.6 (2021-03-23)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/57?closed=1)

### Bugs

- Make `--env-file` relative to the current working directory and error out for invalid paths.
  Environment file paths set with `--env-file` are relative to the current working directory,
  while the default `.env` file is located in the project directory, which by default is the
  base directory of the Compose file.
- Fix missing service property `storage_opt` by updating the compose schema
- Fix build `extra_hosts` list format
- Remove extra error message on `exec`

### Miscellaneous

- Add `compose.yml` and `compose.yaml` to default filename list

1.28.5 (2021-02-25)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/55?closed=1)

### Bugs

- Fix OpenSSL version mismatch error when shelling out to the ssh client (via bump to docker-py 4.4.4 which contains the fix)
- Add missing build flags to the native builder: `platform`, `isolation` and `extra_hosts`
- Remove info message on native build
- Avoid fetching logs when service logging driver is set to 'none'

1.28.4 (2021-02-18)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/54?closed=1)

### Bugs

- Fix SSH port parsing by bumping docker-py to 4.4.3

### Miscellaneous

- Bump Python to 3.7.10

1.28.3 (2021-02-17)
-------------------

[List of PRs / issues for this release](https://github.com/docker/compose/milestone/53?closed=1)

### Bugs

- Fix SSH hostname parsing when it contains leading s/h, and remove the quiet option that was hiding the error (via docker-py bump to 4.4.2)
- Fix key error for '--no-log-prefix' option
- Fix incorrect CLI environment variable name for service profiles: `COMPOSE_PROFILES` instead of `COMPOSE_PROFILE`
- Fix fish completion

### Miscellaneous

- Bump cryptography to 3.3.2
- Remove log driver filter

1.28.2 (2021-01-26)
-------------------

### Miscellaneous

- CI setup update

1.28.1 (2021-01-25)
-------------------

### Bugs

- Revert to Python 3.7 bump for Linux static builds
- Add bash completion for `docker-compose logs|up --no-log-prefix`

1.28.0 (2021-01-20)
-------------------

### Features

- Support for Nvidia GPUs via device requests
- Support for service profiles
- Change the SSH connection approach to the Docker CLI's via shellout to the local SSH client (old behaviour enabled by setting `COMPOSE_PARAMIKO_SSH` environment variable)
- Add flag to disable log prefix
- Add flag for ansi output control
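
A minimal sketch of service profiles (service and image names are hypothetical; `COMPOSE_PROFILES` is the environment variable named in the 1.28.3 fix above, and the `--profile` flag is assumed to be its CLI equivalent):

```yaml
# Activate with either of:
#   COMPOSE_PROFILES=debug docker-compose up
#   docker-compose --profile debug up
services:
  web:
    image: example/web        # hypothetical; always started
  debugger:
    image: busybox
    profiles: ["debug"]       # only started when the "debug" profile is active
```
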
### Bugs

- Make `parallel_pull=True` by default
- Bring back warning for configs in non-swarm mode
- Take `--file` in account when defining `project_dir`
- On `compose up`, attach only to services we read logs from

### Miscellaneous

- Make COMPOSE_DOCKER_CLI_BUILD=1 the default
- Add usage metrics
- Sync schema with COMPOSE specification
- Improve failure report for missing mandatory environment variables
- Bump attrs to 20.3.0
- Bump more_itertools to 8.6.0
- Bump cryptography to 3.2.1
- Bump cffi to 1.14.4
- Bump virtualenv to 20.2.2
- Bump bcrypt to 3.2.0
- Bump gitpython to 3.1.11
- Bump docker-py to 4.4.1
- Bump Python to 3.9
- Linux: bump Debian base image from stretch to buster (required for Python 3.9)
- macOS: OpenSSL 1.1.1g to 1.1.1h, Python 3.7.7 to 3.9.0
- Bump pyinstaller to 4.1
- Loosen restriction on base images to latest minor
- Updates of READMEs

1.27.4 (2020-09-24)
-------------------

### Bugs

- Remove path checks for bind mounts
- Fix port rendering to output long form syntax for non-v1
- Add protocol to the docker socket address

1.27.3 (2020-09-16)
-------------------

### Bugs

- Merge `max_replicas_per_node` on `docker-compose config`
- Fix `depends_on` serialization on `docker-compose config`
- Fix scaling when some containers are not running on `docker-compose up`
- Enable relative paths for `driver_opts.device` for `local` driver
- Allow strings for `cpus` fields

1.27.2 (2020-09-10)
-------------------

### Bugs

- Fix bug on `docker-compose run` container attach

1.27.1 (2020-09-10)
-------------------

### Bugs

- Fix `docker-compose run` when `service.scale` is specified
- Allow `driver` property for external networks as temporary workaround for swarm network propagation issue
- Pin new internal schema version to `3.9` as the default
- Preserve the version when configured in the compose file

1.27.0 (2020-09-07)
-------------------

### Features

- Merge 2.x and 3.x compose formats and align with COMPOSE_SPEC schema
- Implement service mode for ipc
- Pass `COMPOSE_PROJECT_NAME` environment variable in container mode
- Make run behave in the same way as up
- Use `docker build` on `docker-compose run` when `COMPOSE_DOCKER_CLI_BUILD` environment variable is set
- Use docker-py default API version for engine queries (`auto`)
- Parse `network_mode` on build

### Bugs

- Ignore build context path validation when building is not required
- Fix float to bytes conversion via docker-py bump to 4.3.1
- Fix scale bug when deploy section is set
- Fix `docker-py` bump in `setup.py`
- Fix experimental build failure detection
- Fix context propagation to docker cli

### Miscellaneous

- Drop support for Python 2.7
- Bump `docker-py` to 4.3.1
- Bump `tox` to 3.19.0
- Bump `virtualenv` to 20.0.30
- Add script for docs synchronization
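
A minimal sketch of the 1.27.0 service mode for `ipc` (image names are hypothetical; `shareable` on the provider side is assumed to follow the engine's IPC modes):

```yaml
services:
  shm-provider:
    image: example/shm-provider   # hypothetical; owns the shared IPC namespace
    ipc: shareable
  worker:
    image: example/worker         # hypothetical; joins the provider's namespace
    ipc: "service:shm-provider"
```
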
1.26.2 (2020-07-02)
-------------------

### Bugs

- Enforce `docker-py` 4.2.2 as minimum version when installing with pip

1.26.1 (2020-06-30)
-------------------

### Features

- Bump `docker-py` from 4.2.1 to 4.2.2

### Bugs

- Enforce `docker-py` 4.2.1 as minimum version when installing with pip
- Fix context load for non-docker endpoints

1.26.0 (2020-06-03)
-------------------

### Features

- Add `docker context` support
- Add missing test dependency `ddt` to `setup.py`
- Add `--attach-dependencies` to command `up` for attaching to dependencies
- Allow compatibility option with `COMPOSE_COMPATIBILITY` environment variable
- Bump `Pytest` to 5.3.4 and add refactor compatibility with new version
- Bump `OpenSSL` from 1.1.1f to 1.1.1g
- Bump `docker-py` from 4.2.0 to 4.2.1

### Bugs

- Properly escape values coming from env_files
- Sync compose-schemas with upstream (docker/cli)
- Remove `None` entries on exec command
- Add `python-dotenv` to delegate `.env` file processing
- Don't adjust output on terminal width when piped into another command
- Show an error message when `version` attribute is malformed
- Fix HTTPS connection when DOCKER_HOST is remote

1.25.5 (2020-02-04)
-------------------

### Features

- Bump OpenSSL from 1.1.1d to 1.1.1f
- Add 3.8 compose version

1.25.4 (2020-01-23)
-------------------

### Bugfixes

- Fix CI script to enforce the minimal MacOS version to 10.11
- Fix docker-compose exec for keys with no value

1.25.3 (2020-01-23)
-------------------

### Bugfixes

- Fix CI script to enforce the compilation with Python3
- Fix binary's sha256 in the release page

1.25.2 (2020-01-20)
-------------------

### Features

- Allow compatibility option with `COMPOSE_COMPATIBILITY` environment variable
- Bump PyInstaller from 3.5 to 3.6
- Bump pysocks from 1.6.7 to 1.7.1
- Bump websocket-client from 0.32.0 to 0.57.0
- Bump urllib3 from 1.24.2 to 1.25.7
- Bump jsonschema from 3.0.1 to 3.2.0
- Bump PyYAML from 4.2b1 to 5.3
- Bump certifi from 2017.4.17 to 2019.11.28
- Bump coverage from 4.5.4 to 5.0.3
- Bump paramiko from 2.6.0 to 2.7.1
- Bump cached-property from 1.3.0 to 1.5.1
- Bump minor Linux and MacOSX dependencies

### Bugfixes

- Validate version format on formats 2+
- Assume infinite terminal width when not running in a terminal

1.25.1 (2020-01-06)
-------------------

### Features

- Bump `pytest-cov` to 2.8.1
- Bump `flake8` to 3.7.9
- Bump `coverage` to 4.5.4

### Bugfixes

- Decode APIError explanation to unicode before usage on start and create of a container
- Report when images cannot be pulled and must be built
- Discard label `com.docker.compose.filepaths` having None as value. Typically, when coming from stdin
- Added OSX binary as a directory to solve slow start up time caused by MacOS Catalina binary scan
- Passed in HOME env-var in container mode (running with `script/run/run.sh`)
- Reverted behavior of "only pull images that we can't build" and replaced it by a warning informing of the image we can't pull and must be built

1.25.0 (2019-11-18)
-------------------

### Features

- Set no-colors to true if CLICOLOR env variable is set to 0
- Add working dir, config files and env file in service labels
- Add dependencies for ARM build
- Add BuildKit support, use `DOCKER_BUILDKIT=1` and `COMPOSE_DOCKER_CLI_BUILD=1`
- Bump paramiko to 2.6.0
- Add tag `docker-compose:latest`
- Add `docker-compose:<version>-alpine` image/tag
- Add `docker-compose:<version>-debian` image/tag
- Bump `docker-py` to 4.1.0
- Support `requests` up to 2.22.0 version
- Drop empty tag on `build:cache_from`
- `Dockerfile` now generates `libmusl` binaries for alpine
- Only pull images that can't be built
- Attribute `scale` can now accept `0` as a value
- Added `--quiet` build flag
- Added `--no-interpolate` to `docker-compose config`
- Bump OpenSSL for macOS build (`1.1.0j` to `1.1.1c`)
- Added `--no-rm` to `build` command
- Added support for `credential_spec`
- Resolve digests without pulling image
- Upgrade `pyyaml` to `4.2b1`
- Lowered severity to `warning` if `down` tries to remove a nonexistent image
- Use improved API fields for project events when possible
- Update `setup.py` for modern `pypi/setuptools` and remove `pandoc` dependencies
- Removed `Dockerfile.armhf` which is no longer needed

### Bugfixes

- Make container service color deterministic, remove red from chosen colors
- Fix non-ASCII chars error (Python 2 only)
- Format image size as decimal to align with Docker CLI
- Use Python POSIX support to get tty size
- Fix same file 'extends' optimization
- Fixed `stdin_open`
- Fixed `--remove-orphans` when used with `up --no-start`
- Fixed `docker-compose ps --all`
- Fixed `depends_on` dependency recreation behavior
- Fixed bash completion for `build --memory`
- Fixed misleading warning concerning env vars when performing an `exec` command
- Fixed failure check in parallel_execute_watch
- Fixed race condition after pulling image
- Fixed error on duplicate mount points
- Fixed merge on networks section
- Always connect Compose container to `stdin`
- Fixed the presentation of failed services on 'docker-compose start' when containers are not available

1.24.1 (2019-06-24)
-------------------

### Bugfixes

- Fixed acceptance tests

1.24.0 (2019-03-28)
-------------------

### Features

- Added support for connecting to the Docker Engine using the `ssh` protocol.
- Added a `--all` flag to `docker-compose ps` to include stopped one-off containers in the command's output.
- Add bash completion for `ps --all|-a`
- Support for credential_spec
- Add `--parallel` to `docker build`'s options in `bash` and `zsh` completion

### Bugfixes

- Fixed a bug where some valid credential helpers weren't properly handled by Compose when attempting to pull images from private registries.
- Fixed an issue where the output of `docker-compose start` before containers were created was misleading
- To match the Docker CLI behavior and to avoid confusing issues, Compose will no longer accept whitespace in variable names sourced from environment files.
- Compose will now report a configuration error if a service attempts to declare duplicate mount points in the volumes section.
- Fixed an issue with the containerized version of Compose that prevented users from writing to stdin during interactive sessions started by `run` or `exec`.
- One-off containers started by `run` no longer adopt the restart policy of the service, and are instead set to never restart.
- Fixed an issue that caused some container events to not appear in the output of the `docker-compose events` command.
- Missing images will no longer stop the execution of `docker-compose down` commands (a warning will be displayed instead).
- Force `virtualenv` version for macOS CI
- Fix merging of compose files when network has `None` config
- Fix `CTRL+C` issues by enabling `bootloader_ignore_signals` in `pyinstaller`
- Bump `docker-py` version to `3.7.2` to fix SSH and proxy config issues
- Fix release script and some typos on release documentation

1.23.2 (2018-11-28)
-------------------

### Bugfixes

- Reverted a 1.23.0 change that appended random strings to container names created by `docker-compose up`, causing addressability issues.
  Note: Containers created by `docker-compose run` will continue to use randomly generated names to avoid collisions during parallel runs.
- Fixed an issue where some `dockerfile` paths would fail unexpectedly when attempting to build on Windows.
- Fixed a bug where build context URLs would fail to build on Windows.
- Fixed a bug that caused `run` and `exec` commands to fail for some otherwise accepted values of the `--host` parameter.
- Fixed an issue where overrides for the `storage_opt` and `isolation` keys in service definitions weren't properly applied.
- Fixed a bug where some invalid Compose files would raise an uncaught exception during validation.

1.23.1 (2018-11-01)
-------------------

### Bugfixes

- Fixed a bug where working with containers created with a previous (< 1.23.0) version of Compose would cause unexpected crashes
- Fixed an issue where the behavior of the `--project-directory` flag would vary depending on which subcommand was being used.

1.23.0 (2018-10-30)
-------------------

### Important note

The default naming scheme for containers created by Compose in this version has changed from `<project>_<service>_<index>` to `<project>_<service>_<index>_<slug>`, where `<slug>` is a randomly-generated hexadecimal string. Please make sure to update scripts relying on the old naming scheme accordingly before upgrading.

### Features

- Logs for containers restarting after a crash will now appear in the output of the `up` and `logs` commands.
- Added `--hash` option to the `docker-compose config` command, allowing users to print a hash string for each service's configuration to facilitate rolling updates.
- Added `--parallel` flag to the `docker-compose build` command, allowing Compose to build up to 5 images simultaneously.
- Output for the `pull` command now reports status / progress even when pulling multiple images in parallel.
- For images with multiple names, Compose will now attempt to match the one present in the service configuration in the output of the `images` command.

### Bugfixes

- Parallel `run` commands for the same service will no longer fail due to name collisions.
- Fixed an issue where paths longer than 260 characters on Windows clients would cause `docker-compose build` to fail.
- Fixed a bug where attempting to mount `/var/run/docker.sock` with Docker Desktop for Windows would result in failure.
- The `--project-directory` option is now used by Compose to determine where to look for the `.env` file.
- `docker-compose build` no longer fails when attempting to pull an image with credentials provided by the gcloud credential helper.
- Fixed the `--exit-code-from` option in `docker-compose up` to always report the actual exit code even when the watched container isn't the cause of the exit.
- Fixed an issue that would prevent recreating a service in some cases where a volume would be mapped to the same mountpoint as a volume declared inside the image's Dockerfile.
- Fixed a bug that caused hash configuration with multiple networks to be inconsistent, causing some services to be unnecessarily restarted.
- Fixed a bug that would cause failures with variable substitution for services with a name containing one or more dot characters
- Fixed a pipe handling issue when using the containerized version of Compose.
- Fixed a bug causing `external: false` entries in the Compose file to be printed as `external: true` in the output of `docker-compose config`
- Fixed a bug where issuing a `docker-compose pull` command on services without a defined image key would cause Compose to crash
- Volumes and binds are now mounted in the order they're declared in the service definition

### Miscellaneous

- The `zsh` completion script has been updated with new options, and no longer suggests container names where service names are expected.

1.22.0 (2018-07-17)
-------------------

### Features

#### Compose format version 3.7

- Introduced version 3.7 of the `docker-compose.yml` specification. This version requires Docker Engine 18.06.0 or above.
- Added support for `rollback_config` in the deploy configuration
- Added support for the `init` parameter in service configurations
- Added support for extension fields in service, network, volume, secret, and config configurations
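
A minimal sketch combining the three 3.7 additions above (image name and values are hypothetical):

```yaml
version: "3.7"

x-default-logging: &default-logging   # extension field, reusable via a YAML anchor
  driver: json-file

services:
  web:
    image: example/web                # hypothetical
    init: true                        # run an init process as PID 1
    logging: *default-logging
    deploy:
      rollback_config:
        parallelism: 2
        order: stop-first
```
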
#### Compose format version 2.4

- Added support for extension fields in service, network, and volume configurations

### Bugfixes

- Fixed a bug that prevented deployment with some Compose files when `DOCKER_DEFAULT_PLATFORM` was set
- Compose will no longer try to create containers or volumes with invalid starting characters
- Fixed several bugs that prevented Compose commands from working properly with containers created with an older version of Compose
- Fixed an issue with the output of `docker-compose config` with the `--compatibility-mode` flag enabled when the source file contains attachable networks
- Fixed a bug that prevented the `gcloud` credential store from working properly when used with the Compose binary on UNIX
- Fixed a bug that caused connection errors when trying to operate over a non-HTTPS TCP connection on Windows
- Fixed a bug that caused builds to fail on Windows if the Dockerfile was located in a subdirectory of the build context
- Fixed an issue that prevented proper parsing of UTF-8 BOM encoded Compose files on Windows
- Fixed an issue with handling of the double-wildcard (`**`) pattern in `.dockerignore` files when using `docker-compose build`
- Fixed a bug that caused auth values in legacy `.dockercfg` files to be ignored
- `docker-compose build` will no longer attempt to create image names starting with an invalid character

1.21.2 (2018-05-03)
-------------------

### Bugfixes

- Fixed a bug where the `ip_range` attribute in IPAM configs was prevented from passing validation

1.21.1 (2018-04-27)
-------------------

### Bugfixes

- In 1.21.0, we introduced a change to how project names are sanitized for internal use in resource names. This caused issues when manipulating an existing, deployed application whose name had changed as a result. This release properly detects resources using "legacy" naming conventions.
- Fixed an issue where specifying an in-context Dockerfile using an absolute path would fail despite being valid.
- Fixed a bug where IPAM option changes were incorrectly detected, preventing redeployments.
- Validation of v2 files now properly checks the structure of IPAM configs.
- Improved support for credentials stores on Windows to include binaries using extensions other than `.exe`. The list of valid extensions is determined by the contents of the `PATHEXT` environment variable.
- Fixed a bug where Compose would generate invalid binds containing duplicate elements with some v3.2 files, triggering errors at the Engine level during deployment.

1.21.0 (2018-04-10)
-------------------

### New features

#### Compose file version 2.4

- Introduced version 2.4 of the `docker-compose.yml` specification. This version requires Docker Engine 17.12.0 or above.
- Added support for the `platform` parameter in service definitions. If supplied, the parameter is also used when performing build for the service.
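
A minimal sketch of the 2.4 `platform` parameter (image name and platform value are hypothetical); per the entry above, the same value also applies when the service is built:

```yaml
version: "2.4"
services:
  app:
    build: .
    image: example/app        # hypothetical
    platform: linux/arm/v7    # hypothetical platform, used for pull, build and run
```
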
#### Compose file version 2.2 and up

- Added support for the `cpu_rt_period` and `cpu_rt_runtime` parameters in service definitions (2.x only).

#### Compose file version 2.1 and up

- Added support for the `cpu_period` parameter in service definitions (2.x only).
- Added support for the `isolation` parameter in service build configurations. Additionally, the `isolation` parameter in service definitions is used for builds as well if no `build.isolation` parameter is defined. (2.x only)

#### All formats

- Added support for the `--workdir` flag in `docker-compose exec`.
- Added support for the `--compress` flag in `docker-compose build`.
- `docker-compose pull` is now performed in parallel by default. You can opt out using the `--no-parallel` flag. The `--parallel` flag is now deprecated and will be removed in a future version.
- Dashes and underscores in project names are no longer stripped out.
- `docker-compose build` now supports the use of Dockerfile from outside the build context.

### Bugfixes

- Compose now checks that the volume's configuration matches the remote volume, and errors out if a mismatch is detected.
- Fixed a bug that caused Compose to raise unexpected errors when attempting to create several one-off containers in parallel.
- Fixed a bug with argument parsing when using `docker-machine config` to generate TLS flags for `exec` and `run` commands.
- Fixed a bug where variable substitution with an empty default value (e.g. `${VAR:-}`) would print an incorrect warning.
- Improved resilience when encoding of the Compose file doesn't match the system's. Users are encouraged to use UTF-8 when possible.
- Fixed a bug where external overlay networks in Swarm would be incorrectly recognized as inexistent by Compose, interrupting otherwise valid operations.

1.20.1 (2018-03-21)
-------------------

### Bugfixes

- Fixed an issue where `docker-compose build` would error out if the build context contained directory symlinks

1.20.0 (2018-03-20)
-------------------

### New features

#### Compose file version 3.6

- Introduced version 3.6 of the `docker-compose.yml` specification. This version requires Docker Engine 18.02.0 or above.
- Added support for the `tmpfs.size` property in volume mappings
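
A minimal sketch of the 3.6 `tmpfs.size` property using the long volume syntax (service, target and size are hypothetical):

```yaml
version: "3.6"
services:
  cache:
    image: redis:alpine
    volumes:
      - type: tmpfs
        target: /data
        tmpfs:
          size: 104857600   # size in bytes (100 MB here)
```
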
#### Compose file version 3.2 and up

- The `--build-arg` option can now be used without specifying a service in `docker-compose build`

#### Compose file version 2.3

- Added support for `device_cgroup_rules` in service definitions
- Added support for the `tmpfs.size` property in long-form volume mappings
- The `--build-arg` option can now be used without specifying a service in `docker-compose build`

#### All formats

- Added a `--log-level` option to the top-level `docker-compose` command. Accepted values are `debug`, `info`, `warning`, `error`, `critical`. Default log level is `info`
- `docker-compose run` now allows users to unset the container's entrypoint
- Proxy configuration found in the `~/.docker/config.json` file now populates environment and build args for containers created by Compose
- Added the `--use-aliases` flag to `docker-compose run`, indicating that network aliases declared in the service's config should be used for the running container
- Added the `--include-deps` flag to `docker-compose pull`
- `docker-compose run` now kills and removes the running container upon receiving `SIGHUP`
- `docker-compose ps` now shows the containers' health status if available
- Added the long-form `--detach` option to the `exec`, `run` and `up` commands

### Bugfixes

- Fixed `.dockerignore` handling, notably with regard to absolute paths and last-line precedence rules
- Fixed an issue where Compose would make costly DNS lookups when connecting to the Engine when using Docker For Mac
- Fixed a bug introduced in 1.19.0 which caused the default certificate path to not be honored by Compose
- Fixed a bug where Compose would incorrectly check whether a symlink's destination was accessible when part of a build context
- Fixed a bug where `.dockerignore` files containing lines of whitespace caused Compose to error out on Windows
- Fixed a bug where `--tls*` and `--host` options wouldn't be properly honored for interactive `run` and `exec` commands
- A `seccomp:<filepath>` entry in the `security_opt` config now correctly sends the contents of the file to the engine
- ANSI output for `up` and `down` operations should no longer affect the wrong lines
- Improved support for non-unicode locales
- Fixed a crash occurring on Windows when the user's home directory name contained non-ASCII characters
- Fixed a bug occurring during builds caused by files with a negative `mtime` value in the build context
- Fixed an encoding bug when streaming build progress

1.19.0 (2018-02-07)
-------------------

### Breaking changes

- On UNIX platforms, interactive `run` and `exec` commands now require the `docker` CLI to be installed on the client by default. To revert to the previous behavior, users may set the `COMPOSE_INTERACTIVE_NO_CLI` environment variable.

### New features

#### Compose file version 3.x

- The output of the `config` command should now merge `deploy` options from several Compose files in a more accurate manner

#### Compose file version 2.3

- Added support for the `runtime` option in service definitions

#### Compose file version 2.1 and up

- Added support for the `${VAR:?err}` and `${VAR?err}` variable interpolation syntax to indicate mandatory variables
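
A minimal sketch of the mandatory-variable syntax (variable name and message are hypothetical); Compose aborts with the given message when `DB_PASSWORD` is unset or empty:

```yaml
version: "2.1"
services:
  db:
    image: postgres
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD:?DB_PASSWORD must be set and non-empty}
```
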
#### Compose file version 2.x

- Added `priority` key to service network mappings, allowing the user to define in which order the specified service will connect to each network

#### All formats

- Added `--renew-anon-volumes` (shorthand `-V`) to the `up` command, preventing Compose from recovering volume data from previous containers for anonymous volumes
- Added limit for number of simultaneous parallel operations, which should prevent accidental resource exhaustion of the server. Default is 64 and can be configured using the `COMPOSE_PARALLEL_LIMIT` environment variable
- Added `--always-recreate-deps` flag to the `up` command to force recreating dependent services along with the dependency owner
- Added `COMPOSE_IGNORE_ORPHANS` environment variable to forgo orphan container detection and suppress warnings
- Added `COMPOSE_FORCE_WINDOWS_HOST` environment variable to force Compose to parse volume definitions as if the Docker host was a Windows system, even if Compose itself is currently running on UNIX
- Bash completion should now be able to better differentiate between running, stopped and paused services

### Bugfixes

- Fixed a bug that would cause the `build` command to report a connection error when the build context contained unreadable files or FIFO objects. These file types will now be handled appropriately
- Fixed various issues around interactive `run`/`exec` sessions.
- Fixed a bug where setting TLS options with environment and CLI flags simultaneously would result in part of the configuration being ignored
- Fixed a bug where the DOCKER_TLS_VERIFY environment variable was being ignored by Compose
- Fixed a bug where the `-d` and `--timeout` flags in `up` were erroneously marked as incompatible
- Fixed a bug where the recreation of a service would break if the image associated with the previous container had been removed
- Fixed a bug where updating a mount's target would break Compose when trying to recreate the associated service
- Fixed a bug where `tmpfs` volumes declared using the extended syntax in Compose files using version 3.2 would be erroneously created as anonymous volumes instead
- Fixed a bug where type conversion errors would print a stacktrace instead of exiting gracefully
- Fixed some errors related to unicode handling
- Dependent services no longer get recreated along with the dependency owner if their configuration hasn't changed
- Added better validation of `labels` fields in Compose files. Label values containing scalar types (number, boolean) now get automatically converted to strings

1.18.0 (2017-12-15)
-------------------

### New features

#### Compose file version 3.5

- Introduced version 3.5 of the `docker-compose.yml` specification. This version requires Docker Engine 17.06.0 or above
- Added support for the `shm_size` parameter in build configurations
- Added support for the `isolation` parameter in service definitions
- Added support for custom names for network, secret and config definitions
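
A minimal sketch of the 3.5 custom-name support (names are hypothetical); without `name`, Compose prefixes the network with the project name:

```yaml
version: "3.5"
services:
  web:
    image: nginx:alpine
    networks:
      - frontend
networks:
  frontend:
    name: shared_frontend   # exact name, instead of <project>_frontend
```
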
#### Compose file version 2.3

- Added support for `extra_hosts` in build configuration
- Added support for the [long syntax](https://docs.docker.com/compose/compose-file/#long-syntax-3) for volume entries, as previously introduced in the 3.2 format. Note that using this syntax will create [mounts](https://docs.docker.com/engine/admin/volumes/bind-mounts/) instead of volumes.

#### Compose file version 2.1 and up

- Added support for the `oom_kill_disable` parameter in service definitions (2.x only)
- Added support for custom names for network definitions (2.x only)

#### All formats

- Values interpolated from the environment will now be converted to the proper type when used in non-string fields.
- Added support for `--label` in `docker-compose run`
- Added support for `--timeout` in `docker-compose down`
- Added support for `--memory` in `docker-compose build`
- Setting `stop_grace_period` in service definitions now also sets the container's `stop_timeout`

### Bugfixes

- Fixed an issue where Compose was still handling service hostname according to legacy engine behavior, causing hostnames containing dots to be cut up
- Fixed a bug where the `X-Y:Z` syntax for ports was considered invalid by Compose
- Fixed an issue with CLI logging causing duplicate messages and inelegant output to occur
- Fixed an issue that caused `stop_grace_period` to be ignored when using multiple Compose files
- Fixed a bug that caused `docker-compose images` to crash when using untagged images
- Fixed a bug where the valid `${VAR:-}` syntax would cause Compose to error out
- Fixed a bug where `env_file` entries using an UTF-8 BOM were being read incorrectly
- Fixed a bug where missing secret files would generate an empty directory in their place
- Fixed character encoding issues in the CLI's error handlers
- Added validation for the `test` field in healthchecks
- Added validation for the `subnet` field in IPAM configurations
- Added validation for `volumes` properties when using the long syntax in service definitions
- The CLI now explicitly prevents using `-d` and `--timeout` together in `docker-compose up`

1.17.1 (2017-11-08)
-------------------

### Bugfixes

- Fixed a bug that would prevent creating new containers when using container labels in the list format as part of the service's definition.

1.17.0 (2017-11-02)
-------------------

### New features

#### Compose file version 3.4

- Introduced version 3.4 of the `docker-compose.yml` specification. This version requires Docker Engine 17.06.0 or above.
- Added support for `cache_from`, `network` and `target` options in build configurations
- Added support for the `order` parameter in the `update_config` section
- Added support for setting a custom name in volume definitions using the `name` parameter
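
A minimal sketch of the 3.4 build options above (stage and image names are hypothetical):

```yaml
version: "3.4"
services:
  app:
    build:
      context: .
      target: production        # hypothetical build stage in the Dockerfile
      network: host             # network used for RUN instructions during build
      cache_from:
        - example/app:latest    # hypothetical image used as a cache source
```
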
#### Compose file version 2.3

- Added support for `shm_size` option in build configuration

#### Compose file version 2.x

- Added support for extension fields (`x-*`). Also available for v3.4 files

#### All formats

- Added new `--no-start` to the `up` command, allowing users to create all resources (networks, volumes, containers) without starting services. The `create` command is deprecated in favor of this new option

### Bugfixes

- Fixed a bug where `extra_hosts` values would be overridden by extension files instead of merging together
- Fixed a bug where the validation for v3.2 files would prevent using the `consistency` field in service volume definitions
- Fixed a bug that would cause a crash when configuration fields expecting unique items would contain duplicates
- Fixed a bug where mount overrides with a different mode would create a duplicate entry instead of overriding the original entry
- Fixed a bug where build labels declared as a list wouldn't be properly parsed
- Fixed a bug where the output of `docker-compose config` would be invalid for some versions if the file contained custom-named external volumes
- Improved error handling when issuing a build command on Windows using an unsupported file version
- Fixed an issue where networks with identical names would sometimes be created when running `up` commands concurrently.

1.16.1 (2017-09-01)
-------------------

### Bugfixes

- Fixed bug that prevented using `extra_hosts` in several configuration files.

1.16.0 (2017-08-31)
-------------------

### New features

#### Compose file version 2.3

- Introduced version 2.3 of the `docker-compose.yml` specification. This version requires Docker Engine 17.06.0 or above.
- Added support for the `target` parameter in build configurations
- Added support for the `start_period` parameter in healthcheck configurations
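
A minimal sketch of a 2.3 healthcheck using `start_period` (service, command and timings are hypothetical):

```yaml
version: "2.3"
services:
  web:
    image: example/web        # hypothetical
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost/"]
      interval: 30s
      timeout: 5s
      retries: 3
      start_period: 40s       # failures during startup don't count against retries
```
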
#### Compose file version 2.x

- Added support for the `blkio_config` parameter in service definitions
- Added support for setting a custom name in volume definitions using the `name` parameter (not available for version 2.0)

#### All formats

- Added new CLI flag `--no-ansi` to suppress ANSI control characters in output

### Bugfixes

- Fixed a bug where nested `extends` instructions weren't resolved properly, causing "file not found" errors
- Fixed several issues with `.dockerignore` parsing
- Fixed issues where logs of TTY-enabled services were being printed incorrectly and causing `MemoryError` exceptions
- Fixed a bug where printing application logs would sometimes be interrupted by a `UnicodeEncodeError` exception on Python 3
- The `$` character in the output of `docker-compose config` is now properly escaped
- Fixed a bug where running `docker-compose top` would sometimes fail with an uncaught exception
- Fixed a bug where `docker-compose pull` with the `--parallel` flag would return a `0` exit code when failing
- Fixed an issue where keys in `deploy.resources` were not being validated
- Fixed an issue where the `logging` options in the output of `docker-compose config` would be set to `null`, an invalid value
- Fixed the output of the `docker-compose images` command when an image would come from a private repository using an explicit port number
- Fixed the output of `docker-compose config` when a port definition used `0` as the value for the published port

1.15.0 (2017-07-26)
-------------------

### New features

#### Compose file version 2.2

- Added support for the `network` parameter in build configurations.

#### Compose file version 2.1 and up

- The `pid` option in a service's definition now supports a `service:<name>` value.
- Added support for the `storage_opt` parameter in service definitions. This option is not available for the v3 format

#### All formats

- Added `--quiet` flag to `docker-compose pull`, suppressing progress output
- Some improvements to CLI output

### Bugfixes

- Volumes specified through the `--volume` flag of `docker-compose run` now complement volumes declared in the service's definition instead of replacing them
- Fixed a bug where using multiple Compose files would unset the scale value defined inside the Compose file.
- Fixed an issue where the `credHelpers` entries in the `config.json` file were not being honored by Compose
- Fixed a bug where using multiple Compose files with port declarations would cause failures in Python 3 environments
- Fixed a bug where some proxy-related options present in the user's environment would prevent Compose from running
- Fixed an issue where the output of `docker-compose config` would be invalid if the original file used `Y` or `N` values
- Fixed an issue preventing `up` operations on a previously created stack on Windows Engine.

1.14.0 (2017-06-19)
-------------------

### New features

#### Compose file version 3.3

- Introduced version 3.3 of the `docker-compose.yml` specification. This version requires Docker Engine 17.06.0 or above. Note: the `credential_spec` and `configs` keys only apply to Swarm services and will be ignored by Compose

#### Compose file version 2.2

- Added the following parameters in service definitions: `cpu_count`, `cpu_percent`, `cpus`

#### Compose file version 2.1

- Added support for build labels. This feature is also available in the 2.2 and 3.3 formats.

#### All formats

- Added shorthand `-u` for `--user` flag in `docker-compose exec`
- Differences in labels between the Compose file and remote network will now print a warning instead of preventing redeployment.

### Bugfixes

- Fixed a bug where service's dependencies were being rescaled to their default scale when running a `docker-compose run` command
- Fixed a bug where `docker-compose rm` with the `--stop` flag was not behaving properly when provided with a list of services to remove
- Fixed a bug where `cache_from` in the build section would be ignored when using more than one Compose file.
- Fixed a bug that prevented binding the same port to different IPs when using more than one Compose file.
- Fixed a bug where override files would not be picked up by Compose if they had the `.yaml` extension
- Fixed a bug on Windows Engine where networks would be incorrectly flagged for recreation
- Fixed a bug where services declaring ports would cause crashes on some versions of Python 3
- Fixed a bug where the output of `docker-compose config` would sometimes contain invalid port definitions

1.13.0 (2017-05-02)
-------------------

### Breaking changes

- `docker-compose up` now resets a service's scaling to its default value. You can use the newly introduced `--scale` option to specify a custom scale value

### New features

#### Compose file version 2.2

- Introduced version 2.2 of the `docker-compose.yml` specification. This version requires Docker Engine 1.13.0 or above
- Added support for `init` in service definitions.
- Added support for `scale` in service definitions. The configuration's value can be overridden using the `--scale` flag in `docker-compose up`. Please note that the `scale` command is disabled for this file format
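
A minimal sketch of the 2.2 `scale` and `init` options (service and image names are hypothetical); the file value is the default, and `docker-compose up --scale worker=3` would override it:

```yaml
version: "2.2"
services:
  worker:
    image: example/worker   # hypothetical
    init: true              # run an init process inside the container
    scale: 2                # default number of containers for this service
```
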
#### Compose file version 2.x

- Added support for `options` in the `ipam` section of network definitions

### Bugfixes

- Fixed a bug where paths provided to compose via the `-f` option were not being resolved properly
- Fixed a bug where the `ext_ip::target_port` notation in the ports section was incorrectly marked as invalid
- Fixed an issue where the `exec` command would sometimes not return control to the terminal when using the `-d` flag
- Fixed a bug where secrets were missing from the output of the `config` command for v3.2 files
- Fixed an issue where `docker-compose` would hang if no internet connection was available
- Fixed an issue where paths containing unicode characters passed via the `-f` flag were causing Compose to crash
- Fixed an issue where the output of `docker-compose config` would be invalid if the Compose file contained external secrets
- Fixed a bug where using `--exit-code-from` with `up` would fail if Compose was installed in a Python 3 environment
- Fixed a bug where recreating containers using a combination of `tmpfs` and `volumes` would result in an invalid config state

1.12.0 (2017-04-04)
-------------------

### New features

#### Compose file version 3.2

- Introduced version 3.2 of the `docker-compose.yml` specification
- Added support for `cache_from` in the `build` section of services
- Added support for the new expanded ports syntax in service definitions
- Added support for the new expanded volumes syntax in service definitions
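
A minimal sketch of the 3.2 expanded ports and volumes syntax (ports, paths and volume name are hypothetical):

```yaml
version: "3.2"
services:
  web:
    image: nginx:alpine
    ports:
      - target: 80          # container port
        published: 8080     # host port
        protocol: tcp
        mode: host
    volumes:
      - type: volume
        source: webdata
        target: /usr/share/nginx/html
        read_only: true
volumes:
  webdata:
```
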
#### Compose file version 2.1

- Added support for `pids_limit` in service definitions

#### Compose file version 2.0 and up

- Added `--volumes` option to `docker-compose config` that lists named volumes declared for that project
- Added support for `mem_reservation` in service definitions (2.x only)
- Added support for `dns_opt` in service definitions (2.x only)

#### All formats

- Added a new `docker-compose images` command that lists images used by the current project's containers
- Added a `--stop` (shorthand `-s`) option to `docker-compose rm` that stops the running containers before removing them
- Added a `--resolve-image-digests` option to `docker-compose config` that pins the image version for each service to a permanent digest
- Added a `--exit-code-from SERVICE` option to `docker-compose up`. When used, `docker-compose` will exit on any container's exit with the code corresponding to the specified service's exit code
- Added a `--parallel` option to `docker-compose pull` that enables images for multiple services to be pulled simultaneously
- Added a `--build-arg` option to `docker-compose build`
- Added a `--volume` (shorthand `-v`) option to `docker-compose run` to declare runtime volumes to be mounted
- Added a `--project-directory PATH` option to `docker-compose` that will affect path resolution for the project
- When using `--abort-on-container-exit` in `docker-compose up`, the exit code for the container that caused the abort will be the exit code of the `docker-compose up` command
- Users can now configure which path separator character they want to use to separate the `COMPOSE_FILE` environment value using the `COMPOSE_PATH_SEPARATOR` environment variable
- Added support for port range to single port in port mappings (e.g. `8000-8010:80`)

### Bugfixes

- `docker-compose run --rm` now removes anonymous volumes after execution, matching the behavior of `docker run --rm`.
- Fixed a bug where override files containing port lists would cause a TypeError to be raised
- Fixed a bug where the `deploy` key would be missing from the output of `docker-compose config`
- Fixed a bug where scaling services up or down would sometimes re-use obsolete containers
- Fixed a bug where the output of `docker-compose config` would be invalid if the project declared anonymous volumes
- Variable interpolation now properly occurs in the `secrets` section of the Compose file
- The `secrets` section now properly appears in the output of `docker-compose config`
- Fixed a bug where changes to some networks properties would not be detected against previously created networks
- Fixed a bug where `docker-compose` would crash when trying to write into a closed pipe
- Fixed an issue where Compose would not pick up on the value of COMPOSE_TLS_VERSION when used in combination with command-line TLS flags

1.11.2 (2017-02-17)
-------------------

### Bugfixes

- Fixed a bug that was preventing secrets configuration from being loaded properly
- Fixed a bug where the `docker-compose config` command would fail if the config file contained secrets definitions
- Fixed an issue where Compose on some linux distributions would pick up and load an outdated version of the requests library
- Fixed an issue where socket-type files inside a build folder would cause `docker-compose` to crash when trying to build that service
- Fixed an issue where recursive wildcard patterns `**` were not being recognized in `.dockerignore` files.

1.11.1 (2017-02-09)
-------------------

### Bugfixes

- Fixed a bug where the 3.1 file format was not being recognized as valid by the Compose parser

1.11.0 (2017-02-08)
-------------------

### New Features

#### Compose file version 3.1

- Introduced version 3.1 of the `docker-compose.yml` specification. This version requires Docker Engine 1.13.0 or above. It introduces support for secrets. See the documentation for more information
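
A minimal sketch of 3.1 secrets (service and file name are hypothetical):

```yaml
version: "3.1"
services:
  db:
    image: mysql
    secrets:
      - db_password           # mounted at /run/secrets/db_password in the container
secrets:
  db_password:
    file: ./db_password.txt   # hypothetical local file holding the secret value
```
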
#### Compose file version 2.0 and up

- Introduced the `docker-compose top` command that displays processes running for the different services managed by Compose.

### Bugfixes

- Fixed a bug where extending a service defining a healthcheck dictionary would cause `docker-compose` to error out.
- Fixed an issue where the `pid` entry in a service definition was being ignored when using multiple Compose files.

1.10.1 (2017-02-01)
-------------------

### Bugfixes

- Fixed an issue where presence of older versions of the docker-py package would cause unexpected crashes while running Compose
- Fixed an issue where healthcheck dependencies would be lost when using multiple compose files for a project
- Fixed a few issues that made the output of the `config` command invalid
- Fixed an issue where adding volume labels to v3 Compose files would result in an error
- Fixed an issue on Windows where build context paths containing unicode characters were being improperly encoded
- Fixed a bug where Compose would occasionally crash while streaming logs when containers would stop or restart

1.10.0 (2017-01-18)
-------------------

### New Features

#### Compose file version 3.0

- Introduced version 3.0 of the `docker-compose.yml` specification. This version requires Docker Engine 1.13 or above and is specifically designed to work with the `docker stack` commands.

#### Compose file version 2.1 and up

- Healthcheck configuration can now be done in the service definition using the `healthcheck` parameter
- Container dependencies can now be set up to wait on positive healthchecks when declared using `depends_on`. See the documentation for the updated syntax. **Note:** This feature will not be ported to version 3 Compose files.
- Added support for the `sysctls` parameter in service definitions
- Added support for the `userns_mode` parameter in service definitions
- Compose now adds identifying labels to networks and volumes it creates
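
A minimal sketch of the 2.1 healthcheck-gated dependency (service names, command and timings are hypothetical):

```yaml
version: "2.1"
services:
  db:
    image: postgres
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      retries: 5
  web:
    image: example/web              # hypothetical
    depends_on:
      db:
        condition: service_healthy  # wait until db reports healthy
```
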
#### Compose file version 2.0 and up

- Added support for the `stop_grace_period` option in service definitions.

### Bugfixes

- Colored output now works properly on Windows.
- Fixed a bug where docker-compose run would fail to set up link aliases in interactive mode on Windows.
- Networks created by Compose are now always made attachable (Compose files v2.1 and up).
- Fixed a bug where falsy values of `COMPOSE_CONVERT_WINDOWS_PATHS` (`0`, `false`, empty value) were being interpreted as true.
- Fixed a bug where forward slashes in some .dockerignore patterns weren't being parsed correctly on Windows

1.9.0 (2016-11-16)
------------------

**Breaking changes**

- When using Compose with Docker Toolbox/Machine on Windows, volume paths are no longer converted from `C:\Users` to `/c/Users`-style by default. To re-enable this conversion so that your volumes keep working, set the environment variable `COMPOSE_CONVERT_WINDOWS_PATHS=1`. Users of Docker for Windows are not affected and do not need to set the variable.

New Features

- Interactive mode for `docker-compose run` and `docker-compose exec` is now supported on Windows platforms. Please note that the `docker` binary is required to be present on the system for this feature to work.
- Introduced version 2.1 of the `docker-compose.yml` specification. This version requires Docker Engine 1.12 or above.
- Added support for setting volume labels and network labels in `docker-compose.yml`.
- Added support for the `isolation` parameter in service definitions.
- Added support for link-local IPs in the service networks definitions.
- Added support for shell-style inline defaults in variable interpolation. The supported forms are `${FOO-default}` (fall back if FOO is unset) and `${FOO:-default}` (fall back if FOO is unset or empty).
- Added support for the `group_add` and `oom_score_adj` parameters in service definitions.
- Added support for the `internal` and `enable_ipv6` parameters in network definitions.
- Compose now defaults to using the `npipe` protocol on Windows.
- Overriding a `logging` configuration will now properly merge the `options` mappings if the `driver` values do not conflict.

Bug Fixes

- Fixed several bugs related to `npipe` protocol support on Windows.
- Fixed an issue with Windows paths being incorrectly converted when using Docker on Windows Server.
- Fixed a bug where an empty `restart` value would sometimes result in an exception being raised.
- Fixed an issue where service logs containing unicode characters would sometimes cause an error to occur.
- Fixed a bug where unicode values in environment variables would sometimes raise a unicode exception when retrieved.
- Fixed an issue where Compose would incorrectly detect a configuration mismatch for overlay networks.

1.8.1 (2016-09-22)
------------------

Bug Fixes

- Fixed a bug where users using a credentials store were not able to access their private images.
- Fixed a bug where users using identity tokens to authenticate were not able to access their private images.
- Fixed a bug where an `HttpHeaders` entry in the docker configuration file would cause Compose to crash when trying to build an image.
- Fixed a few bugs related to the handling of Windows paths in volume binding declarations.
- Fixed a bug where Compose would sometimes crash while trying to read a streaming response from the engine.
- Fixed an issue where Compose would crash when encountering an API error while streaming container logs.
- Fixed an issue where Compose would erroneously try to output logs from drivers not handled by the Engine's API.
- Fixed a bug where options from the `docker-machine config` command would not be properly interpreted by Compose.
- Fixed a bug where the connection to the Docker Engine would sometimes fail when running a large number of services simultaneously.
- Fixed an issue where Compose would sometimes print a misleading suggestion message when running the `bundle` command.
- Fixed a bug where connection errors would not be handled properly by Compose during the project initialization phase.
- Fixed a bug where a misleading error would appear when encountering a connection timeout.

1.8.0 (2016-06-14)
------------------

**Breaking Changes**

- As announced in 1.7.0, `docker-compose rm` now removes containers created by `docker-compose run` by default.
- Setting `entrypoint` on a service now empties out any default command that was set on the image (i.e. any `CMD` instruction in the Dockerfile used to build it). This makes it consistent with the `--entrypoint` flag to `docker run`.
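
A minimal sketch of the 1.8.0 `entrypoint` behavior above (paths and arguments are hypothetical); since the image's default `CMD` is discarded, re-declare `command` if it is still needed:

```yaml
services:
  app:
    build: .
    entrypoint: /app/start.sh   # hypothetical script; clears the image's CMD
    command: --verbose          # arguments must now be declared explicitly
```
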
- Fixed a bug where `docker-compose config` would not check the validity of links. - Fixed an issue where `docker-compose help` would not output a list of available commands and generic options as expected. - Fixed an issue where filtering by service when using `docker-compose logs` would not apply for newly created services. - Fixed a bug where unchanged services would sometimes be recreated in the up phase when using Compose with Python 3. - Fixed an issue where API errors encountered during the up phase would not be recognized as a failure state by Compose. - Fixed a bug where Compose would raise a NameError because of an undefined exception name on non-Windows platforms. - Fixed a bug where the wrong version of `docker-py` would sometimes be installed alongside Compose. - Fixed a bug where the host value output by `docker-machine config default` would not be recognized as valid options by the `docker-compose` command line. - Fixed an issue where Compose would sometimes exit unexpectedly while reading events broadcast by a Swarm cluster. - Corrected a statement in the docs about the location of the `.env` file: it is read from the current directory, not from the directory containing the Compose file. 1.7.0 (2016-04-13) ------------------ **Breaking Changes** - `docker-compose logs` no longer follows log output by default. It now matches the behaviour of `docker logs` and exits after the current logs are printed. Use `-f` to get the old default behaviour. - Booleans are no longer allowed as values for mappings in the Compose file (for keys `environment`, `labels` and `extra_hosts`). Previously this was a warning. Boolean values should be quoted so they become string values. New Features - Compose now looks for a `.env` file in the directory where it's run and reads any environment variables defined inside, if they're not already set in the shell environment. This lets you easily set defaults for variables used in the Compose file, or for any of the `COMPOSE_*` or `DOCKER_*` variables (see the sketch after this list). - Added a `--remove-orphans` flag to both `docker-compose up` and `docker-compose down` to remove containers for services that were removed from the Compose file. - Added a `--all` flag to `docker-compose rm` to include containers created by `docker-compose run`. This will become the default behavior in the next version of Compose. - Added support for all the same TLS configuration flags used by the `docker` client: `--tls`, `--tlscert`, `--tlskey`, etc. - Compose files now support the `tmpfs` and `shm_size` options. - Added the `--workdir` flag to `docker-compose run`. - `docker-compose logs` now shows logs for new containers that are created after it starts. - The `COMPOSE_FILE` environment variable can now contain multiple files, separated by the host system's standard path separator (`:` on Mac/Linux, `;` on Windows). - You can now specify a static IP address when connecting a service to a network with the `ipv4_address` and `ipv6_address` options. - Added `--follow`, `--timestamp`, and `--tail` flags to the `docker-compose logs` command. - `docker-compose up` and `docker-compose start` will now start containers in parallel where possible. - `docker-compose stop` now stops containers in reverse dependency order instead of all at once. - Added the `--build` flag to `docker-compose up` to force it to build a new image. It now shows a warning if an image is automatically built when the flag is not used. - Added the `docker-compose exec` command for executing a process in a running container.
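As a rough sketch of how the new `.env` lookup plays with variable interpolation and `docker-compose exec` (the `web` service, `myapp` image and `TAG` variable are hypothetical, not taken from the release notes):

```
# .env, read from the directory where docker-compose is run
TAG=1.7
```

```yaml
# docker-compose.yml (hypothetical service)
web:
  image: "myapp:$TAG"
```

```sh
$ docker-compose up -d web
$ docker-compose exec web sh   # run an interactive shell in the running container
```

A value already set in the shell environment takes precedence over the same key in `.env`.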
Bug Fixes - `docker-compose down` now removes containers created by `docker-compose run`. - A more appropriate error is shown when a timeout is hit during `up` when using a tty. - Fixed a bug in `docker-compose down` where it would abort if some resources had already been removed. - Fixed a bug where changes to network aliases would not trigger a service to be recreated. - Fixed a bug where a log message was printed about creating a new volume when it already existed. - Fixed a bug where interrupting `up` would not always shut down containers. - Fixed a bug where `log_opt` and `log_driver` were not properly carried over when extending services in the v1 Compose file format. - Fixed a bug where empty values for build args would cause file validation to fail. 1.6.2 (2016-02-23) ------------------ - Fixed a bug where connecting to a TLS-enabled Docker Engine would fail with a certificate verification error. 1.6.1 (2016-02-23) ------------------ Bug Fixes - Fixed a bug where recreating a container multiple times would cause the new container to be started without the previous volumes. - Fixed a bug where Compose would set the value of unset environment variables to an empty string, instead of a key without a value. - Provide a better error message when Compose requires a more recent version of the Docker API. - Add a missing config field `network.aliases` which allows setting a network scoped alias for a service. - Fixed a bug where `run` would not start services listed in `depends_on`. - Fixed a bug where `networks` and `network_mode` were not merged when using extends or multiple Compose files. - Fixed a bug with service aliases where the short container id alias only contained 10 characters, instead of the 12 characters used in previous versions. - Added a missing log message when creating a new named volume. - Fixed a bug where `build.args` was not merged when using `extends` or multiple Compose files. - Fixed some bugs with config validation when null values or incorrect types were used instead of a mapping. - Fixed a bug where a `build` section without a `context` would show a stack trace instead of a helpful validation message. - Improved compatibility with Swarm by only setting a container affinity to the previous instance of a service's container when the service uses an anonymous container volume. Previously the affinity was always set on all containers. - Fixed a bug where the validation of some `driver_opts` would cause an error if a number was used instead of a string. - Some improvements to the `run.sh` script used by the Compose container install option. - Fixed a bug with `up --abort-on-container-exit` where Compose would exit, but would not stop other containers. - Corrected the warning message that is printed when a boolean value is used as a value in a mapping. 1.6.0 (2016-01-15) ------------------ Major Features: - Compose 1.6 introduces a new format for `docker-compose.yml` which lets you define networks and volumes in the Compose file as well as services. It also makes a few changes to the structure of some configuration options. You don't have to use it - your existing Compose files will run on Compose 1.6 exactly as they do today. Check the upgrade guide for full details: https://docs.docker.com/compose/compose-file#upgrading - Support for networking has exited experimental status and is the recommended way to enable communication between containers. If you use the new file format, your app will use networking.
If you aren't ready yet, just leave your Compose file as it is and it'll continue to work just the same. By default, you don't have to configure any networks. In fact, using networking with Compose involves even less configuration than using links. Consult the networking guide for how to use it: https://docs.docker.com/compose/networking The experimental flags `--x-networking` and `--x-network-driver`, introduced in Compose 1.5, have been removed. - You can now pass arguments to a build if you're using the new file format:

      build:
        context: .
        args:
          buildno: 1

- You can now specify both a `build` and an `image` key if you're using the new file format. `docker-compose build` will build the image and tag it with the name you've specified, while `docker-compose pull` will attempt to pull it. - There's a new `events` command for monitoring container events from the application, much like `docker events`. This is a good primitive for building tools on top of Compose for performing actions when particular things happen, such as containers starting and stopping. - There's a new `depends_on` option for specifying dependencies between services. This enforces the order of startup, and ensures that when you run `docker-compose up SERVICE` on a service with dependencies, those are started as well (a combined sketch of the new format appears at the end of this release's notes). New Features: - Added a new command `config` which validates and prints the Compose configuration after interpolating variables, resolving relative paths, and merging multiple files and `extends`. - Added a new command `create` for creating containers without starting them. - Added a new command `down` to stop and remove all the resources created by `up` in a single command. - Added support for the `cpu_quota` configuration option. - Added support for the `stop_signal` configuration option. - Commands `start`, `restart`, `pause`, and `unpause` now exit with an error status code if no containers were modified. - Added a new `--abort-on-container-exit` flag to `up` which causes `up` to stop all containers and exit once the first container exits. - Removed support for `FIG_FILE` and `FIG_PROJECT_NAME`; Compose no longer reads `fig.yml` as a default Compose file location. - Removed the `migrate-to-labels` command. - Removed the `--allow-insecure-ssl` flag. Bug Fixes: - Fixed a validation bug that prevented the use of a range of ports in the `expose` field. - Fixed a validation bug that prevented the use of arrays in the `entrypoint` field if they contained duplicate entries. - Fixed a bug that caused `ulimits` to be ignored when used with `extends`. - Fixed a bug that prevented IPv6 addresses in `extra_hosts`. - Fixed a bug that caused `extends` to be ignored when included from multiple Compose files. - Fixed an incorrect warning when a container volume was defined in the Compose file. - Fixed a bug that prevented the force shutdown behaviour of `up` and `logs`. - Fixed a bug that caused `None` to be printed as the network driver name when the default network driver was used. - Fixed a bug where using the string form of `dns` or `dns_search` would cause an error. - Fixed a bug where a container would be reported as "Up" when it was in the restarting state. - Fixed a confusing error message when DOCKER_CERT_PATH was not set properly. - Fixed a bug where attaching to a container would fail if it was using a non-standard logging driver (or none at all).
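Taken together, a minimal sketch of the new v2 format combining several of these additions (top-level `services` and `volumes` keys, build `args`, and `depends_on`; all names below are hypothetical):

```yaml
version: "2"
services:
  web:
    build:
      context: .
      args:
        buildno: 1
    depends_on:
      - db
  db:
    image: postgres
    volumes:
      - data:/var/lib/postgresql/data
volumes:
  data: {}
```

With a file like this, `docker-compose up web` starts `db` first, and `docker-compose config` prints the merged, interpolated result.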
1.5.2 (2015-12-03) ------------------ - Fixed a bug which broke the use of `environment` and `env_file` with `extends`, and caused environment keys without values to have a `None` value, instead of a value from the host environment. - Fixed a regression in 1.5.1 that caused a warning about volumes to be raised incorrectly when containers were recreated. - Fixed a bug which prevented building a `Dockerfile` that used a URL in an `ADD` instruction. - Fixed a bug with `docker-compose restart` which prevented it from starting stopped containers. - Fixed handling of SIGTERM and SIGINT to properly stop containers. - Add support for using a URL as the value of `build`. - Improved the validation of the `expose` option. 1.5.1 (2015-11-12) ------------------ - Add the `--force-rm` option to `build`. - Add the `ulimit` option for services in the Compose file. - Fixed a bug where `up` would error with "service needs to be built" if a service changed from using `image` to using `build`. - Fixed a bug that would cause incorrect output of parallel operations on some terminals. - Fixed a bug that prevented a container from being recreated when the mode of a `volumes_from` was changed. - Fixed a regression in 1.5.0 where non-utf-8 unicode characters would cause `up` or `logs` to crash. - Fixed a regression in 1.5.0 where Compose would use a success exit status code when a command fails due to an HTTP timeout communicating with the docker daemon. - Fixed a regression in 1.5.0 where `name` was being accepted as a valid service option which would override the actual name of the service. - When using `--x-networking` Compose no longer sets the hostname to the container name. - When using `--x-networking` Compose will only create the default network if at least one container is using the network. - When printing logs during `up` or `logs`, flush the output buffer after each line to prevent buffering issues from hiding logs. - Recreate a container if one of its dependencies is being created. Previously a container was only recreated if its dependencies already existed, but were being recreated as well. - Add a warning when a `volume` in the Compose file is being ignored and masked by a container volume from a previous container. - Improve the output of `pull` when run without a tty. - When using multiple Compose files, validate each before attempting to merge them together. Previously invalid files would result in unhelpful errors. - Allow dashes in keys in the `environment` service option. - Improve validation error messages by including the filename as part of the error message. 1.5.0 (2015-11-03) ------------------ **Breaking changes:** With the introduction of variable substitution support in the Compose file, any Compose file that uses an environment variable (`$VAR` or `${VAR}`) in the `command:` or `entrypoint:` field will break. Previously these values were interpolated inside the container, with a value from the container environment. In Compose 1.5.0, the values will be interpolated on the host, with a value from the host environment. To migrate a Compose file to 1.5.0, escape the variables with an extra `$` (ex: `$$VAR` or `$${VAR}`). See https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution Major features: - Compose is now available for Windows. - Environment variables can be used in the Compose file.
See https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-substitution - Multiple compose files can be specified, allowing you to override settings in the default Compose file. See https://github.com/docker/compose/blob/8cc8e61/docs/reference/docker-compose.md for more details. - Compose now produces better error messages when a file contains invalid configuration. - `up` now waits for all services to exit before shutting down, rather than shutting down as soon as one container exits. - Experimental support for the new docker networking system can be enabled with the `--x-networking` flag. Read more here: https://github.com/docker/docker/blob/8fee1c20/docs/userguide/dockernetworks.md New features: - You can now optionally pass a mode to `volumes_from`, e.g. `volumes_from: ["servicename:ro"]`. - Since Docker now lets you create volumes with names, you can refer to those volumes by name in `docker-compose.yml`. For example, `volumes: ["mydatavolume:/data"]` will mount the volume named `mydatavolume` at the path `/data` inside the container. If the first component of an entry in `volumes` starts with a `.`, `/` or `~`, it is treated as a path and expansion of relative paths is performed as necessary. Otherwise, it is treated as a volume name and passed straight through to Docker. Read more on named volumes and volume drivers here: https://github.com/docker/docker/blob/244d9c33/docs/userguide/dockervolumes.md - `docker-compose build --pull` instructs Compose to pull the base image for each Dockerfile before building. - `docker-compose pull --ignore-pull-failures` instructs Compose to continue if it fails to pull a single service's image, rather than aborting. - You can now specify an IPC namespace in `docker-compose.yml` with the `ipc` option. - Containers created by `docker-compose run` can now be named with the `--name` flag. - If you install Compose with pip or use it as a library, it now works with Python 3. - `image` now supports image digests (in addition to ids and tags), e.g. `image: "busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d"` - `ports` now supports ranges of ports, e.g.

      ports:
        - "3000-3005"
        - "9000-9001:8000-8001"

- `docker-compose run` now supports a `-p|--publish` parameter, much like `docker run -p`, for publishing specific ports to the host. - `docker-compose pause` and `docker-compose unpause` have been implemented, analogous to `docker pause` and `docker unpause`. - When using `extends` to copy configuration from another service in the same Compose file, you can omit the `file` option. - Compose can be installed and run as a Docker image. This is an experimental feature. Bug fixes: - All values for the `log_driver` option which are supported by the Docker daemon are now supported by Compose. - `docker-compose build` can now be run successfully against a Swarm cluster. 1.4.2 (2015-09-22) ------------------ - Fixed a regression in the 1.4.1 release that would cause `docker-compose up` without the `-d` option to exit immediately. 1.4.1 (2015-09-10) ------------------ The following bugs have been fixed: - Some configuration changes (notably changes to `links`, `volumes_from`, and `net`) were not properly triggering a container recreate as part of `docker-compose up`. - `docker-compose up ` was showing logs for all services instead of just the specified services. - Containers with custom container names were showing up in logs as `service_number` instead of their custom container name.
- When scaling a service, containers would sometimes be recreated even when the configuration had not changed. 1.4.0 (2015-08-04) ------------------ - By default, `docker-compose up` now only recreates containers for services whose configuration has changed since they were created. This should result in a dramatic speed-up for many applications. The experimental `--x-smart-recreate` flag which introduced this feature in Compose 1.3.0 has been removed, and a `--force-recreate` flag has been added for when you want to recreate everything. - Several of Compose's commands - `scale`, `stop`, `kill` and `rm` - now perform actions on multiple containers in parallel, rather than in sequence, which will run much faster on larger applications. - You can now specify a custom name for a service's container with `container_name`. Because Docker container names must be unique, this means you can't scale the service beyond one container. - You no longer have to specify a `file` option when using `extends` - it will default to the current file. - Service names can now contain dots, dashes and underscores. - Compose can now read YAML configuration from standard input, rather than from a file, by specifying `-` as the filename. This makes it easier to generate configuration dynamically:

      $ echo 'redis: {"image": "redis"}' | docker-compose --file - up

- There's a new `docker-compose version` command which prints extended information about Compose's bundled dependencies. - `docker-compose.yml` now supports `log_opt` as well as `log_driver`, allowing you to pass extra configuration to a service's logging driver. - `docker-compose.yml` now supports `memswap_limit`, similar to `docker run --memory-swap`. - When mounting volumes with the `volumes` option, you can now pass in any mode supported by the daemon, not just `:ro` or `:rw`. For example, SELinux users can pass `:z` or `:Z`. - You can now specify a custom volume driver with the `volume_driver` option in `docker-compose.yml`, much like `docker run --volume-driver`. - A bug has been fixed where Compose would fail to pull images from private registries serving plain (unsecured) HTTP. The `--allow-insecure-ssl` flag, which was previously used to work around this issue, has been deprecated and now has no effect. - A bug has been fixed where `docker-compose build` would fail if the build depended on a private Hub image or an image from a private registry. - A bug has been fixed where Compose would crash if there were containers which the Docker daemon had not finished removing. - Two bugs have been fixed where Compose would sometimes fail with a "Duplicate bind mount" error, or fail to attach volumes to a container, if there was a volume path specified in `docker-compose.yml` with a trailing slash. Thanks @mnowster, @dnephin, @ekristen, @funkyfuture, @jeffk and @lukemarsden! 1.3.3 (2015-07-15) ------------------ Two regressions have been fixed: - When stopping containers gracefully, Compose was setting the timeout to 0, effectively forcing a SIGKILL every time. - Compose would sometimes crash depending on the formatting of container data returned from the Docker API. 1.3.2 (2015-07-14) ------------------ The following bugs have been fixed: - When there were one-off containers created by running `docker-compose run` on an older version of Compose, `docker-compose run` would fail with a name collision. Compose now shows an error if you have leftover containers of this type lying around, and tells you how to remove them.
- Compose was not reading Docker authentication config files created in the new location, `~/.docker/config.json`, and authentication against private registries would therefore fail. - When a container had a pseudo-TTY attached, its output in `docker-compose up` would be truncated. - `docker-compose up --x-smart-recreate` would sometimes fail when an image tag was updated. - `docker-compose up` would sometimes create two containers with the same numeric suffix. - `docker-compose rm` and `docker-compose ps` would sometimes list services that aren't part of the current project (though no containers were erroneously removed). - Some `docker-compose` commands would not show an error if invalid service names were passed in. Thanks @dano, @josephpage, @kevinsimper, @lieryan, @phemmer, @soulrebel and @sschepens! 1.3.1 (2015-06-21) ------------------ The following bugs have been fixed: - `docker-compose build` would always attempt to pull the base image before building. - `docker-compose help migrate-to-labels` failed with an error. - If no network mode was specified, Compose would set it to "bridge", rather than allowing the Docker daemon to use its configured default network mode. 1.3.0 (2015-06-18) ------------------ Firstly, two important notes: - **This release contains breaking changes, and you will need to either remove or migrate your existing containers before running your app** - see the [upgrading section of the install docs](https://github.com/docker/compose/blob/1.3.0rc1/docs/install.md#upgrading) for details. - Compose now requires Docker 1.6.0 or later. We've done a lot of work in this release to remove hacks and make Compose more stable: - Compose now uses container labels, rather than names, to keep track of containers. This makes Compose both faster and easier to integrate with your own tools. - Compose no longer uses "intermediate containers" when recreating containers for a service. This makes `docker-compose up` less complex and more resilient to failure. There are some new features: - `docker-compose up` has an **experimental** new behaviour: it will only recreate containers for services whose configuration has changed in `docker-compose.yml`. This will eventually become the default, but for now you can take it for a spin:

      $ docker-compose up --x-smart-recreate

- When invoked in a subdirectory of a project, `docker-compose` will now climb up through parent directories until it finds a `docker-compose.yml`. Several new configuration keys have been added to `docker-compose.yml` (a combined sketch follows this list): - `dockerfile`, like `docker build --file`, lets you specify an alternate Dockerfile to use with `build`. - `labels`, like `docker run --label`, lets you add custom metadata to containers. - `extra_hosts`, like `docker run --add-host`, lets you add entries to a container's `/etc/hosts` file. - `pid: host`, like `docker run --pid=host`, lets you reuse the same PID namespace as the host machine. - `cpuset`, like `docker run --cpuset-cpus`, lets you specify which CPUs to allow execution in. - `read_only`, like `docker run --read-only`, lets you mount a container's filesystem as read-only. - `security_opt`, like `docker run --security-opt`, lets you specify [security options](https://docs.docker.com/engine/reference/run/#security-configuration). - `log_driver`, like `docker run --log-driver`, lets you specify a [log driver](https://docs.docker.com/engine/reference/run/#logging-drivers-log-driver).
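A combined sketch of several of these new keys in a single hypothetical v1-format service (the service name and values are invented for illustration):

```yaml
web:
  build: .
  dockerfile: Dockerfile-alternate
  labels:
    com.example.description: "Hypothetical web service"
  extra_hosts:
    - "dbhost:192.168.0.17"
  pid: host
  read_only: true
  log_driver: syslog
```

Note that in this era's format `dockerfile` sits alongside `build` rather than nested under it.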
Many bugs have been fixed, including the following: - The output of `docker-compose run` was sometimes truncated, especially when running under Jenkins. - A service's volumes would sometimes not update after volume configuration was changed in `docker-compose.yml`. - Authenticating against third-party registries would sometimes fail. - `docker-compose run --rm` would fail to remove the container if the service had a `restart` policy in place. - `docker-compose scale` would refuse to scale a service beyond 1 container if it exposed a specific port number on the host. - Compose would refuse to create multiple volume entries with the same host path. Thanks @ahromis, @albers, @aleksandr-vin, @antoineco, @ccverak, @chernjie, @dnephin, @edmorley, @fordhurley, @josephpage, @KyleJamesWalker, @lsowen, @mchasal, @noironetworks, @sdake, @sdurrheimer, @sherter, @stephenlawrence, @thaJeztah, @thieman, @turtlemonvh, @twhiteman, @vdemeester, @xuxinkun and @zwily! 1.2.0 (2015-04-16) ------------------ - `docker-compose.yml` now supports an `extends` option, which enables a service to inherit configuration from another service in another configuration file. This is really good for sharing common configuration between apps, or for configuring the same app for different environments. Here's the [documentation](https://github.com/docker/compose/blob/master/docs/yml.md#extends). - When using Compose with a Swarm cluster, containers that depend on one another will be co-scheduled on the same node. This means that most Compose apps will now work out of the box, as long as they don't use `build`. - Repeated invocations of `docker-compose up` when using Compose with a Swarm cluster now work reliably. - Directories passed to `build`, filenames passed to `env_file` and volume host paths passed to `volumes` are now treated as relative to the *directory of the configuration file*, not the directory that `docker-compose` is being run in. In the majority of cases, those are the same, but if you use the `-f|--file` argument to specify a configuration file in another directory, **this is a breaking change**. - A service can now share another service's network namespace with `net: container:`. - `volumes_from` and `net: container:` entries are taken into account when resolving dependencies, so `docker-compose up ` will correctly start all dependencies of ``. - `docker-compose run` now accepts a `--user` argument to specify a user to run the command as, just like `docker run`. - The `up`, `stop` and `restart` commands now accept a `--timeout` (or `-t`) argument to specify how long to wait when attempting to gracefully stop containers, just like `docker stop`. - `docker-compose rm` now accepts `-f` as a shorthand for `--force`, just like `docker rm`. Thanks, @abesto, @albers, @alunduil, @dnephin, @funkyfuture, @gilclark, @IanVS, @KingsleyKelly, @knutwalker, @thaJeztah and @vmalloc! 1.1.0 (2015-02-25) ------------------ Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you: - The command you type is now `docker-compose`, not `fig`. - You should rename your fig.yml to docker-compose.yml. - If you’re installing via PyPI, the package is now `docker-compose`, so install it with `pip install docker-compose`. Besides that, there’s a lot of new stuff in this release: - We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). 
Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet. - `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for e.g. running your webapp with an interactive debugger. - You can now link to containers outside your app with the `external_links` option in docker-compose.yml. - You can now prevent `docker-compose up` from automatically building images with the `--no-build` option. This will make fewer API calls and run faster. - If you don’t specify a tag when using the `image` key, Compose will default to the `latest` tag, rather than pulling all tags. - `docker-compose kill` now supports the `-s` flag, allowing you to specify the exact signal you want to send to a service’s containers. - docker-compose.yml now has an `env_file` key, analogous to `docker run --env-file`, letting you specify multiple environment variables in a separate file. This is great if you have a lot of them, or if you want to keep sensitive information out of version control. - docker-compose.yml now supports the `dns_search`, `cap_add`, `cap_drop`, `cpu_shares` and `restart` options, analogous to `docker run`’s `--dns-search`, `--cap-add`, `--cap-drop`, `--cpu-shares` and `--restart` options. - Compose now ships with Bash tab completion - see the installation and usage docs at https://github.com/docker/compose/blob/1.1.0/docs/completion.md - A number of bugs have been fixed - see the milestone for details: https://github.com/docker/compose/issues?q=milestone%3A1.1.0+ Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe! 1.0.1 (2014-11-04) ------------------ - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries. - Fixed `fig run` not showing output in Jenkins. - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs. 1.0.0 (2014-10-16) ------------------ The highlights: - [Fig has joined Docker.](https://www.orchardup.com/blog/orchard-is-joining-docker) Fig will continue to be maintained, but we'll also be incorporating the best bits of Fig into Docker itself. This means the GitHub repository has moved to [https://github.com/docker/fig](https://github.com/docker/fig) and our IRC channel is now #docker-fig on Freenode. - Fig can be used with the [official Docker OS X installer](https://docs.docker.com/installation/mac/). Boot2Docker will mount the home directory from your host machine so volumes work as expected. - Fig supports Docker 1.3. - It is now possible to connect to the Docker daemon using TLS by using the `DOCKER_CERT_PATH` and `DOCKER_TLS_VERIFY` environment variables. - There is a new `fig port` command which outputs the host port binding of a service, in a similar way to `docker port`. - There is a new `fig pull` command which pulls the latest images for a service. - There is a new `fig restart` command which restarts a service's containers. - Fig creates multiple containers in a service by appending a number to the service name (e.g. `db_1`, `db_2`, etc). As a convenience, Fig will now give the first container an alias of the service name (e.g. `db`).
This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly. - Volume definitions now support `ro` mode, `~` expansion and environment variable expansion. - `.dockerignore` is supported when building. - The project name can be set with the `FIG_PROJECT_NAME` environment variable. - The `--env` and `--entrypoint` options have been added to `fig run`. - The Fig binary for Linux is now linked against an older version of glibc so it works on CentOS 6 and Debian Wheezy. Other things: - `fig ps` now works on Jenkins and makes fewer API calls to the Docker daemon. - `--verbose` displays more useful debugging output. - When starting a service where `volumes_from` points to a service without any containers running, that service will now be started. - Lots of docs improvements. Notably, environment variables are documented and official repositories are used throughout. Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew. 0.5.2 (2014-07-28) ------------------ - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`. - Fixed the `dns:` fig.yml option, which was causing fig to error out. - Fixed a bug where fig couldn't start under Python 2.6. - Fixed a log-streaming bug that occasionally caused fig to exit. Thanks @dnephin and @marksteve! 0.5.1 (2014-07-11) ------------------ - If a service has a command defined, `fig run [service]` with no further arguments will run it. - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different). - `volumes_from` now works properly with containers as well as services. - Fixed a race condition when recreating containers in `fig up`. Thanks @ryanbrainard and @d11wtq! 0.5.0 (2014-07-11) ------------------ - Fig now starts links when you run `fig run` or `fig up`. For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service. - Environment variables can now be resolved from the environment that Fig is running in. Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved:

```
environment:
  RACK_ENV: development
  SESSION_SECRET:
```

- `volumes_from` is now supported in `fig.yml`. All of the volumes from the specified services and containers will be mounted:

```
volumes_from:
  - service_name
  - container_name
```

- A host address can now be specified in `ports`:

```
ports:
  - "0.0.0.0:8000:8000"
  - "127.0.0.1:8001:8001"
```

- The `net` and `workdir` options are now supported in `fig.yml`. - The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option. - TTY behaviour is far more robust, and resizes are supported correctly. - Load YAML files safely. Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release! 0.4.2 (2014-06-18) ------------------ - Fix various encoding errors when using `fig run`, `fig up` and `fig build`. 0.4.1 (2014-05-08) ------------------ - Add support for Docker 0.11.0. (Thanks @marksteve!) - Make project name configurable. (Thanks @jefmathiot!) - Return correct exit code from `fig run`.
0.4.0 (2014-04-29) ------------------ - Support Docker 0.9 and 0.10 - Display progress bars correctly when pulling images (no more ski slopes) - `fig up` now stops all services when any container exits - Added support for the `privileged` config option in fig.yml (thanks @kvz!) - Shortened and aligned log prefixes in `fig up` output - Only containers started with `fig run` link back to their own service - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!) - Error message improvements 0.3.2 (2014-03-05) ------------------ - Added an `--rm` option to `fig run`. (Thanks @marksteve!) - Added an `expose` option to `fig.yml`. 0.3.1 (2014-03-04) ------------------ - Added contribution instructions. (Thanks @kvz!) - Fixed `fig rm` throwing an error. - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command. 0.3.0 (2014-03-03) ------------------ - We now ship binaries for OS X and Linux. No more having to install with Pip! - Add `-f` flag to specify alternate `fig.yml` files - Add support for custom link names - Fix a bug where recreating would sometimes hang - Update docker-py to support Docker 0.8.0. - Various documentation improvements - Various error message improvements Thanks @marksteve, @Gazler and @teozkr! 0.2.2 (2014-02-17) ------------------ - Resolve dependencies using Cormen/Tarjan topological sort - Fix `fig up` not printing log output - Stop containers in reverse order to starting - Fix scale command not binding ports Thanks to @barnybug and @dustinlacewell for their work on this release. 0.2.1 (2014-02-04) ------------------ - General improvements to error reporting (#77, #79) 0.2.0 (2014-01-31) ------------------ - Link services to themselves so run commands can access the running service. (#67) - Much better documentation. - Make service dependency resolution more reliable. (#48) - Load Fig configurations with a `.yaml` extension. (#58) Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release. 0.1.4 (2014-01-27) ------------------ - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54) 0.1.3 (2014-01-23) ------------------ - Fix ports sometimes being configured incorrectly. (#46) - Fix log output sometimes not displaying. (#47) 0.1.2 (2014-01-22) ------------------ - Add `-T` option to `fig run` to disable pseudo-TTY. (#34) - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. (#33) Thanks @cameronmaske! - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40) 0.1.1 (2014-01-17) ------------------ - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell! 0.1.0 (2014-01-16) ------------------ - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2) - Add `fig scale` command (#9) - Use `DOCKER_HOST` environment variable to find Docker daemon, for consistency with the official Docker client (was previously `DOCKER_URL`) (#19) - Truncate long commands in `fig ps` (#18) - Fill out CLI help banners for commands (#15, #16) - Show a friendlier error when `fig.yml` is missing (#4) - Fix bug with `fig build` logging (#3) - Fix bug where builds would time out if a step took a long time without generating output (#6) - Fix bug where streaming container output over the Unix socket raised an error (#7) Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt. 
0.0.2 (2014-01-02) ------------------ - Improve documentation - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`. - Improve `fig up` behaviour - Add confirmation prompt to `fig rm` - Add `fig build` command 0.0.1 (2013-12-20) ------------------ Initial release. compose-1.29.2/CHANGES.md000077700000000000000000000000001404620552300164512CHANGELOG.mdustar00rootroot00000000000000compose-1.29.2/CONTRIBUTING.md000066400000000000000000000067221404620552300155110ustar00rootroot00000000000000# Contributing to Compose Compose is a part of the Docker project, and follows the same rules and principles. Take a read of [Docker's contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) to get an overview. ## TL;DR Pull requests will need: - Tests - Documentation - [To be signed off](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) - A logical series of [well written commits](https://github.com/alphagov/styleguides/blob/master/git.md) ## Development environment If you're looking to contribute to Compose but you're new to the project or maybe even to Python, here are the steps that should get you started. 1. Fork [https://github.com/docker/compose](https://github.com/docker/compose) to your username. 2. Clone your forked repository locally `git clone git@github.com:yourusername/compose.git`. 3. You must [configure a remote](https://help.github.com/articles/configuring-a-remote-for-a-fork/) for your fork so that you can [sync changes you make](https://help.github.com/articles/syncing-a-fork/) with the original repository. 4. Enter the local directory `cd compose`. 5. Set up a development environment by running `python setup.py develop`. This will install the dependencies and set up a symlink from your `docker-compose` executable to the checkout of the repository. When you now run `docker-compose` from anywhere on your machine, it will run your development version of Compose. ## Install pre-commit hooks This step is optional, but recommended. Pre-commit hooks will run style checks and in some cases fix style issues for you, when you commit code. Install the git pre-commit hooks using [tox](https://tox.readthedocs.io) by running `tox -e pre-commit` or by following the [pre-commit install guide](http://pre-commit.com/#install). To run the style checks at any time run `tox -e pre-commit`. ## Submitting a pull request See Docker's [basic contribution workflow](https://docs.docker.com/v17.06/opensource/code/#code-contribution-workflow) for a guide on how to submit a pull request for code. ## Documentation changes Issues and pull requests to update the documentation should be submitted to the [docs repo](https://github.com/docker/docker.github.io). You can learn more about contributing to the documentation [here](https://docs.docker.com/opensource/#how-to-contribute-to-the-docs). ## Running the test suite Use the test script to run linting checks and then the full test suite against different Python interpreters:

    $ script/test/default

Tests are run against a Docker daemon inside a container, so that we can test against multiple Docker versions.
By default they'll run against only the latest Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run against all supported versions: $ DOCKER_VERSIONS=all script/test/default Arguments to `script/test/default` are passed through to the `tox` executable, so you can specify a test directory, file, module, class or method: $ script/test/default tests/unit $ script/test/default tests/unit/cli_test.py $ script/test/default tests/unit/config/config_test.py::ConfigTest $ script/test/default tests/unit/config/config_test.py::ConfigTest::test_load ## Finding things to work on [Issues marked with the `exp/beginner` label](https://github.com/docker/compose/issues?q=is%3Aopen+is%3Aissue+label%3Aexp%2Fbeginner) are a good starting point for people looking to make their first contribution to the project. compose-1.29.2/Dockerfile000066400000000000000000000055501404620552300152520ustar00rootroot00000000000000ARG DOCKER_VERSION=19.03 ARG PYTHON_VERSION=3.7.10 ARG BUILD_ALPINE_VERSION=3.12 ARG BUILD_CENTOS_VERSION=7 ARG BUILD_DEBIAN_VERSION=slim-stretch ARG RUNTIME_ALPINE_VERSION=3.12 ARG RUNTIME_CENTOS_VERSION=7 ARG RUNTIME_DEBIAN_VERSION=stretch-slim ARG DISTRO=alpine FROM docker:${DOCKER_VERSION} AS docker-cli FROM python:${PYTHON_VERSION}-alpine${BUILD_ALPINE_VERSION} AS build-alpine RUN apk add --no-cache \ bash \ build-base \ ca-certificates \ curl \ gcc \ git \ libc-dev \ libffi-dev \ libgcc \ make \ musl-dev \ openssl \ openssl-dev \ zlib-dev ENV BUILD_BOOTLOADER=1 FROM python:${PYTHON_VERSION}-${BUILD_DEBIAN_VERSION} AS build-debian RUN apt-get update && apt-get install --no-install-recommends -y \ curl \ gcc \ git \ libc-dev \ libffi-dev \ libgcc-6-dev \ libssl-dev \ make \ openssl \ zlib1g-dev FROM centos:${BUILD_CENTOS_VERSION} AS build-centos RUN yum install -y \ gcc \ git \ libffi-devel \ make \ openssl \ openssl-devel WORKDIR /tmp/python3/ ARG PYTHON_VERSION RUN curl -L https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz | tar xzf - \ && cd Python-${PYTHON_VERSION} \ && ./configure --enable-optimizations --enable-shared --prefix=/usr LDFLAGS="-Wl,-rpath /usr/lib" \ && make altinstall RUN alternatives --install /usr/bin/python python /usr/bin/python2.7 50 RUN alternatives --install /usr/bin/python python /usr/bin/python$(echo "${PYTHON_VERSION%.*}") 60 RUN curl https://bootstrap.pypa.io/get-pip.py | python - FROM build-${DISTRO} AS build ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"] WORKDIR /code/ COPY docker-compose-entrypoint.sh /usr/local/bin/ COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker RUN pip install \ virtualenv==20.4.0 \ tox==3.21.2 COPY requirements-dev.txt . COPY requirements-indirect.txt . COPY requirements.txt . RUN pip install -r requirements.txt -r requirements-indirect.txt -r requirements-dev.txt COPY .pre-commit-config.yaml . COPY tox.ini . COPY setup.py . COPY README.md . COPY compose compose/ RUN tox -e py37 --notest COPY . . 
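# GIT_COMMIT is injected at image build time (the Jenkinsfile above passes
# --build-arg GIT_COMMIT), so the binary produced by script/build/linux-entrypoint
# can be stamped with the exact commit it was built from.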
ARG GIT_COMMIT=unknown ENV DOCKER_COMPOSE_GITSHA=$GIT_COMMIT RUN script/build/linux-entrypoint FROM scratch AS bin ARG TARGETARCH ARG TARGETOS COPY --from=build /usr/local/bin/docker-compose /docker-compose-${TARGETOS}-${TARGETARCH} FROM alpine:${RUNTIME_ALPINE_VERSION} AS runtime-alpine FROM debian:${RUNTIME_DEBIAN_VERSION} AS runtime-debian FROM centos:${RUNTIME_CENTOS_VERSION} AS runtime-centos FROM runtime-${DISTRO} AS runtime COPY docker-compose-entrypoint.sh /usr/local/bin/ ENTRYPOINT ["sh", "/usr/local/bin/docker-compose-entrypoint.sh"] COPY --from=docker-cli /usr/local/bin/docker /usr/local/bin/docker COPY --from=build /usr/local/bin/docker-compose /usr/local/bin/docker-compose compose-1.29.2/Jenkinsfile000066400000000000000000000076611404620552300154510ustar00rootroot00000000000000#!groovy def dockerVersions = ['19.03.13'] def baseImages = ['alpine', 'debian'] def pythonVersions = ['py37'] pipeline { agent none options { skipDefaultCheckout(true) buildDiscarder(logRotator(daysToKeepStr: '30')) timeout(time: 2, unit: 'HOURS') timestamps() } environment { DOCKER_BUILDKIT="1" } stages { stage('Build test images') { // TODO use declarative 1.5.0 `matrix` once available on CI parallel { stage('alpine') { agent { label 'ubuntu-2004 && amd64 && !zfs && cgroup1' } steps { buildImage('alpine') } } stage('debian') { agent { label 'ubuntu-2004 && amd64 && !zfs && cgroup1' } steps { buildImage('debian') } } } } stage('Test') { steps { // TODO use declarative 1.5.0 `matrix` once available on CI script { def testMatrix = [:] baseImages.each { baseImage -> dockerVersions.each { dockerVersion -> pythonVersions.each { pythonVersion -> testMatrix["${baseImage}_${dockerVersion}_${pythonVersion}"] = runTests(dockerVersion, pythonVersion, baseImage) } } } parallel testMatrix } } } } } def buildImage(baseImage) { def scmvar = checkout(scm) def imageName = "dockerpinata/compose:${baseImage}-${scmvar.GIT_COMMIT}" image = docker.image(imageName) withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') { try { image.pull() } catch (Exception exc) { ansiColor('xterm') { sh """docker build -t ${imageName} \\ --target build \\ --build-arg DISTRO="${baseImage}" \\ --build-arg GIT_COMMIT="${scmvar.GIT_COMMIT}" \\ .\\ """ sh "docker push ${imageName}" } echo "${imageName}" return imageName } } } def runTests(dockerVersion, pythonVersion, baseImage) { return { stage("python=${pythonVersion} docker=${dockerVersion} ${baseImage}") { node("ubuntu-2004 && amd64 && !zfs && cgroup1") { def scmvar = checkout(scm) def imageName = "dockerpinata/compose:${baseImage}-${scmvar.GIT_COMMIT}" def storageDriver = sh(script: "docker info -f \'{{.Driver}}\'", returnStdout: true).trim() echo "Using local system's storage driver: ${storageDriver}" withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') { sh """docker run \\ -t \\ --rm \\ --privileged \\ --volume="\$(pwd)/.git:/code/.git" \\ --volume="/var/run/docker.sock:/var/run/docker.sock" \\ --volume="\${DOCKER_CONFIG}/config.json:/root/.docker/config.json" \\ -e "DOCKER_TLS_CERTDIR=" \\ -e "TAG=${imageName}" \\ -e "STORAGE_DRIVER=${storageDriver}" \\ -e "DOCKER_VERSIONS=${dockerVersion}" \\ -e "BUILD_NUMBER=${env.BUILD_NUMBER}" \\ -e "PY_TEST_VERSIONS=${pythonVersion}" \\ --entrypoint="script/test/ci" \\ ${imageName} \\ --verbose """ } } } } } compose-1.29.2/LICENSE000066400000000000000000000250061404620552300142630ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, 
REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2014 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. compose-1.29.2/MAINTAINERS000066400000000000000000000051211404620552300147470ustar00rootroot00000000000000# Compose maintainers file # # This file describes who runs the docker/compose project and how. # This is a living document - if you see something out of date or missing, speak up! # # It is structured to be consumable by both humans and programs. # To extract its contents programmatically, use any TOML-compliant parser. # # This file is compiled into the MAINTAINERS file in docker/opensource. 
# [Org] [Org."Core maintainers"] people = [ "aiordache", "ndeloof", "rumpl", "ulyssessouza", ] [Org.Alumni] people = [ # Aanand Prasad is one of the two creators of the fig project # which later went on to become docker-compose, and a longtime # maintainer responsible for several keystone features "aanand", # Ben Firshman is also one of the fig creators and contributed # heavily to the project's design and UX as well as the # day-to-day maintenance "bfirsh", # Mazz Mosley made significant contributions to the project # in 2015 with solid bugfixes and improved error handling # among them "mnowster", # Daniel Nephin is one of the longest-running maintainers on # the Compose project, and has contributed several major features # including multi-file support, variable interpolation, secrets # emulation and many more "dnephin", "shin-", "mefyl", "mnottale", ] [people] # A reference list of all people associated with the project. # All other sections should refer to people by their canonical key # in the people section. # ADD YOURSELF HERE IN ALPHABETICAL ORDER [people.aanand] Name = "Aanand Prasad" Email = "aanand.prasad@gmail.com" GitHub = "aanand" [people.aiordache] Name = "Anca Iordache" Email = "anca.iordache@docker.com" GitHub = "aiordache" [people.bfirsh] Name = "Ben Firshman" Email = "ben@firshman.co.uk" GitHub = "bfirsh" [people.dnephin] Name = "Daniel Nephin" Email = "dnephin@gmail.com" GitHub = "dnephin" [people.mefyl] Name = "Quentin Hocquet" Email = "quentin.hocquet@docker.com" GitHub = "mefyl" [people.mnottale] Name = "Matthieu Nottale" Email = "matthieu.nottale@docker.com" GitHub = "mnottale" [people.mnowster] Name = "Mazz Mosley" Email = "mazz@houseofmnowster.com" GitHub = "mnowster" [people.ndeloof] Name = "Nicolas De Loof" Email = "nicolas.deloof@gmail.com" GitHub = "ndeloof" [people.rumpl] Name = "Djordje Lukic" Email = "djordje.lukic@docker.com" GitHub = "rumpl" [people.shin-] Name = "Joffrey F" Email = "f.joffrey@gmail.com" GitHub = "shin-" [people.ulyssessouza] Name = "Ulysses Domiciano Souza" Email = "ulysses.souza@docker.com" GitHub = "ulyssessouza" compose-1.29.2/MANIFEST.in000066400000000000000000000005371404620552300150160ustar00rootroot00000000000000include Dockerfile include LICENSE include requirements-indirect.txt include requirements.txt include requirements-dev.txt include tox.ini include *.md include README.md include compose/config/*.json include compose/GITSHA recursive-include contrib/completion * recursive-include tests * global-exclude *.pyc global-exclude *.pyo global-exclude *.un~ compose-1.29.2/Makefile000066400000000000000000000030451404620552300147150ustar00rootroot00000000000000TAG = "docker-compose:alpine-$(shell git rev-parse --short HEAD)" GIT_VOLUME = "--volume=$(shell pwd)/.git:/code/.git" DOCKERFILE ?="Dockerfile" DOCKER_BUILD_TARGET ?="build" UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) BUILD_SCRIPT = linux endif ifeq ($(UNAME_S),Darwin) BUILD_SCRIPT = osx endif COMPOSE_SPEC_SCHEMA_PATH = "compose/config/compose_spec.json" COMPOSE_SPEC_RAW_URL = "https://raw.githubusercontent.com/compose-spec/compose-spec/master/schema/compose-spec.json" all: cli cli: download-compose-spec ## Compile the cli ./script/build/$(BUILD_SCRIPT) download-compose-spec: ## Download the compose-spec schema from its repo curl -so $(COMPOSE_SPEC_SCHEMA_PATH) $(COMPOSE_SPEC_RAW_URL) cache-clear: ## Clear the builder cache @docker builder prune --force --filter type=exec.cachemount --filter=unused-for=24h base-image: ## Builds base image docker build -f $(DOCKERFILE)
-t $(TAG) --target $(DOCKER_BUILD_TARGET) . lint: base-image ## Run linter docker run --rm \ --tty \ $(GIT_VOLUME) \ $(TAG) \ tox -e pre-commit test-unit: base-image ## Run tests docker run --rm \ --tty \ $(GIT_VOLUME) \ $(TAG) \ pytest -v tests/unit/ test: ## Run all tests ./script/test/default pre-commit: lint test-unit cli help: ## Show help @echo Please specify a build target. The choices are: @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' FORCE: .PHONY: all cli download-compose-spec cache-clear base-image lint test-unit test pre-commit help compose-1.29.2/README.md000066400000000000000000000054361404620552300145400ustar00rootroot00000000000000Docker Compose ============== [![Build Status](https://ci-next.docker.com/public/buildStatus/icon?job=compose/master)](https://ci-next.docker.com/public/job/compose/job/master/) ![Docker Compose](logo.png?raw=true "Docker Compose Logo") Docker Compose is a tool for running multi-container applications on Docker defined using the [Compose file format](https://compose-spec.io). A Compose file is used to define how one or more containers that make up your application are configured. Once you have a Compose file, you can create and start your application with a single command: `docker-compose up`. Compose files can be used to deploy applications locally, or to the cloud on [Amazon ECS](https://aws.amazon.com/ecs) or [Microsoft ACI](https://azure.microsoft.com/services/container-instances/) using the Docker CLI. You can read more about how to do this: - [Compose for Amazon ECS](https://docs.docker.com/engine/context/ecs-integration/) - [Compose for Microsoft ACI](https://docs.docker.com/engine/context/aci-integration/) Where to get Docker Compose ---------------------------- ### Windows and macOS Docker Compose is included in [Docker Desktop](https://www.docker.com/products/docker-desktop) for Windows and macOS. ### Linux You can download Docker Compose binaries from the [release page](https://github.com/docker/compose/releases) on this repository. ### Using pip If your platform is not supported, you can download Docker Compose using `pip`: ```console pip install docker-compose ``` > **Note:** Docker Compose requires Python 3.6 or later. Quick Start ----------- Using Docker Compose is a three-step process: 1. Define your app's environment with a `Dockerfile` so it can be reproduced anywhere. 2. Define the services that make up your app in `docker-compose.yml` so they can be run together in an isolated environment. 3. Lastly, run `docker-compose up` and Compose will start and run your entire app. A Compose file looks like this: ```yaml services: web: build: . ports: - "5000:5000" volumes: - .:/code redis: image: redis ``` You can find examples of Compose applications in our [Awesome Compose repository](https://github.com/docker/awesome-compose). For more information about the Compose format, see the [Compose file reference](https://docs.docker.com/compose/compose-file/). Contributing ------------ Want to help develop Docker Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md). If you find an issue, please report it on the [issue tracker](https://github.com/docker/compose/issues/new/choose). Releasing --------- Releases are built by maintainers, following an outline of the [release process](https://github.com/docker/compose/blob/master/project/RELEASE-PROCESS.md).
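For reference, the `docker-compose` executable is only a thin wrapper around this Python package: the `bin/docker-compose` script in this repository simply imports the CLI's entry point and calls it. A minimal sketch of an equivalent launcher (this mirrors the bundled script; `main()` parses `sys.argv` and dispatches to the matching subcommand):

```python
#!/usr/bin/env python
# Minimal launcher equivalent to bin/docker-compose in this repository.
# main() reads sys.argv, dispatches to the requested subcommand handler,
# and exits with that command's status code.
from compose.cli.main import main

main()
```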
compose-1.29.2/Release.Jenkinsfile000066400000000000000000000277051404620552300170310ustar00rootroot00000000000000#!groovy def dockerVersions = ['19.03.13', '18.09.9'] def baseImages = ['alpine', 'debian'] def pythonVersions = ['py37'] pipeline { agent none options { skipDefaultCheckout(true) buildDiscarder(logRotator(daysToKeepStr: '30')) timeout(time: 2, unit: 'HOURS') timestamps() } environment { DOCKER_BUILDKIT="1" } stages { stage('Build test images') { // TODO use declarative 1.5.0 `matrix` once available on CI parallel { stage('alpine') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { buildImage('alpine') } } stage('debian') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { buildImage('debian') } } } } stage('Test') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { // TODO use declarative 1.5.0 `matrix` once available on CI script { def testMatrix = [:] baseImages.each { baseImage -> dockerVersions.each { dockerVersion -> pythonVersions.each { pythonVersion -> testMatrix["${baseImage}_${dockerVersion}_${pythonVersion}"] = runTests(dockerVersion, pythonVersion, baseImage) } } } parallel testMatrix } } } stage('Generate Changelog') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { checkout scm withCredentials([string(credentialsId: 'github-compose-release-test-token', variable: 'GITHUB_TOKEN')]) { sh "./script/release/generate_changelog.sh" } archiveArtifacts artifacts: 'CHANGELOG.md' stash( name: "changelog", includes: 'CHANGELOG.md' ) } } stage('Package') { parallel { stage('macosx binary') { agent { label 'mac-python' } environment { DEPLOYMENT_TARGET="10.11" } steps { checkout scm sh './script/setup/osx' sh 'tox -e py39 -- tests/unit' sh './script/build/osx' dir ('dist') { checksum('docker-compose-Darwin-x86_64') checksum('docker-compose-Darwin-x86_64.tgz') } archiveArtifacts artifacts: 'dist/*', fingerprint: true dir("dist") { stash name: "bin-darwin" } } } stage('linux binary') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { checkout scm sh ' ./script/build/linux' dir ('dist') { checksum('docker-compose-Linux-x86_64') } archiveArtifacts artifacts: 'dist/*', fingerprint: true dir("dist") { stash name: "bin-linux" } } } stage('windows binary') { agent { label 'windows-python' } environment { PATH = "C:\\Python39;C:\\Python39\\Scripts;$PATH" } steps { checkout scm bat 'tox.exe -e py39 -- tests/unit' powershell '.\\script\\build\\windows.ps1' dir ('dist') { checksum('docker-compose-Windows-x86_64.exe') } archiveArtifacts artifacts: 'dist/*', fingerprint: true dir("dist") { stash name: "bin-win" } } } stage('alpine image') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { buildRuntimeImage('alpine') } } stage('debian image') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { buildRuntimeImage('debian') } } } } stage('Release') { when { buildingTag() } parallel { stage('Pushing images') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } steps { pushRuntimeImage('alpine') pushRuntimeImage('debian') } } stage('Creating Github Release') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } environment { GITHUB_TOKEN = credentials('github-release-token') } steps { checkout scm sh 'mkdir -p dist' dir("dist") { unstash "bin-darwin" unstash "bin-linux" unstash "bin-win" unstash "changelog" sh(""" curl -SfL 
https://github.com/github/hub/releases/download/v2.13.0/hub-linux-amd64-2.13.0.tgz | tar xzv --wildcards 'hub-*/bin/hub' --strip=2 ./hub release create --draft --prerelease=${env.TAG_NAME !=~ /v[0-9\.]+/} \\ -a docker-compose-Darwin-x86_64 \\ -a docker-compose-Darwin-x86_64.sha256 \\ -a docker-compose-Darwin-x86_64.tgz \\ -a docker-compose-Darwin-x86_64.tgz.sha256 \\ -a docker-compose-Linux-x86_64 \\ -a docker-compose-Linux-x86_64.sha256 \\ -a docker-compose-Windows-x86_64.exe \\ -a docker-compose-Windows-x86_64.exe.sha256 \\ -a ../script/run/run.sh \\ -F CHANGELOG.md \${TAG_NAME} """) } } } stage('Publishing Python packages') { agent { label 'linux && docker && ubuntu-2004 && amd64 && cgroup1' } environment { PYPIRC = credentials('pypirc-docker-dsg-cibot') } steps { checkout scm sh """ rm -rf build/ dist/ pip3 install wheel python3 setup.py sdist bdist_wheel pip3 install twine ~/.local/bin/twine upload --config-file ${PYPIRC} ./dist/docker-compose-*.tar.gz ./dist/docker_compose-*-py2.py3-none-any.whl """ } } } } } } def buildImage(baseImage) { def scmvar = checkout(scm) def imageName = "dockerpinata/compose:${baseImage}-${scmvar.GIT_COMMIT}" image = docker.image(imageName) withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') { try { image.pull() } catch (Exception exc) { ansiColor('xterm') { sh """docker build -t ${imageName} \\ --target build \\ --build-arg DISTRO="${baseImage}" \\ --build-arg GIT_COMMIT="${scmvar.GIT_COMMIT}" \\ .\\ """ sh "docker push ${imageName}" } echo "${imageName}" return imageName } } } def runTests(dockerVersion, pythonVersion, baseImage) { return { stage("python=${pythonVersion} docker=${dockerVersion} ${baseImage}") { node("linux && docker && ubuntu-2004 && amd64 && cgroup1") { def scmvar = checkout(scm) def imageName = "dockerpinata/compose:${baseImage}-${scmvar.GIT_COMMIT}" def storageDriver = sh(script: "docker info -f \'{{.Driver}}\'", returnStdout: true).trim() echo "Using local system's storage driver: ${storageDriver}" withDockerRegistry(credentialsId:'dockerbuildbot-index.docker.io') { sh """docker run \\ -t \\ --rm \\ --privileged \\ --volume="\$(pwd)/.git:/code/.git" \\ --volume="/var/run/docker.sock:/var/run/docker.sock" \\ --volume="\${DOCKER_CONFIG}/config.json:/root/.docker/config.json" \\ -e "DOCKER_TLS_CERTDIR=" \\ -e "TAG=${imageName}" \\ -e "STORAGE_DRIVER=${storageDriver}" \\ -e "DOCKER_VERSIONS=${dockerVersion}" \\ -e "BUILD_NUMBER=${env.BUILD_NUMBER}" \\ -e "PY_TEST_VERSIONS=${pythonVersion}" \\ --entrypoint="script/test/ci" \\ ${imageName} \\ --verbose """ } } } } } def buildRuntimeImage(baseImage) { scmvar = checkout scm def imageName = "docker/compose:${baseImage}-${env.BRANCH_NAME}" ansiColor('xterm') { sh """docker build -t ${imageName} \\ --build-arg DISTRO="${baseImage}" \\ --build-arg GIT_COMMIT="${scmvar.GIT_COMMIT.take(7)}" \\ . 
""" } sh "mkdir -p dist" sh "docker save ${imageName} -o dist/docker-compose-${baseImage}.tar" stash name: "compose-${baseImage}", includes: "dist/docker-compose-${baseImage}.tar" } def pushRuntimeImage(baseImage) { unstash "compose-${baseImage}" sh "docker load -i dist/docker-compose-${baseImage}.tar" withDockerRegistry(credentialsId: 'dockerhub-dockerdsgcibot') { sh "docker push docker/compose:${baseImage}-${env.TAG_NAME}" if (baseImage == "alpine" && env.TAG_NAME != null) { sh "docker tag docker/compose:alpine-${env.TAG_NAME} docker/compose:${env.TAG_NAME}" sh "docker push docker/compose:${env.TAG_NAME}" } } } def checksum(filepath) { if (isUnix()) { sh "openssl sha256 -r -out ${filepath}.sha256 ${filepath}" } else { powershell "(Get-FileHash -Path ${filepath} -Algorithm SHA256 | % hash).ToLower() + ' *${filepath}' | Out-File -encoding ascii ${filepath}.sha256" } } compose-1.29.2/SWARM.md000066400000000000000000000000771404620552300144720ustar00rootroot00000000000000This file has moved to: https://docs.docker.com/compose/swarm/ compose-1.29.2/bin/000077500000000000000000000000001404620552300140235ustar00rootroot00000000000000compose-1.29.2/bin/docker-compose000077500000000000000000000000771404620552300166670ustar00rootroot00000000000000#!/usr/bin/env python from compose.cli.main import main main() compose-1.29.2/compose/000077500000000000000000000000001404620552300147205ustar00rootroot00000000000000compose-1.29.2/compose/__init__.py000066400000000000000000000000271404620552300170300ustar00rootroot00000000000000__version__ = '1.29.2' compose-1.29.2/compose/__main__.py000066400000000000000000000000521404620552300170070ustar00rootroot00000000000000from compose.cli.main import main main() compose-1.29.2/compose/cli/000077500000000000000000000000001404620552300154675ustar00rootroot00000000000000compose-1.29.2/compose/cli/__init__.py000066400000000000000000000000001404620552300175660ustar00rootroot00000000000000compose-1.29.2/compose/cli/colors.py000066400000000000000000000023771404620552300173530ustar00rootroot00000000000000import enum import os from ..const import IS_WINDOWS_PLATFORM NAMES = [ 'grey', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' ] @enum.unique class AnsiMode(enum.Enum): """Enumeration for when to output ANSI colors.""" NEVER = "never" ALWAYS = "always" AUTO = "auto" def use_ansi_codes(self, stream): if self is AnsiMode.ALWAYS: return True if self is AnsiMode.NEVER or os.environ.get('CLICOLOR') == '0': return False return stream.isatty() def get_pairs(): for i, name in enumerate(NAMES): yield (name, str(30 + i)) yield ('intense_' + name, str(30 + i) + ';1') def ansi(code): return '\033[{}m'.format(code) def ansi_color(code, s): return '{}{}{}'.format(ansi(code), s, ansi(0)) def make_color_fn(code): return lambda s: ansi_color(code, s) if IS_WINDOWS_PLATFORM: import colorama colorama.init(strip=False) for (name, code) in get_pairs(): globals()[name] = make_color_fn(code) def rainbow(): cs = ['cyan', 'yellow', 'green', 'magenta', 'blue', 'intense_cyan', 'intense_yellow', 'intense_green', 'intense_magenta', 'intense_blue'] for c in cs: yield globals()[c] compose-1.29.2/compose/cli/command.py000066400000000000000000000151361404620552300174650ustar00rootroot00000000000000import logging import os import re from . import errors from .. import config from .. 
import parallel from ..config.environment import Environment from ..const import LABEL_CONFIG_FILES from ..const import LABEL_ENVIRONMENT_FILE from ..const import LABEL_WORKING_DIR from ..project import Project from .docker_client import get_client from .docker_client import load_context from .docker_client import make_context from .errors import UserError log = logging.getLogger(__name__) SILENT_COMMANDS = { 'events', 'exec', 'kill', 'logs', 'pause', 'ps', 'restart', 'rm', 'start', 'stop', 'top', 'unpause', } def project_from_options(project_dir, options, additional_options=None): additional_options = additional_options or {} override_dir = get_project_dir(options) environment_file = options.get('--env-file') environment = Environment.from_env_file(override_dir or project_dir, environment_file) environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS set_parallel_limit(environment) # get the context for the run context = None context_name = options.get('--context', None) if context_name: context = load_context(context_name) if not context: raise UserError("Context '{}' not found".format(context_name)) host = options.get('--host', None) if host is not None: if context: raise UserError( "-H, --host and -c, --context are mutually exclusive. Only one should be set.") host = host.lstrip('=') context = make_context(host, options, environment) return get_project( project_dir, get_config_path_from_options(options, environment), project_name=options.get('--project-name'), verbose=options.get('--verbose'), context=context, environment=environment, override_dir=override_dir, interpolate=(not additional_options.get('--no-interpolate')), environment_file=environment_file, enabled_profiles=get_profiles_from_options(options, environment) ) def set_parallel_limit(environment): parallel_limit = environment.get('COMPOSE_PARALLEL_LIMIT') if parallel_limit: try: parallel_limit = int(parallel_limit) except ValueError: raise errors.UserError( 'COMPOSE_PARALLEL_LIMIT must be an integer (found: "{}")'.format( environment.get('COMPOSE_PARALLEL_LIMIT') ) ) if parallel_limit <= 1: raise errors.UserError('COMPOSE_PARALLEL_LIMIT can not be less than 2') parallel.GlobalLimit.set_global_limit(parallel_limit) def get_project_dir(options): override_dir = None files = get_config_path_from_options(options, os.environ) if files: if files[0] == '-': return '.' 
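# Otherwise derive the fallback project directory from the location of the # first config file; an explicit --project-directory still takes precedence # in the return below.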
override_dir = os.path.dirname(files[0]) return options.get('--project-directory') or override_dir def get_config_from_options(base_dir, options, additional_options=None): additional_options = additional_options or {} override_dir = get_project_dir(options) environment_file = options.get('--env-file') environment = Environment.from_env_file(override_dir or base_dir, environment_file) config_path = get_config_path_from_options(options, environment) return config.load( config.find(base_dir, config_path, environment, override_dir), not additional_options.get('--no-interpolate') ) def get_config_path_from_options(options, environment): def unicode_paths(paths): return [p.decode('utf-8') if isinstance(p, bytes) else p for p in paths] file_option = options.get('--file') if file_option: return unicode_paths(file_option) config_files = environment.get('COMPOSE_FILE') if config_files: pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep) return unicode_paths(config_files.split(pathsep)) return None def get_profiles_from_options(options, environment): profile_option = options.get('--profile') if profile_option: return profile_option profiles = environment.get('COMPOSE_PROFILES') if profiles: return profiles.split(',') return [] def get_project(project_dir, config_path=None, project_name=None, verbose=False, context=None, environment=None, override_dir=None, interpolate=True, environment_file=None, enabled_profiles=None): if not environment: environment = Environment.from_env_file(project_dir) config_details = config.find(project_dir, config_path, environment, override_dir) project_name = get_project_name( config_details.working_dir, project_name, environment ) config_data = config.load(config_details, interpolate) api_version = environment.get('COMPOSE_API_VERSION') client = get_client( verbose=verbose, version=api_version, context=context, environment=environment ) with errors.handle_connection_errors(client): return Project.from_config( project_name, config_data, client, environment.get('DOCKER_DEFAULT_PLATFORM'), execution_context_labels(config_details, environment_file), enabled_profiles, ) def execution_context_labels(config_details, environment_file): extra_labels = [ '{}={}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir)) ] if not use_config_from_stdin(config_details): extra_labels.append('{}={}'.format(LABEL_CONFIG_FILES, config_files_label(config_details))) if environment_file is not None: extra_labels.append('{}={}'.format( LABEL_ENVIRONMENT_FILE, os.path.normpath(environment_file)) ) return extra_labels def use_config_from_stdin(config_details): for c in config_details.config_files: if not c.filename: return True return False def config_files_label(config_details): return ",".join( os.path.normpath(c.filename) for c in config_details.config_files ) def get_project_name(working_dir, project_name=None, environment=None): def normalize_name(name): return re.sub(r'[^-_a-z0-9]', '', name.lower()) if not environment: environment = Environment.from_env_file(working_dir) project_name = project_name or environment.get('COMPOSE_PROJECT_NAME') if project_name: return normalize_name(project_name) project = os.path.basename(os.path.abspath(working_dir)) if project: return normalize_name(project) return 'default' compose-1.29.2/compose/cli/docker_client.py000066400000000000000000000137541404620552300206600ustar00rootroot00000000000000import logging import os.path import ssl from docker import APIClient from docker import Context from docker import ContextAPI from docker 
import TLSConfig from docker.errors import TLSParameterError from docker.utils import kwargs_from_env from docker.utils.config import home_dir from . import verbose_proxy from ..config.environment import Environment from ..const import HTTP_TIMEOUT from ..utils import unquote_path from .errors import UserError from .utils import generate_user_agent from .utils import get_version_info log = logging.getLogger(__name__) def default_cert_path(): return os.path.join(home_dir(), '.docker') def make_context(host, options, environment): tls = tls_config_from_options(options, environment) ctx = Context("compose", host=host, tls=tls.verify if tls else False) if tls: ctx.set_endpoint("docker", host, tls, skip_tls_verify=not tls.verify) return ctx def load_context(name=None): return ContextAPI.get_context(name) def get_client(environment, verbose=False, version=None, context=None): client = docker_client( version=version, context=context, environment=environment, tls_version=get_tls_version(environment) ) if verbose: version_info = client.version().items() log.info(get_version_info('full')) log.info("Docker base_url: %s", client.base_url) log.info("Docker version: %s", ", ".join("%s=%s" % item for item in version_info)) return verbose_proxy.VerboseProxy('docker', client) return client def get_tls_version(environment): compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None) if not compose_tls_version: return None tls_attr_name = "PROTOCOL_{}".format(compose_tls_version) if not hasattr(ssl, tls_attr_name): log.warning( 'The "{}" protocol is unavailable. You may need to update your ' 'version of Python or OpenSSL. Falling back to TLSv1 (default).' .format(compose_tls_version) ) return None return getattr(ssl, tls_attr_name) def tls_config_from_options(options, environment=None): environment = environment or Environment() cert_path = environment.get('DOCKER_CERT_PATH') or None tls = options.get('--tls', False) ca_cert = unquote_path(options.get('--tlscacert')) cert = unquote_path(options.get('--tlscert')) key = unquote_path(options.get('--tlskey')) # verify is a special case - with docopt `--tlsverify` = False means it # wasn't used, so we set it if either the environment or the flag is True # see https://github.com/docker/compose/issues/5632 verify = options.get('--tlsverify') or environment.get_boolean('DOCKER_TLS_VERIFY') skip_hostname_check = options.get('--skip-hostname-check', False) if cert_path is not None and not any((ca_cert, cert, key)): # FIXME: Modify TLSConfig to take a cert_path argument and do this internally cert = os.path.join(cert_path, 'cert.pem') key = os.path.join(cert_path, 'key.pem') ca_cert = os.path.join(cert_path, 'ca.pem') if verify and not any((ca_cert, cert, key)): # Default location for cert files is ~/.docker ca_cert = os.path.join(default_cert_path(), 'ca.pem') cert = os.path.join(default_cert_path(), 'cert.pem') key = os.path.join(default_cert_path(), 'key.pem') tls_version = get_tls_version(environment) advanced_opts = any([ca_cert, cert, key, verify, tls_version]) if tls is True and not advanced_opts: return True elif advanced_opts: # --tls is a noop client_cert = None if cert or key: client_cert = (cert, key) return TLSConfig( client_cert=client_cert, verify=verify, ca_cert=ca_cert, assert_hostname=False if skip_hostname_check else None, ssl_version=tls_version ) return None def docker_client(environment, version=None, context=None, tls_version=None): """ Returns a docker-py client configured using environment variables according to the same logic as the 
official Docker client. """ try: kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version) except TLSParameterError: raise UserError( "TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY " "and DOCKER_CERT_PATH are set correctly.\n" "You might need to run `eval \"$(docker-machine env default)\"`") if not context: # check env for DOCKER_HOST and certs path host = kwargs.get("base_url", None) tls = kwargs.get("tls", None) verify = False if not tls else tls.verify if host: context = Context("compose", host=host, tls=verify) else: context = ContextAPI.get_current_context() if tls: context.set_endpoint("docker", host=host, tls_cfg=tls, skip_tls_verify=not verify) if not context.is_docker_host(): raise UserError( "The platform targeted with the current context is not supported.\n" "Make sure the context in use targets a Docker Engine.\n") kwargs['base_url'] = context.Host if context.TLSConfig: kwargs['tls'] = context.TLSConfig if version: kwargs['version'] = version timeout = environment.get('COMPOSE_HTTP_TIMEOUT') if timeout: kwargs['timeout'] = int(timeout) else: kwargs['timeout'] = HTTP_TIMEOUT kwargs['user_agent'] = generate_user_agent() # Workaround for # https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#ld-library-path-libpath-considerations if 'LD_LIBRARY_PATH_ORIG' in environment: kwargs['credstore_env'] = { 'LD_LIBRARY_PATH': environment.get('LD_LIBRARY_PATH_ORIG'), } use_paramiko_ssh = int(environment.get('COMPOSE_PARAMIKO_SSH', 0)) client = APIClient(use_ssh_client=not use_paramiko_ssh, **kwargs) client._original_base_url = kwargs.get('base_url') return client compose-1.29.2/compose/cli/docopt_command.py000066400000000000000000000034501404620552300210310ustar00rootroot00000000000000from inspect import getdoc from docopt import docopt from docopt import DocoptExit def docopt_full_help(docstring, *args, **kwargs): try: return docopt(docstring, *args, **kwargs) except DocoptExit: raise SystemExit(docstring) class DocoptDispatcher: def __init__(self, command_class, options): self.command_class = command_class self.options = options @classmethod def get_command_and_options(cls, doc_entity, argv, options): command_help = getdoc(doc_entity) opt = docopt_full_help(command_help, argv, **options) command = opt['COMMAND'] return command_help, opt, command def parse(self, argv): command_help, options, command = DocoptDispatcher.get_command_and_options( self.command_class, argv, self.options) if command is None: raise SystemExit(command_help) handler = get_handler(self.command_class, command) docstring = getdoc(handler) if docstring is None: raise NoSuchCommand(command, self) command_options = docopt_full_help(docstring, options['ARGS'], options_first=True) return options, handler, command_options def get_handler(command_class, command): command = command.replace('-', '_') # we certainly want to have "exec" command, since that's what docker client has # but in python exec is a keyword if command == "exec": command = "exec_command" if not hasattr(command_class, command): raise NoSuchCommand(command, command_class) return getattr(command_class, command) class NoSuchCommand(Exception): def __init__(self, command, supercommand): super().__init__("No such command: %s" % command) self.command = command self.supercommand = supercommand compose-1.29.2/compose/cli/errors.py000066400000000000000000000121021404620552300173510ustar00rootroot00000000000000import contextlib import logging import socket from distutils.spawn import find_executable from textwrap 
import dedent from docker.errors import APIError from requests.exceptions import ConnectionError as RequestsConnectionError from requests.exceptions import ReadTimeout from requests.exceptions import SSLError from requests.packages.urllib3.exceptions import ReadTimeoutError from ..const import API_VERSION_TO_ENGINE_VERSION from .utils import binarystr_to_unicode from .utils import is_docker_for_mac_installed from .utils import is_mac from .utils import is_ubuntu from .utils import is_windows log = logging.getLogger(__name__) class UserError(Exception): def __init__(self, msg): self.msg = dedent(msg).strip() def __str__(self): return self.msg class ConnectionError(Exception): pass @contextlib.contextmanager def handle_connection_errors(client): try: yield except SSLError as e: log.error('SSL error: %s' % e) raise ConnectionError() except RequestsConnectionError as e: if e.args and isinstance(e.args[0], ReadTimeoutError): log_timeout_error(client.timeout) raise ConnectionError() exit_with_error(get_conn_error_message(client.base_url)) except APIError as e: log_api_error(e, client.api_version) raise ConnectionError() except (ReadTimeout, socket.timeout): log_timeout_error(client.timeout) raise ConnectionError() except Exception as e: if is_windows(): import pywintypes if isinstance(e, pywintypes.error): log_windows_pipe_error(e) raise ConnectionError() raise def log_windows_pipe_error(exc): if exc.winerror == 2: log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.") elif exc.winerror == 232: # https://github.com/docker/compose/issues/5005 log.error( "The current Compose file version is not compatible with your engine version. " "Please upgrade your Compose file to a more recent version, or set " "a COMPOSE_API_VERSION in your environment." ) else: log.error( "Windows named pipe error: {} (code: {})".format( binarystr_to_unicode(exc.strerror), exc.winerror ) ) def log_timeout_error(timeout): log.error( "An HTTP request took too long to complete. Retry with --verbose to " "obtain debug information.\n" "If you encounter this issue regularly because of slow network " "conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher " "value (current value: %s)." % timeout) def log_api_error(e, client_version): explanation = binarystr_to_unicode(e.explanation) if 'client is newer than server' not in explanation: log.error(explanation) return version = API_VERSION_TO_ENGINE_VERSION.get(client_version) if not version: # They've set a custom API version log.error(explanation) return log.error( "The Docker Engine version is less than the minimum required by " "Compose. Your current project requires a Docker Engine of " "version {version} or greater.".format(version=version) ) def exit_with_error(msg): log.error(dedent(msg).strip()) raise ConnectionError() def get_conn_error_message(url): try: if find_executable('docker') is None: return docker_not_found_msg("Couldn't connect to Docker daemon.") if is_docker_for_mac_installed(): return conn_error_docker_for_mac if find_executable('docker-machine') is not None: return conn_error_docker_machine except UnicodeDecodeError: # https://github.com/docker/compose/issues/5442 # Ignore the error and print the generic message instead. 
pass return conn_error_generic.format(url=url) def docker_not_found_msg(problem): return "{} You might need to install Docker:\n\n{}".format( problem, docker_install_url()) def docker_install_url(): if is_mac(): return docker_install_url_mac elif is_ubuntu(): return docker_install_url_ubuntu elif is_windows(): return docker_install_url_windows else: return docker_install_url_generic docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/" docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/" docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/" docker_install_url_generic = "https://docs.docker.com/engine/installation/" conn_error_docker_machine = """ Couldn't connect to Docker daemon - you might need to run `docker-machine start default`. """ conn_error_docker_for_mac = """ Couldn't connect to Docker daemon. You might need to start Docker for Mac. """ conn_error_generic = """ Couldn't connect to Docker daemon at {url} - is it running? If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable. """ compose-1.29.2/compose/cli/formatter.py000066400000000000000000000032421404620552300200450ustar00rootroot00000000000000import logging from shutil import get_terminal_size import texttable from compose.cli import colors def get_tty_width(): try: # get_terminal_size can't determine the size if compose is piped # to another command. But in such a case it doesn't make sense to # try to format the output to the terminal size, as this output is # consumed by another command. So let's pretend we have a huge # terminal so the output is single-lined width, _ = get_terminal_size(fallback=(999, 0)) return int(width) except OSError: return 0 class Formatter: """Format tabular data for printing.""" @staticmethod def table(headers, rows): table = texttable.Texttable(max_width=get_tty_width()) table.set_cols_dtype(['t' for h in headers]) table.add_rows([headers] + rows) table.set_deco(table.HEADER) table.set_chars(['-', '|', '+', '-']) return table.draw() class ConsoleWarningFormatter(logging.Formatter): """A logging.Formatter which prints WARNING and ERROR messages with a prefix of the log level colored appropriately for the log level. """ def get_level_message(self, record): separator = ': ' if record.levelno >= logging.ERROR: return colors.red(record.levelname) + separator if record.levelno >= logging.WARNING: return colors.yellow(record.levelname) + separator return '' def format(self, record): if isinstance(record.msg, bytes): record.msg = record.msg.decode('utf-8') message = super().format(record) return '{}{}'.format(self.get_level_message(record), message) compose-1.29.2/compose/cli/log_printer.py000066400000000000000000000176321404620552300203740ustar00rootroot00000000000000import _thread as thread import sys from collections import namedtuple from itertools import cycle from operator import attrgetter from queue import Empty from queue import Queue from threading import Thread from docker.errors import APIError from . 
import colors from compose.cli.signals import ShutdownException from compose.utils import split_buffer class LogPresenter: def __init__(self, prefix_width, color_func, keep_prefix=True): self.prefix_width = prefix_width self.color_func = color_func self.keep_prefix = keep_prefix def present(self, container, line): to_log = '{line}'.format(line=line) if self.keep_prefix: prefix = container.name_without_project.ljust(self.prefix_width) to_log = '{prefix} '.format(prefix=self.color_func(prefix + ' |')) + to_log return to_log def build_log_presenters(service_names, monochrome, keep_prefix=True): """Return an iterable of functions. Each function can be used to format the log output of a container. """ prefix_width = max_name_width(service_names) def no_color(text): return text for color_func in cycle([no_color] if monochrome else colors.rainbow()): yield LogPresenter(prefix_width, color_func, keep_prefix) def max_name_width(service_names, max_index_width=3): """Calculate the maximum width of container names so we can make the log prefixes line up like so: db_1 | Listening web_1 | Listening """ return max(len(name) for name in service_names) + max_index_width class LogPrinter: """Print logs from many containers to a single output stream.""" def __init__(self, containers, presenters, event_stream, output=sys.stdout, cascade_stop=False, log_args=None): self.containers = containers self.presenters = presenters self.event_stream = event_stream self.output = output self.cascade_stop = cascade_stop self.log_args = log_args or {} def run(self): if not self.containers: return queue = Queue() thread_args = queue, self.log_args thread_map = build_thread_map(self.containers, self.presenters, thread_args) start_producer_thread(( thread_map, self.event_stream, self.presenters, thread_args)) for line in consume_queue(queue, self.cascade_stop): remove_stopped_threads(thread_map) if self.cascade_stop: matching_container = [cont.name for cont in self.containers if cont.name == line] if line in matching_container: # Returning the name of the container that started # the cascade_stop so we can return the correct exit code return line if not line: if not thread_map: # There are no running containers left to tail, so exit return # We got an empty line because of a timeout, but there are still # active containers to tail, so continue continue self.write(line) def write(self, line): try: self.output.write(line) except UnicodeEncodeError: # This may happen if the user's locale settings don't support UTF-8 # and UTF-8 characters are present in the log line. The following # will output a "degraded" log with unsupported characters # replaced by `?` self.output.write(line.encode('ascii', 'replace').decode()) self.output.flush() def remove_stopped_threads(thread_map): for container_id, tailer_thread in list(thread_map.items()): if not tailer_thread.is_alive(): thread_map.pop(container_id, None) def build_thread(container, presenter, queue, log_args): tailer = Thread( target=tail_container_logs, args=(container, presenter, queue, log_args)) tailer.daemon = True tailer.start() return tailer def build_thread_map(initial_containers, presenters, thread_args): return { container.id: build_thread(container, next(presenters), *thread_args) # Container order is unspecified, so they are sorted by name in order to make # container:presenter (log color) assignment deterministic when given a list of containers # with the same names. 
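# (each build_thread call consumes next(presenters) from the cycled color # generator, so a deterministic container order also makes the # container-to-color pairing deterministic)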
for container in sorted(initial_containers, key=attrgetter('name')) } class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')): @classmethod def new(cls, item): return cls(item, None, None) @classmethod def exception(cls, exc): return cls(None, None, exc) @classmethod def stop(cls, item=None): return cls(item, True, None) def tail_container_logs(container, presenter, queue, log_args): try: for item in build_log_generator(container, log_args): queue.put(QueueItem.new(presenter.present(container, item))) except Exception as e: queue.put(QueueItem.exception(e)) return if log_args.get('follow'): queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container)))) queue.put(QueueItem.stop(container.name)) def build_log_generator(container, log_args): # if the container doesn't have a log_stream we need to attach to container # before log printer starts running if container.log_stream is None: stream = container.logs(stdout=True, stderr=True, stream=True, **log_args) else: stream = container.log_stream return split_buffer(stream) def wait_on_exit(container): try: exit_code = container.wait() return "{} exited with code {}\n".format(container.name, exit_code) except APIError as e: return "Unexpected API error for {} (HTTP code {})\nResponse body:\n{}\n".format( container.name, e.response.status_code, e.response.text or '[empty]' ) def start_producer_thread(thread_args): producer = Thread(target=watch_events, args=thread_args) producer.daemon = True producer.start() def watch_events(thread_map, event_stream, presenters, thread_args): crashed_containers = set() for event in event_stream: if event['action'] == 'stop': thread_map.pop(event['id'], None) if event['action'] == 'die': thread_map.pop(event['id'], None) crashed_containers.add(event['id']) if event['action'] != 'start': continue if event['id'] in thread_map: if thread_map[event['id']].is_alive(): continue # Container was stopped and started, we need a new thread thread_map.pop(event['id'], None) # Container crashed so we should reattach to it if event['id'] in crashed_containers: container = event['container'] if not container.is_restarting: try: container.attach_log_stream() except APIError: # Just ignore errors when reattaching to already crashed containers pass crashed_containers.remove(event['id']) thread_map[event['id']] = build_thread( event['container'], next(presenters), *thread_args ) def consume_queue(queue, cascade_stop): """Consume the queue by reading lines off of it and yielding them.""" while True: try: item = queue.get(timeout=0.1) except Empty: yield None continue # See https://github.com/docker/compose/issues/189 except thread.error: raise ShutdownException() if item.exc: raise item.exc if item.is_stop and not cascade_stop: continue yield item.item compose-1.29.2/compose/cli/main.py000066400000000000000000001734171404620552300170020ustar00rootroot00000000000000import contextlib import functools import json import logging import pipes import re import subprocess import sys from distutils.spawn import find_executable from inspect import getdoc from operator import attrgetter import docker.errors import docker.utils from . import errors from . import signals from .. 
import __version__ from ..config import ConfigurationError from ..config import parse_environment from ..config import parse_labels from ..config import resolve_build_args from ..config.environment import Environment from ..config.serialize import serialize_config from ..config.types import VolumeSpec from ..const import IS_LINUX_PLATFORM from ..const import IS_WINDOWS_PLATFORM from ..errors import StreamParseError from ..metrics.decorator import metrics from ..parallel import ParallelStreamWriter from ..progress_stream import StreamOutputError from ..project import get_image_digests from ..project import MissingDigests from ..project import NoSuchService from ..project import OneOffFilter from ..project import ProjectError from ..service import BuildAction from ..service import BuildError from ..service import ConvergenceStrategy from ..service import ImageType from ..service import NeedsBuildError from ..service import OperationFailedError from ..utils import filter_attached_for_up from .colors import AnsiMode from .command import get_config_from_options from .command import get_project_dir from .command import project_from_options from .docopt_command import DocoptDispatcher from .docopt_command import get_handler from .docopt_command import NoSuchCommand from .errors import UserError from .formatter import ConsoleWarningFormatter from .formatter import Formatter from .log_printer import build_log_presenters from .log_printer import LogPrinter from .utils import get_version_info from .utils import human_readable_file_size from .utils import yesno from compose.metrics.client import MetricsCommand from compose.metrics.client import Status if not IS_WINDOWS_PLATFORM: from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation log = logging.getLogger(__name__) def main(): # noqa: C901 signals.ignore_sigpipe() command = None try: _, opts, command = DocoptDispatcher.get_command_and_options( TopLevelCommand, get_filtered_args(sys.argv[1:]), {'options_first': True, 'version': get_version_info('compose')}) except Exception: pass try: command_func = dispatch() command_func() if not IS_LINUX_PLATFORM and command == 'help': print("\nDocker Compose is now in the Docker CLI, try `docker compose` help") except (KeyboardInterrupt, signals.ShutdownException): exit_with_metrics(command, "Aborting.", status=Status.CANCELED) except (UserError, NoSuchService, ConfigurationError, ProjectError, OperationFailedError) as e: exit_with_metrics(command, e.msg, status=Status.FAILURE) except BuildError as e: reason = "" if e.reason: reason = " : " + e.reason exit_with_metrics(command, "Service '{}' failed to build{}".format(e.service.name, reason), status=Status.FAILURE) except StreamOutputError as e: exit_with_metrics(command, e, status=Status.FAILURE) except NeedsBuildError as e: exit_with_metrics(command, "Service '{}' needs to be built, but --no-build was passed.".format( e.service.name), status=Status.FAILURE) except NoSuchCommand as e: commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand))) if not IS_LINUX_PLATFORM: commands += "\n\nDocker Compose is now in the Docker CLI, try `docker compose`" exit_with_metrics("", log_msg="No such command: {}\n\n{}".format( e.command, commands), status=Status.FAILURE) except (errors.ConnectionError, StreamParseError): exit_with_metrics(command, status=Status.FAILURE) except SystemExit as e: status = Status.SUCCESS if len(sys.argv) > 1 and '--help' not in sys.argv: status = Status.FAILURE if command and len(sys.argv) >= 3 and sys.argv[2] == 
'--help': command = '--help ' + command if not command and len(sys.argv) >= 2 and sys.argv[1] == '--help': command = '--help' msg = e.args[0] if len(e.args) else "" code = 0 if isinstance(e.code, int): code = e.code if not IS_LINUX_PLATFORM and not command: msg += "\n\nDocker Compose is now in the Docker CLI, try `docker compose`" exit_with_metrics(command, log_msg=msg, status=status, exit_code=code) def get_filtered_args(args): if args[0] in ('-h', '--help'): return [] if args[0] == '--version': return ['version'] def exit_with_metrics(command, log_msg=None, status=Status.SUCCESS, exit_code=1): if log_msg and command != 'exec': if not exit_code: log.info(log_msg) else: log.error(log_msg) MetricsCommand(command, status=status).send_metrics() sys.exit(exit_code) def dispatch(): console_stream = sys.stderr console_handler = logging.StreamHandler(console_stream) setup_logging(console_handler) dispatcher = DocoptDispatcher( TopLevelCommand, {'options_first': True, 'version': get_version_info('compose')}) options, handler, command_options = dispatcher.parse(sys.argv[1:]) ansi_mode = AnsiMode.AUTO try: if options.get("--ansi"): ansi_mode = AnsiMode(options.get("--ansi")) except ValueError: raise UserError( 'Invalid value for --ansi: {}. Expected one of {}.'.format( options.get("--ansi"), ', '.join(m.value for m in AnsiMode) ) ) if options.get("--no-ansi"): if options.get("--ansi"): raise UserError("--no-ansi and --ansi cannot be combined.") log.warning('--no-ansi option is deprecated and will be removed in future versions. ' 'Use `--ansi never` instead.') ansi_mode = AnsiMode.NEVER setup_console_handler(console_handler, options.get('--verbose'), ansi_mode.use_ansi_codes(console_handler.stream), options.get("--log-level")) setup_parallel_logger(ansi_mode) if ansi_mode is AnsiMode.NEVER: command_options['--no-color'] = True return functools.partial(perform_command, options, handler, command_options) def perform_command(options, handler, command_options): if options['COMMAND'] in ('help', 'version'): # Skip looking up the compose file. handler(command_options) return if options['COMMAND'] == 'config': command = TopLevelCommand(None, options=options) handler(command, command_options) return project = project_from_options('.', options) command = TopLevelCommand(project, options=options) with errors.handle_connection_errors(project.client): handler(command, command_options) def setup_logging(console_handler): root_logger = logging.getLogger() root_logger.addHandler(console_handler) root_logger.setLevel(logging.DEBUG) # Disable requests and docker-py logging logging.getLogger("urllib3").propagate = False logging.getLogger("requests").propagate = False logging.getLogger("docker").propagate = False def setup_parallel_logger(ansi_mode): ParallelStreamWriter.set_default_ansi_mode(ansi_mode) def setup_console_handler(handler, verbose, use_console_formatter=True, level=None): if use_console_formatter: format_class = ConsoleWarningFormatter else: format_class = logging.Formatter if verbose: handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s')) loglevel = logging.DEBUG else: handler.setFormatter(format_class()) loglevel = logging.INFO if level is not None: levels = { 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL, } loglevel = levels.get(level.upper()) if loglevel is None: raise UserError( 'Invalid value for --log-level. Expected one of DEBUG, INFO, WARNING, ERROR, CRITICAL.' 
) handler.setLevel(loglevel) # stolen from docopt master def parse_doc_section(name, source): pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)', re.IGNORECASE | re.MULTILINE) return [s.strip() for s in pattern.findall(source)] class TopLevelCommand: """Define and run multi-container applications with Docker. Usage: docker-compose [-f ...] [--profile ...] [options] [--] [COMMAND] [ARGS...] docker-compose -h|--help Options: -f, --file FILE Specify an alternate compose file (default: docker-compose.yml) -p, --project-name NAME Specify an alternate project name (default: directory name) --profile NAME Specify a profile to enable -c, --context NAME Specify a context name --verbose Show more output --log-level LEVEL Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) --ansi (never|always|auto) Control when to print ANSI control characters --no-ansi Do not print ANSI control characters (DEPRECATED) -v, --version Print version and exit -H, --host HOST Daemon socket to connect to --tls Use TLS; implied by --tlsverify --tlscacert CA_PATH Trust certs signed only by this CA --tlscert CLIENT_CERT_PATH Path to TLS certificate file --tlskey TLS_KEY_PATH Path to TLS key file --tlsverify Use TLS and verify the remote --skip-hostname-check Don't check the daemon's hostname against the name specified in the client certificate --project-directory PATH Specify an alternate working directory (default: the path of the Compose file) --compatibility If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent (DEPRECATED) --env-file PATH Specify an alternate environment file Commands: build Build or rebuild services config Validate and view the Compose file create Create services down Stop and remove resources events Receive real time events from containers exec Execute a command in a running container help Get help on a command images List images kill Kill containers logs View output from containers pause Pause services port Print the public port for a port binding ps List containers pull Pull service images push Push service images restart Restart services rm Remove stopped containers run Run a one-off command scale Set number of containers for a service start Start services stop Stop services top Display the running processes unpause Unpause services up Create and start containers version Show version information and quit """ def __init__(self, project, options=None): self.project = project self.toplevel_options = options or {} @property def project_dir(self): return get_project_dir(self.toplevel_options) @property def toplevel_environment(self): environment_file = self.toplevel_options.get('--env-file') return Environment.from_env_file(self.project_dir, environment_file) @metrics() def build(self, options): """ Build or rebuild services. Services are built once and then tagged as `project_service`, e.g. `composetest_db`. If you change a service's `Dockerfile` or the contents of its build directory, you can run `docker-compose build` to rebuild it. Usage: build [options] [--build-arg key=val...] [--] [SERVICE...] Options: --build-arg key=val Set build-time variables for services. --compress Compress the build context using gzip. --force-rm Always remove intermediate containers. -m, --memory MEM Set memory limit for the build container. --no-cache Do not use cache when building the image. --no-rm Do not remove intermediate containers after a successful build. --parallel Build images in parallel. --progress string Set type of progress output (auto, plain, tty). 
--pull Always attempt to pull a newer version of the image. -q, --quiet Don't print anything to STDOUT """ service_names = options['SERVICE'] build_args = options.get('--build-arg', None) if build_args: if not service_names and docker.utils.version_lt(self.project.client.api_version, '1.25'): raise UserError( '--build-arg is only supported when services are specified for API version < 1.25.' ' Please use a Compose file version > 2.2 or specify which services to build.' ) build_args = resolve_build_args(build_args, self.toplevel_environment) native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD', True) self.project.build( service_names=options['SERVICE'], no_cache=bool(options.get('--no-cache', False)), pull=bool(options.get('--pull', False)), force_rm=bool(options.get('--force-rm', False)), memory=options.get('--memory'), rm=not bool(options.get('--no-rm', False)), build_args=build_args, gzip=options.get('--compress', False), parallel_build=options.get('--parallel', False), silent=options.get('--quiet', False), cli=native_builder, progress=options.get('--progress'), ) @metrics() def config(self, options): """ Validate and view the Compose file. Usage: config [options] Options: --resolve-image-digests Pin image tags to digests. --no-interpolate Don't interpolate environment variables. -q, --quiet Only validate the configuration, don't print anything. --profiles Print the profile names, one per line. --services Print the service names, one per line. --volumes Print the volume names, one per line. --hash="*" Print the service config hash, one per line. Set "service1,service2" for a list of specified services or use the wildcard symbol to display all services. """ additional_options = {'--no-interpolate': options.get('--no-interpolate')} compose_config = get_config_from_options('.', self.toplevel_options, additional_options) image_digests = None if options['--resolve-image-digests']: self.project = project_from_options('.', self.toplevel_options, additional_options) with errors.handle_connection_errors(self.project.client): image_digests = image_digests_for_project(self.project) if options['--quiet']: return if options['--profiles']: profiles = set() for service in compose_config.services: if 'profiles' in service: for profile in service['profiles']: profiles.add(profile) print('\n'.join(sorted(profiles))) return if options['--services']: print('\n'.join(service['name'] for service in compose_config.services)) return if options['--volumes']: print('\n'.join(volume for volume in compose_config.volumes)) return if options['--hash'] is not None: h = options['--hash'] self.project = project_from_options('.', self.toplevel_options, additional_options) services = [svc for svc in options['--hash'].split(',')] if h != '*' else None with errors.handle_connection_errors(self.project.client): for service in self.project.get_services(services): print('{} {}'.format(service.name, service.config_hash)) return print(serialize_config(compose_config, image_digests, not options['--no-interpolate'])) @metrics() def create(self, options): """ Creates containers for a service. This command is deprecated. Use the `up` command with `--no-start` instead. Usage: create [options] [SERVICE...] Options: --force-recreate Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate. --no-recreate If containers already exist, don't recreate them. Incompatible with --force-recreate. --no-build Don't build an image, even if it's missing. 
--build Build images before creating containers. """ service_names = options['SERVICE'] log.warning( 'The create command is deprecated. ' 'Use the up command with the --no-start flag instead.' ) self.project.create( service_names=service_names, strategy=convergence_strategy_from_opts(options), do_build=build_action_from_opts(options), ) @metrics() def down(self, options): """ Stops containers and removes containers, networks, volumes, and images created by `up`. By default, the only things removed are: - Containers for services defined in the Compose file - Networks defined in the `networks` section of the Compose file - The default network, if one is used Networks and volumes defined as `external` are never removed. Usage: down [options] Options: --rmi type Remove images. Type must be one of: 'all': Remove all images used by any service. 'local': Remove only images that don't have a custom tag set by the `image` field. -v, --volumes Remove named volumes declared in the `volumes` section of the Compose file and anonymous volumes attached to containers. --remove-orphans Remove containers for services not defined in the Compose file -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10) """ ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS') if ignore_orphans and options['--remove-orphans']: raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.") image_type = image_type_from_opt('--rmi', options['--rmi']) timeout = timeout_from_opts(options) self.project.down( image_type, options['--volumes'], options['--remove-orphans'], timeout=timeout, ignore_orphans=ignore_orphans) def events(self, options): """ Receive real time events from containers. Usage: events [options] [--] [SERVICE...] Options: --json Output events as a stream of json objects """ def format_event(event): attributes = ["%s=%s" % item for item in event['attributes'].items()] return ("{time} {type} {action} {id} ({attrs})").format( attrs=", ".join(sorted(attributes)), **event) def json_format_event(event): event['time'] = event['time'].isoformat() event.pop('container') return json.dumps(event) for event in self.project.events(): formatter = json_format_event if options['--json'] else format_event print(formatter(event)) sys.stdout.flush() @metrics("exec") def exec_command(self, options): """ Execute a command in a running container Usage: exec [options] [-e KEY=VAL...] [--] SERVICE COMMAND [ARGS...] Options: -d, --detach Detached mode: Run command in the background. --privileged Give extended privileges to the process. -u, --user USER Run the command as this user. -T Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY. --index=index index of the container if there are multiple instances of a service [default: 1] -e, --env KEY=VAL Set environment variables (can be used multiple times, not supported in API < 1.25) -w, --workdir DIR Path to workdir directory for this command. 
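
        For example, to open an interactive shell in the first container of a
        service (the `web` service name here is only illustrative):

            $ docker-compose exec web sh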
""" use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI') index = int(options.get('--index')) service = self.project.get_service(options['SERVICE']) detach = options.get('--detach') if options['--env'] and docker.utils.version_lt(self.project.client.api_version, '1.25'): raise UserError("Setting environment for exec is not supported in API < 1.25 (%s)" % self.project.client.api_version) if options['--workdir'] and docker.utils.version_lt(self.project.client.api_version, '1.35'): raise UserError("Setting workdir for exec is not supported in API < 1.35 (%s)" % self.project.client.api_version) try: container = service.get_container(number=index) except ValueError as e: raise UserError(str(e)) command = [options['COMMAND']] + options['ARGS'] tty = not options["-T"] if IS_WINDOWS_PLATFORM or use_cli and not detach: sys.exit(call_docker( build_exec_command(options, container.id, command), self.toplevel_options, self.toplevel_environment) ) create_exec_options = { "privileged": options["--privileged"], "user": options["--user"], "tty": tty, "stdin": True, "workdir": options["--workdir"], } if docker.utils.version_gte(self.project.client.api_version, '1.25'): create_exec_options["environment"] = options["--env"] exec_id = container.create_exec(command, **create_exec_options) if detach: container.start_exec(exec_id, tty=tty, stream=True) return signals.set_signal_handler_to_shutdown() try: operation = ExecOperation( self.project.client, exec_id, interactive=tty, ) pty = PseudoTerminal(self.project.client, operation) pty.start() except signals.ShutdownException: log.info("received shutdown exception: closing") exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode") sys.exit(exit_code) @classmethod @metrics() def help(cls, options): """ Get help on a command. Usage: help [COMMAND] """ if options['COMMAND']: subject = get_handler(cls, options['COMMAND']) else: subject = cls print(getdoc(subject)) @metrics() def images(self, options): """ List images used by the created containers. Usage: images [options] [--] [SERVICE...] Options: -q, --quiet Only display IDs """ containers = sorted( self.project.containers(service_names=options['SERVICE'], stopped=True) + self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only), key=attrgetter('name')) if options['--quiet']: for image in {c.image for c in containers}: print(image.split(':')[1]) return def add_default_tag(img_name): if ':' not in img_name.split('/')[-1]: return '{}:latest'.format(img_name) return img_name headers = [ 'Container', 'Repository', 'Tag', 'Image Id', 'Size' ] rows = [] for container in containers: image_config = container.image_config service = self.project.get_service(container.service) index = 0 img_name = add_default_tag(service.image_name) if img_name in image_config['RepoTags']: index = image_config['RepoTags'].index(img_name) repo_tags = ( image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags'] else ('', '') ) image_id = image_config['Id'].split(':')[1][:12] size = human_readable_file_size(image_config['Size']) rows.append([ container.name, repo_tags[0], repo_tags[1], image_id, size ]) print(Formatter.table(headers, rows)) @metrics() def kill(self, options): """ Force stop service containers. Usage: kill [options] [--] [SERVICE...] Options: -s SIGNAL SIGNAL to send to the container. Default signal is SIGKILL. 
""" signal = options.get('-s', 'SIGKILL') self.project.kill(service_names=options['SERVICE'], signal=signal) @metrics() def logs(self, options): """ View output from containers. Usage: logs [options] [--] [SERVICE...] Options: --no-color Produce monochrome output. -f, --follow Follow log output. -t, --timestamps Show timestamps. --tail="all" Number of lines to show from the end of the logs for each container. --no-log-prefix Don't print prefix in logs. """ containers = self.project.containers(service_names=options['SERVICE'], stopped=True) tail = options['--tail'] if tail is not None: if tail.isdigit(): tail = int(tail) elif tail != 'all': raise UserError("tail flag must be all or a number") log_args = { 'follow': options['--follow'], 'tail': tail, 'timestamps': options['--timestamps'] } print("Attaching to", list_containers(containers)) log_printer_from_project( self.project, containers, options['--no-color'], log_args, event_stream=self.project.events(service_names=options['SERVICE']), keep_prefix=not options['--no-log-prefix']).run() @metrics() def pause(self, options): """ Pause services. Usage: pause [SERVICE...] """ containers = self.project.pause(service_names=options['SERVICE']) exit_if(not containers, 'No containers to pause', 1) @metrics() def port(self, options): """ Print the public port for a port binding. Usage: port [options] [--] SERVICE PRIVATE_PORT Options: --protocol=proto tcp or udp [default: tcp] --index=index index of the container if there are multiple instances of a service [default: 1] """ index = int(options.get('--index')) service = self.project.get_service(options['SERVICE']) try: container = service.get_container(number=index) except ValueError as e: raise UserError(str(e)) print(container.get_local_port( options['PRIVATE_PORT'], protocol=options.get('--protocol') or 'tcp') or '') @metrics() def ps(self, options): """ List containers. Usage: ps [options] [--] [SERVICE...] Options: -q, --quiet Only display IDs --services Display services --filter KEY=VAL Filter services by a property -a, --all Show all stopped containers (including those created by the run command) """ if options['--quiet'] and options['--services']: raise UserError('--quiet and --services cannot be combined') if options['--services']: filt = build_filter(options.get('--filter')) services = self.project.services if filt: services = filter_services(filt, services, self.project) print('\n'.join(service.name for service in services)) return if options['--all']: containers = sorted(self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.include, stopped=True), key=attrgetter('name')) else: containers = sorted( self.project.containers(service_names=options['SERVICE'], stopped=True) + self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only), key=attrgetter('name')) if options['--quiet']: for container in containers: print(container.id) else: headers = [ 'Name', 'Command', 'State', 'Ports', ] rows = [] for container in containers: command = container.human_readable_command if len(command) > 30: command = '%s ...' % command[:26] rows.append([ container.name, command, container.human_readable_state, container.human_readable_ports, ]) print(Formatter.table(headers, rows)) @metrics() def pull(self, options): """ Pulls images for services defined in a Compose file, but does not start the containers. Usage: pull [options] [--] [SERVICE...] Options: --ignore-pull-failures Pull what it can and ignores images with pull failures. 
--parallel Deprecated, pull multiple images in parallel (enabled by default). --no-parallel Disable parallel pulling. -q, --quiet Pull without printing progress information --include-deps Also pull services declared as dependencies """ if options.get('--parallel'): log.warning('--parallel option is deprecated and will be removed in future versions.') self.project.pull( service_names=options['SERVICE'], ignore_pull_failures=options.get('--ignore-pull-failures'), parallel_pull=not options.get('--no-parallel'), silent=options.get('--quiet'), include_deps=options.get('--include-deps'), ) @metrics() def push(self, options): """ Pushes images for services. Usage: push [options] [--] [SERVICE...] Options: --ignore-push-failures Push what it can and ignores images with push failures. """ self.project.push( service_names=options['SERVICE'], ignore_push_failures=options.get('--ignore-push-failures') ) @metrics() def rm(self, options): """ Removes stopped service containers. By default, anonymous volumes attached to containers will not be removed. You can override this with `-v`. To list all volumes, use `docker volume ls`. Any data which is not in a volume will be lost. Usage: rm [options] [--] [SERVICE...] Options: -f, --force Don't ask to confirm removal -s, --stop Stop the containers, if required, before removing -v Remove any anonymous volumes attached to containers -a, --all Deprecated - no effect. """ if options.get('--all'): log.warning( '--all flag is obsolete. This is now the default behavior ' 'of `docker-compose rm`' ) one_off = OneOffFilter.include if options.get('--stop'): self.project.stop(service_names=options['SERVICE'], one_off=one_off) all_containers = self.project.containers( service_names=options['SERVICE'], stopped=True, one_off=one_off ) stopped_containers = [c for c in all_containers if not c.is_running] if len(stopped_containers) > 0: print("Going to remove", list_containers(stopped_containers)) if options.get('--force') \ or yesno("Are you sure? [yN] ", default=False): self.project.remove_stopped( service_names=options['SERVICE'], v=options.get('-v', False), one_off=one_off ) else: print("No stopped containers") @metrics() def run(self, options): """ Run a one-off command on a service. For example: $ docker-compose run web python manage.py shell By default, linked services will be started, unless they are already running. If you do not want to start linked services, use `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`. Usage: run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l KEY=VALUE...] [--] SERVICE [COMMAND] [ARGS...] Options: -d, --detach Detached mode: Run container in the background, print new container name. --name NAME Assign a name to the container --entrypoint CMD Override the entrypoint of the image. -e KEY=VAL Set an environment variable (can be used multiple times) -l, --label KEY=VAL Add or override a label (can be used multiple times) -u, --user="" Run as specified username or uid --no-deps Don't start linked services. --rm Remove container after run. Ignored in detached mode. -p, --publish=[] Publish a container's port(s) to the host --service-ports Run command with the service's ports enabled and mapped to the host. --use-aliases Use the service's network aliases in the network(s) the container connects to. -v, --volume=[] Bind mount a volume (default []) -T Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY. 
-w, --workdir="" Working directory inside the container """ service = self.project.get_service(options['SERVICE']) detach = options.get('--detach') if options['--publish'] and options['--service-ports']: raise UserError( 'Service port mapping and manual port mapping ' 'can not be used together' ) if options['COMMAND'] is not None: command = [options['COMMAND']] + options['ARGS'] elif options['--entrypoint'] is not None: command = [] else: command = service.options.get('command') options['stdin_open'] = service.options.get('stdin_open', True) container_options = build_one_off_container_options(options, detach, command) run_one_off_container( container_options, self.project, service, options, self.toplevel_options, self.toplevel_environment ) @metrics() def scale(self, options): """ Set number of containers to run for a service. Numbers are specified in the form `service=num` as arguments. For example: $ docker-compose scale web=2 worker=3 This command is deprecated. Use the up command with the `--scale` flag instead. Usage: scale [options] [SERVICE=NUM...] Options: -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10) """ timeout = timeout_from_opts(options) log.warning( 'The scale command is deprecated. ' 'Use the up command with the --scale flag instead.' ) for service_name, num in parse_scale_args(options['SERVICE=NUM']).items(): self.project.get_service(service_name).scale(num, timeout=timeout) @metrics() def start(self, options): """ Start existing containers. Usage: start [SERVICE...] """ containers = self.project.start(service_names=options['SERVICE']) exit_if(not containers, 'No containers to start', 1) @metrics() def stop(self, options): """ Stop running containers without removing them. They can be started again with `docker-compose start`. Usage: stop [options] [--] [SERVICE...] Options: -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10) """ timeout = timeout_from_opts(options) self.project.stop(service_names=options['SERVICE'], timeout=timeout) @metrics() def restart(self, options): """ Restart running containers. Usage: restart [options] [--] [SERVICE...] Options: -t, --timeout TIMEOUT Specify a shutdown timeout in seconds. (default: 10) """ timeout = timeout_from_opts(options) containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout) exit_if(not containers, 'No containers to restart', 1) @metrics() def top(self, options): """ Display the running processes Usage: top [SERVICE...] """ containers = sorted( self.project.containers(service_names=options['SERVICE'], stopped=False) + self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only), key=attrgetter('name') ) for idx, container in enumerate(containers): if idx > 0: print() top_data = self.project.client.top(container.name) headers = top_data.get("Titles") rows = [] for process in top_data.get("Processes", []): rows.append(process) print(container.name) print(Formatter.table(headers, rows)) @metrics() def unpause(self, options): """ Unpause services. Usage: unpause [SERVICE...] """ containers = self.project.unpause(service_names=options['SERVICE']) exit_if(not containers, 'No containers to unpause', 1) @metrics() def up(self, options): """ Builds, (re)creates, starts, and attaches to containers for a service. Unless they are already running, this command also starts any linked services. The `docker-compose up` command aggregates the output of each container. When the command exits, all containers are stopped. 
Running `docker-compose up -d` starts the containers in the background and leaves them running. If there are existing containers for a service, and the service's configuration or image was changed after the container's creation, `docker-compose up` picks up the changes by stopping and recreating the containers (preserving mounted volumes). To prevent Compose from picking up changes, use the `--no-recreate` flag. If you want to force Compose to stop and recreate all containers, use the `--force-recreate` flag. Usage: up [options] [--scale SERVICE=NUM...] [--] [SERVICE...] Options: -d, --detach Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit. --no-color Produce monochrome output. --quiet-pull Pull without printing progress information --no-deps Don't start linked services. --force-recreate Recreate containers even if their configuration and image haven't changed. --always-recreate-deps Recreate dependent containers. Incompatible with --no-recreate. --no-recreate If containers already exist, don't recreate them. Incompatible with --force-recreate and -V. --no-build Don't build an image, even if it's missing. --no-start Don't start the services after creating them. --build Build images before starting containers. --abort-on-container-exit Stops all containers if any container was stopped. Incompatible with -d. --attach-dependencies Attach to dependent containers. -t, --timeout TIMEOUT Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10) -V, --renew-anon-volumes Recreate anonymous volumes instead of retrieving data from the previous containers. --remove-orphans Remove containers for services not defined in the Compose file. --exit-code-from SERVICE Return the exit code of the selected service container. Implies --abort-on-container-exit. --scale SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present. --no-log-prefix Don't print prefix in logs. 
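
        For example, to start in detached mode while scaling one service
        (the `worker` service name here is only illustrative):

            $ docker-compose up -d --scale worker=3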
""" start_deps = not options['--no-deps'] always_recreate_deps = options['--always-recreate-deps'] exit_value_from = exitval_from_opts(options, self.project) cascade_stop = options['--abort-on-container-exit'] service_names = options['SERVICE'] timeout = timeout_from_opts(options) remove_orphans = options['--remove-orphans'] detached = options.get('--detach') no_start = options.get('--no-start') attach_dependencies = options.get('--attach-dependencies') keep_prefix = not options.get('--no-log-prefix') if detached and (cascade_stop or exit_value_from or attach_dependencies): raise UserError( "-d cannot be combined with --abort-on-container-exit or --attach-dependencies.") ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS') if ignore_orphans and remove_orphans: raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.") opts = ['--detach', '--abort-on-container-exit', '--exit-code-from', '--attach-dependencies'] for excluded in [x for x in opts if options.get(x) and no_start]: raise UserError('--no-start and {} cannot be combined.'.format(excluded)) native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD', True) with up_shutdown_context(self.project, service_names, timeout, detached): warn_for_swarm_mode(self.project.client) def up(rebuild): return self.project.up( service_names=service_names, start_deps=start_deps, strategy=convergence_strategy_from_opts(options), do_build=build_action_from_opts(options), timeout=timeout, detached=detached, remove_orphans=remove_orphans, ignore_orphans=ignore_orphans, scale_override=parse_scale_args(options['--scale']), start=not no_start, always_recreate_deps=always_recreate_deps, reset_container_image=rebuild, renew_anonymous_volumes=options.get('--renew-anon-volumes'), silent=options.get('--quiet-pull'), cli=native_builder, attach_dependencies=attach_dependencies, ) try: to_attach = up(False) except docker.errors.ImageNotFound as e: log.error( "The image for the service you're trying to recreate has been removed. " "If you continue, volume data could be lost. Consider backing up your data " "before continuing.\n" ) res = yesno("Continue with the new image? [yN]", False) if res is None or not res: raise e to_attach = up(True) if detached or no_start: return attached_containers = filter_attached_containers( to_attach, service_names, attach_dependencies) log_printer = log_printer_from_project( self.project, attached_containers, options['--no-color'], {'follow': True}, cascade_stop, event_stream=self.project.events(service_names=service_names), keep_prefix=keep_prefix) print("Attaching to", list_containers(log_printer.containers)) cascade_starter = log_printer.run() if cascade_stop: print("Aborting on container exit...") all_containers = self.project.containers(service_names=options['SERVICE'], stopped=True) exit_code = compute_exit_code( exit_value_from, attached_containers, cascade_starter, all_containers ) self.project.stop(service_names=service_names, timeout=timeout) if exit_value_from: exit_code = compute_service_exit_code(exit_value_from, attached_containers) sys.exit(exit_code) @classmethod @metrics() def version(cls, options): """ Show version information and quit. Usage: version [--short] Options: --short Shows only Compose's version number. 
""" if options['--short']: print(__version__) else: print(get_version_info('full')) def compute_service_exit_code(exit_value_from, attached_containers): candidates = list(filter( lambda c: c.service == exit_value_from, attached_containers)) if not candidates: log.error( 'No containers matching the spec "{}" ' 'were run.'.format(exit_value_from) ) return 2 if len(candidates) > 1: exit_values = filter( lambda e: e != 0, [c.inspect()['State']['ExitCode'] for c in candidates] ) return exit_values[0] return candidates[0].inspect()['State']['ExitCode'] def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers): exit_code = 0 for e in all_containers: if (not e.is_running and cascade_starter == e.name): if not e.exit_code == 0: exit_code = e.exit_code break return exit_code def convergence_strategy_from_opts(options): no_recreate = options['--no-recreate'] force_recreate = options['--force-recreate'] renew_anonymous_volumes = options.get('--renew-anon-volumes') if force_recreate and no_recreate: raise UserError("--force-recreate and --no-recreate cannot be combined.") if no_recreate and renew_anonymous_volumes: raise UserError('--no-recreate and --renew-anon-volumes cannot be combined.') if force_recreate or renew_anonymous_volumes: return ConvergenceStrategy.always if no_recreate: return ConvergenceStrategy.never return ConvergenceStrategy.changed def timeout_from_opts(options): timeout = options.get('--timeout') return None if timeout is None else int(timeout) def image_digests_for_project(project): try: return get_image_digests(project) except MissingDigests as e: def list_images(images): return "\n".join(" {}".format(name) for name in sorted(images)) paras = ["Some images are missing digests."] if e.needs_push: command_hint = ( "Use `docker push {}` to push them. " .format(" ".join(sorted(e.needs_push))) ) paras += [ "The following images can be pushed:", list_images(e.needs_push), command_hint, ] if e.needs_pull: command_hint = ( "Use `docker pull {}` to pull them. 
" .format(" ".join(sorted(e.needs_pull))) ) paras += [ "The following images need to be pulled:", list_images(e.needs_pull), command_hint, ] raise UserError("\n\n".join(paras)) def exitval_from_opts(options, project): exit_value_from = options.get('--exit-code-from') if exit_value_from: if not options.get('--abort-on-container-exit'): log.warning('using --exit-code-from implies --abort-on-container-exit') options['--abort-on-container-exit'] = True if exit_value_from not in [s.name for s in project.get_services()]: log.error('No service named "%s" was found in your compose file.', exit_value_from) sys.exit(2) return exit_value_from def image_type_from_opt(flag, value): if not value: return ImageType.none try: return ImageType[value] except KeyError: raise UserError("%s flag must be one of: all, local" % flag) def build_action_from_opts(options): if options['--build'] and options['--no-build']: raise UserError("--build and --no-build can not be combined.") if options['--build']: return BuildAction.force if options['--no-build']: return BuildAction.skip return BuildAction.none def build_one_off_container_options(options, detach, command): container_options = { 'command': command, 'tty': not (detach or options['-T'] or not sys.stdin.isatty()), 'stdin_open': options.get('stdin_open'), 'detach': detach, } if options['-e']: container_options['environment'] = Environment.from_command_line( parse_environment(options['-e']) ) if options['--label']: container_options['labels'] = parse_labels(options['--label']) if options.get('--entrypoint') is not None: container_options['entrypoint'] = ( [""] if options['--entrypoint'] == '' else options['--entrypoint'] ) # Ensure that run command remains one-off (issue #6302) container_options['restart'] = None if options['--user']: container_options['user'] = options.get('--user') if not options['--service-ports']: container_options['ports'] = [] if options['--publish']: container_options['ports'] = options.get('--publish') if options['--name']: container_options['name'] = options['--name'] if options['--workdir']: container_options['working_dir'] = options['--workdir'] if options['--volume']: volumes = [VolumeSpec.parse(i) for i in options['--volume']] container_options['volumes'] = volumes return container_options def run_one_off_container(container_options, project, service, options, toplevel_options, toplevel_environment): native_builder = toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD') detach = options.get('--detach') use_network_aliases = options.get('--use-aliases') service.scale_num = 1 containers = project.up( service_names=[service.name], start_deps=not options['--no-deps'], strategy=ConvergenceStrategy.never, detached=True, rescale=False, cli=native_builder, one_off=True, override_options=container_options, ) try: container = next(c for c in containers if c.service == service.name) except StopIteration: raise OperationFailedError('Could not bring up the requested service') if detach: service.start_container(container, use_network_aliases) print(container.name) return def remove_container(): if options['--rm']: project.client.remove_container(container.id, force=True, v=True) use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI') signals.set_signal_handler_to_shutdown() signals.set_signal_handler_to_hang_up() try: try: if IS_WINDOWS_PLATFORM or use_cli: service.connect_container_to_networks(container, use_network_aliases) exit_code = call_docker( get_docker_start_call(container_options, container.id), 
toplevel_options, toplevel_environment ) else: operation = RunOperation( project.client, container.id, interactive=not options['-T'], logs=False, ) pty = PseudoTerminal(project.client, operation) sockets = pty.sockets() service.start_container(container, use_network_aliases) pty.start(sockets) exit_code = container.wait() except (signals.ShutdownException): project.client.stop(container.id) exit_code = 1 except (signals.ShutdownException, signals.HangUpException): project.client.kill(container.id) remove_container() sys.exit(2) remove_container() sys.exit(exit_code) def get_docker_start_call(container_options, container_id): docker_call = ["start"] if not container_options.get('detach'): docker_call.append("--attach") if container_options.get('stdin_open'): docker_call.append("--interactive") docker_call.append(container_id) return docker_call def log_printer_from_project( project, containers, monochrome, log_args, cascade_stop=False, event_stream=None, keep_prefix=True, ): return LogPrinter( [c for c in containers if c.log_driver not in (None, 'none')], build_log_presenters(project.service_names, monochrome, keep_prefix), event_stream or project.events(), cascade_stop=cascade_stop, log_args=log_args) def filter_attached_containers(containers, service_names, attach_dependencies=False): return filter_attached_for_up( containers, service_names, attach_dependencies, lambda container: container.service) @contextlib.contextmanager def up_shutdown_context(project, service_names, timeout, detached): if detached: yield return signals.set_signal_handler_to_shutdown() try: try: yield except signals.ShutdownException: print("Gracefully stopping... (press Ctrl+C again to force)") project.stop(service_names=service_names, timeout=timeout) except signals.ShutdownException: project.kill(service_names=service_names) sys.exit(2) def list_containers(containers): return ", ".join(c.name for c in containers) def exit_if(condition, message, exit_code): if condition: log.error(message) raise SystemExit(exit_code) def call_docker(args, dockeropts, environment): executable_path = find_executable('docker') if not executable_path: raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary.")) tls = dockeropts.get('--tls', False) ca_cert = dockeropts.get('--tlscacert') cert = dockeropts.get('--tlscert') key = dockeropts.get('--tlskey') verify = dockeropts.get('--tlsverify') host = dockeropts.get('--host') context = dockeropts.get('--context') tls_options = [] if tls: tls_options.append('--tls') if ca_cert: tls_options.extend(['--tlscacert', ca_cert]) if cert: tls_options.extend(['--tlscert', cert]) if key: tls_options.extend(['--tlskey', key]) if verify: tls_options.append('--tlsverify') if host: tls_options.extend( ['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))] ) if context: tls_options.extend( ['--context', context] ) args = [executable_path] + tls_options + args log.debug(" ".join(map(pipes.quote, args))) filtered_env = {k: v for k, v in environment.items() if v is not None} return subprocess.call(args, env=filtered_env) def parse_scale_args(options): res = {} for s in options: if '=' not in s: raise UserError('Arguments to scale should be in the form service=num') service_name, num = s.split('=', 1) try: num = int(num) except ValueError: raise UserError( 'Number of containers for service "%s" is not a number' % service_name ) res[service_name] = num return res def build_exec_command(options, container_id, command): args = ["exec"] if options["--detach"]: args += ["--detach"] else: 
args += ["--interactive"] if not options["-T"]: args += ["--tty"] if options["--privileged"]: args += ["--privileged"] if options["--user"]: args += ["--user", options["--user"]] if options["--env"]: for env_variable in options["--env"]: args += ["--env", env_variable] if options["--workdir"]: args += ["--workdir", options["--workdir"]] args += [container_id] args += command return args def has_container_with_state(containers, state): states = { 'running': lambda c: c.is_running, 'stopped': lambda c: not c.is_running, 'paused': lambda c: c.is_paused, 'restarting': lambda c: c.is_restarting, } for container in containers: if state not in states: raise UserError("Invalid state: %s" % state) if states[state](container): return True def filter_services(filt, services, project): def should_include(service): for f in filt: if f == 'status': state = filt[f] containers = project.containers([service.name], stopped=True) if not has_container_with_state(containers, state): return False elif f == 'source': source = filt[f] if source == 'image' or source == 'build': if source not in service.options: return False else: raise UserError("Invalid value for source filter: %s" % source) else: raise UserError("Invalid filter: %s" % f) return True return filter(should_include, services) def build_filter(arg): filt = {} if arg is not None: if '=' not in arg: raise UserError("Arguments to --filter should be in form KEY=VAL") key, val = arg.split('=', 1) filt[key] = val return filt def warn_for_swarm_mode(client): info = client.info() if info.get('Swarm', {}).get('LocalNodeState') == 'active': if info.get('ServerVersion', '').startswith('ucp'): # UCP does multi-node scheduling with traditional Compose files. return log.warning( "The Docker Engine you're using is running in swarm mode.\n\n" "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. " "All containers will be scheduled on the current node.\n\n" "To deploy your application across the swarm, " "use `docker stack deploy`.\n" ) compose-1.29.2/compose/cli/signals.py000066400000000000000000000015511404620552300175030ustar00rootroot00000000000000import signal from ..const import IS_WINDOWS_PLATFORM class ShutdownException(Exception): pass class HangUpException(Exception): pass def shutdown(signal, frame): raise ShutdownException() def set_signal_handler(handler): signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGTERM, handler) def set_signal_handler_to_shutdown(): set_signal_handler(shutdown) def hang_up(signal, frame): raise HangUpException() def set_signal_handler_to_hang_up(): # on Windows a ValueError will be raised if trying to set signal handler for SIGHUP if not IS_WINDOWS_PLATFORM: signal.signal(signal.SIGHUP, hang_up) def ignore_sigpipe(): # Restore default behavior for SIGPIPE instead of raising # an exception when encountered. if not IS_WINDOWS_PLATFORM: signal.signal(signal.SIGPIPE, signal.SIG_DFL) compose-1.29.2/compose/cli/utils.py000066400000000000000000000067471404620552300172170ustar00rootroot00000000000000import math import os import platform import ssl import subprocess import sys import distro import docker import compose from ..const import IS_WINDOWS_PLATFORM def yesno(prompt, default=None): """ Prompt the user for a yes or no. Can optionally specify a default value, which will only be used if they enter a blank line. Unrecognised input (anything other than "y", "n", "yes", "no" or "") will return None. 
""" answer = input(prompt).strip().lower() if answer == "y" or answer == "yes": return True elif answer == "n" or answer == "no": return False elif answer == "": return default else: return None def input(prompt): """ Version of input (raw_input in Python 2) which forces a flush of sys.stdout to avoid problems where the prompt fails to appear due to line buffering """ sys.stdout.write(prompt) sys.stdout.flush() return sys.stdin.readline().rstrip('\n') def call_silently(*args, **kwargs): """ Like subprocess.call(), but redirects stdout and stderr to /dev/null. """ with open(os.devnull, 'w') as shutup: try: return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs) except OSError: # On Windows, subprocess.call() can still raise exceptions. Normalize # to POSIXy behaviour by returning a nonzero exit code. return 1 def is_mac(): return platform.system() == 'Darwin' def is_ubuntu(): return platform.system() == 'Linux' and distro.linux_distribution()[0] == 'Ubuntu' def is_windows(): return IS_WINDOWS_PLATFORM def get_version_info(scope): versioninfo = 'docker-compose version {}, build {}'.format( compose.__version__, get_build_version()) if scope == 'compose': return versioninfo if scope == 'full': return ( "{}\n" "docker-py version: {}\n" "{} version: {}\n" "OpenSSL version: {}" ).format( versioninfo, docker.version, platform.python_implementation(), platform.python_version(), ssl.OPENSSL_VERSION) raise ValueError("{} is not a valid version scope".format(scope)) def get_build_version(): filename = os.path.join(os.path.dirname(compose.__file__), 'GITSHA') if not os.path.exists(filename): return 'unknown' with open(filename) as fh: return fh.read().strip() def is_docker_for_mac_installed(): return is_mac() and os.path.isdir('/Applications/Docker.app') def generate_user_agent(): parts = [ "docker-compose/{}".format(compose.__version__), "docker-py/{}".format(docker.__version__), ] try: p_system = platform.system() p_release = platform.release() except OSError: pass else: parts.append("{}/{}".format(p_system, p_release)) return " ".join(parts) def human_readable_file_size(size): suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ] order = int(math.log(size, 1000)) if size else 0 if order >= len(suffixes): order = len(suffixes) - 1 return '{:.4g} {}'.format( size / pow(10, order * 3), suffixes[order] ) def binarystr_to_unicode(s): if not isinstance(s, bytes): return s if IS_WINDOWS_PLATFORM: try: return s.decode('windows-1250') except UnicodeDecodeError: pass return s.decode('utf-8', 'replace') compose-1.29.2/compose/cli/verbose_proxy.py000066400000000000000000000031661404620552300207550ustar00rootroot00000000000000import functools import logging import pprint from itertools import chain def format_call(args, kwargs): args = (repr(a) for a in args) kwargs = ("{!s}={!r}".format(*item) for item in kwargs.items()) return "({})".format(", ".join(chain(args, kwargs))) def format_return(result, max_lines): if isinstance(result, (list, tuple, set)): return "({} with {} items)".format(type(result).__name__, len(result)) if result: lines = pprint.pformat(result).split('\n') extra = '\n...' if len(lines) > max_lines else '' return '\n'.join(lines[:max_lines]) + extra return result class VerboseProxy: """Proxy all function calls to another class and log method name, arguments and return values for each call. 
""" def __init__(self, obj_name, obj, log_name=None, max_lines=10): self.obj_name = obj_name self.obj = obj self.max_lines = max_lines self.log = logging.getLogger(log_name or __name__) def __getattr__(self, name): attr = getattr(self.obj, name) if not callable(attr): return attr return functools.partial(self.proxy_callable, name) def proxy_callable(self, call_name, *args, **kwargs): self.log.info("%s %s <- %s", self.obj_name, call_name, format_call(args, kwargs)) result = getattr(self.obj, call_name)(*args, **kwargs) self.log.info("%s %s -> %s", self.obj_name, call_name, format_return(result, self.max_lines)) return result compose-1.29.2/compose/config/000077500000000000000000000000001404620552300161655ustar00rootroot00000000000000compose-1.29.2/compose/config/__init__.py000066400000000000000000000005711404620552300203010ustar00rootroot00000000000000# flake8: noqa from . import environment from .config import ConfigurationError from .config import DOCKER_CONFIG_KEYS from .config import find from .config import is_url from .config import load from .config import merge_environment from .config import merge_labels from .config import parse_environment from .config import parse_labels from .config import resolve_build_args compose-1.29.2/compose/config/compose_spec.json000066400000000000000000000622121404620552300215420ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft/2019-09/schema#", "id": "compose_spec.json", "type": "object", "title": "Compose Specification", "description": "The Compose file is a YAML file defining a multi-containers based application.", "properties": { "version": { "type": "string", "description": "Version of the Compose specification used. Tools not implementing required version MUST reject the configuration file." 
}, "services": { "id": "#/properties/services", "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "$ref": "#/definitions/service" } }, "additionalProperties": false }, "networks": { "id": "#/properties/networks", "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "$ref": "#/definitions/network" } } }, "volumes": { "id": "#/properties/volumes", "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "$ref": "#/definitions/volume" } }, "additionalProperties": false }, "secrets": { "id": "#/properties/secrets", "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "$ref": "#/definitions/secret" } }, "additionalProperties": false }, "configs": { "id": "#/properties/configs", "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "$ref": "#/definitions/config" } }, "additionalProperties": false } }, "patternProperties": {"^x-": {}}, "additionalProperties": false, "definitions": { "service": { "id": "#/definitions/service", "type": "object", "properties": { "deploy": {"$ref": "#/definitions/deployment"}, "build": { "oneOf": [ {"type": "string"}, { "type": "object", "properties": { "context": {"type": "string"}, "dockerfile": {"type": "string"}, "args": {"$ref": "#/definitions/list_or_dict"}, "labels": {"$ref": "#/definitions/list_or_dict"}, "cache_from": {"type": "array", "items": {"type": "string"}}, "network": {"type": "string"}, "target": {"type": "string"}, "shm_size": {"type": ["integer", "string"]}, "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, "isolation": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } ] }, "blkio_config": { "type": "object", "properties": { "device_read_bps": { "type": "array", "items": {"$ref": "#/definitions/blkio_limit"} }, "device_read_iops": { "type": "array", "items": {"$ref": "#/definitions/blkio_limit"} }, "device_write_bps": { "type": "array", "items": {"$ref": "#/definitions/blkio_limit"} }, "device_write_iops": { "type": "array", "items": {"$ref": "#/definitions/blkio_limit"} }, "weight": {"type": "integer"}, "weight_device": { "type": "array", "items": {"$ref": "#/definitions/blkio_weight"} } }, "additionalProperties": false }, "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cgroup_parent": {"type": "string"}, "command": { "oneOf": [ {"type": "string"}, {"type": "array", "items": {"type": "string"}} ] }, "configs": { "type": "array", "items": { "oneOf": [ {"type": "string"}, { "type": "object", "properties": { "source": {"type": "string"}, "target": {"type": "string"}, "uid": {"type": "string"}, "gid": {"type": "string"}, "mode": {"type": "number"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } ] } }, "container_name": {"type": "string"}, "cpu_count": {"type": "integer", "minimum": 0}, "cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100}, "cpu_shares": {"type": ["number", "string"]}, "cpu_quota": {"type": ["number", "string"]}, "cpu_period": {"type": ["number", "string"]}, "cpu_rt_period": {"type": ["number", "string"]}, "cpu_rt_runtime": {"type": ["number", "string"]}, "cpus": {"type": ["number", "string"]}, "cpuset": {"type": "string"}, "credential_spec": { "type": "object", "properties": { "config": {"type": "string"}, "file": {"type": "string"}, "registry": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "depends_on": { "oneOf": [ {"$ref": 
"#/definitions/list_of_strings"}, { "type": "object", "additionalProperties": false, "patternProperties": { "^[a-zA-Z0-9._-]+$": { "type": "object", "additionalProperties": false, "properties": { "condition": { "type": "string", "enum": ["service_started", "service_healthy", "service_completed_successfully"] } }, "required": ["condition"] } } } ] }, "device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"}, "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "dns": {"$ref": "#/definitions/string_or_list"}, "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, "dns_search": {"$ref": "#/definitions/string_or_list"}, "domainname": {"type": "string"}, "entrypoint": { "oneOf": [ {"type": "string"}, {"type": "array", "items": {"type": "string"}} ] }, "env_file": {"$ref": "#/definitions/string_or_list"}, "environment": {"$ref": "#/definitions/list_or_dict"}, "expose": { "type": "array", "items": { "type": ["string", "number"], "format": "expose" }, "uniqueItems": true }, "extends": { "oneOf": [ {"type": "string"}, { "type": "object", "properties": { "service": {"type": "string"}, "file": {"type": "string"} }, "required": ["service"], "additionalProperties": false } ] }, "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, "group_add": { "type": "array", "items": { "type": ["string", "number"] }, "uniqueItems": true }, "healthcheck": {"$ref": "#/definitions/healthcheck"}, "hostname": {"type": "string"}, "image": {"type": "string"}, "init": {"type": "boolean"}, "ipc": {"type": "string"}, "isolation": {"type": "string"}, "labels": {"$ref": "#/definitions/list_or_dict"}, "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "logging": { "type": "object", "properties": { "driver": {"type": "string"}, "options": { "type": "object", "patternProperties": { "^.+$": {"type": ["string", "number", "null"]} } } }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "mac_address": {"type": "string"}, "mem_limit": {"type": ["number", "string"]}, "mem_reservation": {"type": ["string", "integer"]}, "mem_swappiness": {"type": "integer"}, "memswap_limit": {"type": ["number", "string"]}, "network_mode": {"type": "string"}, "networks": { "oneOf": [ {"$ref": "#/definitions/list_of_strings"}, { "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "oneOf": [ { "type": "object", "properties": { "aliases": {"$ref": "#/definitions/list_of_strings"}, "ipv4_address": {"type": "string"}, "ipv6_address": {"type": "string"}, "link_local_ips": {"$ref": "#/definitions/list_of_strings"}, "priority": {"type": "number"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, {"type": "null"} ] } }, "additionalProperties": false } ] }, "oom_kill_disable": {"type": "boolean"}, "oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000}, "pid": {"type": ["string", "null"]}, "pids_limit": {"type": ["number", "string"]}, "platform": {"type": "string"}, "ports": { "type": "array", "items": { "oneOf": [ {"type": "number", "format": "ports"}, {"type": "string", "format": "ports"}, { "type": "object", "properties": { "mode": {"type": "string"}, "target": {"type": "integer"}, "published": {"type": "integer"}, "protocol": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } ] }, "uniqueItems": true }, "privileged": {"type": "boolean"}, "profiles": {"$ref": 
"#/definitions/list_of_strings"}, "pull_policy": {"type": "string", "enum": [ "always", "never", "if_not_present", "build" ]}, "read_only": {"type": "boolean"}, "restart": {"type": "string"}, "runtime": { "type": "string" }, "scale": { "type": "integer" }, "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "shm_size": {"type": ["number", "string"]}, "secrets": { "type": "array", "items": { "oneOf": [ {"type": "string"}, { "type": "object", "properties": { "source": {"type": "string"}, "target": {"type": "string"}, "uid": {"type": "string"}, "gid": {"type": "string"}, "mode": {"type": "number"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } ] } }, "sysctls": {"$ref": "#/definitions/list_or_dict"}, "stdin_open": {"type": "boolean"}, "stop_grace_period": {"type": "string", "format": "duration"}, "stop_signal": {"type": "string"}, "storage_opt": {"type": "object"}, "tmpfs": {"$ref": "#/definitions/string_or_list"}, "tty": {"type": "boolean"}, "ulimits": { "type": "object", "patternProperties": { "^[a-z]+$": { "oneOf": [ {"type": "integer"}, { "type": "object", "properties": { "hard": {"type": "integer"}, "soft": {"type": "integer"} }, "required": ["soft", "hard"], "additionalProperties": false, "patternProperties": {"^x-": {}} } ] } } }, "user": {"type": "string"}, "userns_mode": {"type": "string"}, "volumes": { "type": "array", "items": { "oneOf": [ {"type": "string"}, { "type": "object", "required": ["type"], "properties": { "type": {"type": "string"}, "source": {"type": "string"}, "target": {"type": "string"}, "read_only": {"type": "boolean"}, "consistency": {"type": "string"}, "bind": { "type": "object", "properties": { "propagation": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "volume": { "type": "object", "properties": { "nocopy": {"type": "boolean"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "tmpfs": { "type": "object", "properties": { "size": { "type": "integer", "minimum": 0 } }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "additionalProperties": false, "patternProperties": {"^x-": {}} } ] }, "uniqueItems": true }, "volumes_from": { "type": "array", "items": {"type": "string"}, "uniqueItems": true }, "working_dir": {"type": "string"} }, "patternProperties": {"^x-": {}}, "additionalProperties": false }, "healthcheck": { "id": "#/definitions/healthcheck", "type": "object", "properties": { "disable": {"type": "boolean"}, "interval": {"type": "string", "format": "duration"}, "retries": {"type": "number"}, "test": { "oneOf": [ {"type": "string"}, {"type": "array", "items": {"type": "string"}} ] }, "timeout": {"type": "string", "format": "duration"}, "start_period": {"type": "string", "format": "duration"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "deployment": { "id": "#/definitions/deployment", "type": ["object", "null"], "properties": { "mode": {"type": "string"}, "endpoint_mode": {"type": "string"}, "replicas": {"type": "integer"}, "labels": {"$ref": "#/definitions/list_or_dict"}, "rollback_config": { "type": "object", "properties": { "parallelism": {"type": "integer"}, "delay": {"type": "string", "format": "duration"}, "failure_action": {"type": "string"}, "monitor": {"type": "string", "format": "duration"}, "max_failure_ratio": {"type": "number"}, "order": {"type": "string", "enum": [ "start-first", "stop-first" ]} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "update_config": { 
"type": "object", "properties": { "parallelism": {"type": "integer"}, "delay": {"type": "string", "format": "duration"}, "failure_action": {"type": "string"}, "monitor": {"type": "string", "format": "duration"}, "max_failure_ratio": {"type": "number"}, "order": {"type": "string", "enum": [ "start-first", "stop-first" ]} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "resources": { "type": "object", "properties": { "limits": { "type": "object", "properties": { "cpus": {"type": ["number", "string"]}, "memory": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "reservations": { "type": "object", "properties": { "cpus": {"type": ["number", "string"]}, "memory": {"type": "string"}, "generic_resources": {"$ref": "#/definitions/generic_resources"}, "devices": {"$ref": "#/definitions/devices"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "restart_policy": { "type": "object", "properties": { "condition": {"type": "string"}, "delay": {"type": "string", "format": "duration"}, "max_attempts": {"type": "integer"}, "window": {"type": "string", "format": "duration"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "placement": { "type": "object", "properties": { "constraints": {"type": "array", "items": {"type": "string"}}, "preferences": { "type": "array", "items": { "type": "object", "properties": { "spread": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "max_replicas_per_node": {"type": "integer"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "generic_resources": { "id": "#/definitions/generic_resources", "type": "array", "items": { "type": "object", "properties": { "discrete_resource_spec": { "type": "object", "properties": { "kind": {"type": "string"}, "value": {"type": "number"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "devices": { "id": "#/definitions/devices", "type": "array", "items": { "type": "object", "properties": { "capabilities": {"$ref": "#/definitions/list_of_strings"}, "count": {"type": ["string", "integer"]}, "device_ids": {"$ref": "#/definitions/list_of_strings"}, "driver":{"type": "string"}, "options":{"$ref": "#/definitions/list_or_dict"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "network": { "id": "#/definitions/network", "type": ["object", "null"], "properties": { "name": {"type": "string"}, "driver": {"type": "string"}, "driver_opts": { "type": "object", "patternProperties": { "^.+$": {"type": ["string", "number"]} } }, "ipam": { "type": "object", "properties": { "driver": {"type": "string"}, "config": { "type": "array", "items": { "type": "object", "properties": { "subnet": {"type": "string", "format": "subnet_ip_address"}, "ip_range": {"type": "string"}, "gateway": {"type": "string"}, "aux_addresses": { "type": "object", "additionalProperties": false, "patternProperties": {"^.+$": {"type": "string"}} } }, "additionalProperties": false, "patternProperties": {"^x-": {}} } }, "options": { "type": "object", "additionalProperties": false, "patternProperties": {"^.+$": {"type": "string"}} } }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "external": { "type": ["boolean", "object"], "properties": { "name": { "deprecated": 
true, "type": "string" } }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "internal": {"type": "boolean"}, "enable_ipv6": {"type": "boolean"}, "attachable": {"type": "boolean"}, "labels": {"$ref": "#/definitions/list_or_dict"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "volume": { "id": "#/definitions/volume", "type": ["object", "null"], "properties": { "name": {"type": "string"}, "driver": {"type": "string"}, "driver_opts": { "type": "object", "patternProperties": { "^.+$": {"type": ["string", "number"]} } }, "external": { "type": ["boolean", "object"], "properties": { "name": { "deprecated": true, "type": "string" } }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "labels": {"$ref": "#/definitions/list_or_dict"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "secret": { "id": "#/definitions/secret", "type": "object", "properties": { "name": {"type": "string"}, "file": {"type": "string"}, "external": { "type": ["boolean", "object"], "properties": { "name": {"type": "string"} } }, "labels": {"$ref": "#/definitions/list_or_dict"}, "driver": {"type": "string"}, "driver_opts": { "type": "object", "patternProperties": { "^.+$": {"type": ["string", "number"]} } }, "template_driver": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "config": { "id": "#/definitions/config", "type": "object", "properties": { "name": {"type": "string"}, "file": {"type": "string"}, "external": { "type": ["boolean", "object"], "properties": { "name": { "deprecated": true, "type": "string" } } }, "labels": {"$ref": "#/definitions/list_or_dict"}, "template_driver": {"type": "string"} }, "additionalProperties": false, "patternProperties": {"^x-": {}} }, "string_or_list": { "oneOf": [ {"type": "string"}, {"$ref": "#/definitions/list_of_strings"} ] }, "list_of_strings": { "type": "array", "items": {"type": "string"}, "uniqueItems": true }, "list_or_dict": { "oneOf": [ { "type": "object", "patternProperties": { ".+": { "type": ["string", "number", "null"] } }, "additionalProperties": false }, {"type": "array", "items": {"type": "string"}, "uniqueItems": true} ] }, "blkio_limit": { "type": "object", "properties": { "path": {"type": "string"}, "rate": {"type": ["integer", "string"]} }, "additionalProperties": false }, "blkio_weight": { "type": "object", "properties": { "path": {"type": "string"}, "weight": {"type": "integer"} }, "additionalProperties": false }, "constraints": { "service": { "id": "#/definitions/constraints/service", "anyOf": [ {"required": ["build"]}, {"required": ["image"]} ], "properties": { "build": { "required": ["context"] } } } } } } compose-1.29.2/compose/config/config.py000066400000000000000000001416571404620552300200220ustar00rootroot00000000000000import functools import logging import os import re import string import sys from collections import namedtuple from itertools import chain from operator import attrgetter from operator import itemgetter import yaml try: from functools import cached_property except ImportError: from cached_property import cached_property from . 
import types from ..const import COMPOSE_SPEC as VERSION from ..const import COMPOSEFILE_V1 as V1 from ..utils import build_string_dict from ..utils import json_hash from ..utils import parse_bytes from ..utils import parse_nanoseconds_int from ..utils import splitdrive from ..version import ComposeVersion from .environment import env_vars_from_file from .environment import Environment from .environment import split_env from .errors import CircularReference from .errors import ComposeFileNotFound from .errors import ConfigurationError from .errors import DuplicateOverrideFileFound from .errors import VERSION_EXPLANATION from .interpolation import interpolate_environment_variables from .sort_services import get_container_name_from_network_mode from .sort_services import get_service_name_from_network_mode from .sort_services import sort_service_dicts from .types import MountSpec from .types import parse_extra_hosts from .types import parse_restart_spec from .types import SecurityOpt from .types import ServiceLink from .types import ServicePort from .types import VolumeFromSpec from .types import VolumeSpec from .validation import match_named_volumes from .validation import validate_against_config_schema from .validation import validate_config_section from .validation import validate_cpu from .validation import validate_credential_spec from .validation import validate_depends_on from .validation import validate_extends_file_path from .validation import validate_healthcheck from .validation import validate_ipc_mode from .validation import validate_links from .validation import validate_network_mode from .validation import validate_pid_mode from .validation import validate_service_constraints from .validation import validate_top_level_object from .validation import validate_ulimits DOCKER_CONFIG_KEYS = [ 'cap_add', 'cap_drop', 'cgroup_parent', 'command', 'cpu_count', 'cpu_percent', 'cpu_period', 'cpu_quota', 'cpu_rt_period', 'cpu_rt_runtime', 'cpu_shares', 'cpus', 'cpuset', 'detach', 'device_cgroup_rules', 'devices', 'dns', 'dns_search', 'dns_opt', 'domainname', 'entrypoint', 'env_file', 'environment', 'extra_hosts', 'group_add', 'hostname', 'healthcheck', 'image', 'ipc', 'isolation', 'labels', 'links', 'mac_address', 'mem_limit', 'mem_reservation', 'memswap_limit', 'mem_swappiness', 'net', 'oom_score_adj', 'oom_kill_disable', 'pid', 'ports', 'privileged', 'read_only', 'restart', 'runtime', 'secrets', 'security_opt', 'shm_size', 'pids_limit', 'stdin_open', 'stop_signal', 'sysctls', 'tty', 'user', 'userns_mode', 'volume_driver', 'volumes', 'volumes_from', 'working_dir', ] ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [ 'blkio_config', 'build', 'container_name', 'credential_spec', 'dockerfile', 'init', 'log_driver', 'log_opt', 'logging', 'network_mode', 'platform', 'profiles', 'scale', 'stop_grace_period', ] DOCKER_VALID_URL_PREFIXES = ( 'http://', 'https://', 'git://', 'github.com/', 'git@', ) SUPPORTED_FILENAMES = [ 'docker-compose.yml', 'docker-compose.yaml', 'compose.yml', 'compose.yaml', ] DEFAULT_OVERRIDE_FILENAMES = ('docker-compose.override.yml', 'docker-compose.override.yaml', 'compose.override.yml', 'compose.override.yaml') log = logging.getLogger(__name__) class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')): """ :param working_dir: the directory to use for relative paths in the config :type working_dir: string :param config_files: list of configuration files to load :type config_files: list of :class:`ConfigFile` :param environment: computed environment 
values for this project :type environment: :class:`environment.Environment` """ def __new__(cls, working_dir, config_files, environment=None): if environment is None: environment = Environment.from_env_file(working_dir) return super().__new__( cls, working_dir, config_files, environment ) class ConfigFile(namedtuple('_ConfigFile', 'filename config')): """ :param filename: filename of the config file :type filename: string :param config: contents of the config file :type config: :class:`dict` """ @classmethod def from_filename(cls, filename): return cls(filename, load_yaml(filename)) @cached_property def config_version(self): version = self.config.get('version', None) if isinstance(version, dict): return V1 return ComposeVersion(version) if version else self.version @cached_property def version(self): version = self.config.get('version', None) if not version: # no version is specified in the config file services = self.config.get('services', None) networks = self.config.get('networks', None) volumes = self.config.get('volumes', None) if services or networks or volumes: # validate V2/V3 structure for section in ['services', 'networks', 'volumes']: validate_config_section( self.filename, self.config.get(section, {}), section) return VERSION # validate V1 structure validate_config_section( self.filename, self.config, 'services') return V1 if isinstance(version, dict): log.warning('Unexpected type for "version" key in "{}". Assuming ' '"version" is the name of a service, and defaulting to ' 'Compose file version {}.'.format(self.filename, V1)) return V1 if not isinstance(version, str): raise ConfigurationError( 'Version in "{}" is invalid - it should be a string.' .format(self.filename)) if isinstance(version, str): version_pattern = re.compile(r"^[1-3]+(\.\d+)?$") if not version_pattern.match(version): raise ConfigurationError( 'Version "{}" in "{}" is invalid.' .format(version, self.filename)) if version.startswith("1"): raise ConfigurationError( 'Version in "{}" is invalid. 
{}' .format(self.filename, VERSION_EXPLANATION) ) return VERSION def get_service(self, name): return self.get_service_dicts()[name] def get_service_dicts(self): if self.version == V1: return self.config return self.config.get('services', {}) def get_volumes(self): return {} if self.version == V1 else self.config.get('volumes', {}) def get_networks(self): return {} if self.version == V1 else self.config.get('networks', {}) def get_secrets(self): return {} if self.version == V1 else self.config.get('secrets', {}) def get_configs(self): return {} if self.version == V1 else self.config.get('configs', {}) class Config(namedtuple('_Config', 'config_version version services volumes networks secrets configs')): """ :param config_version: configuration file version :type config_version: int :param version: configuration version :type version: int :param services: List of service description dictionaries :type services: :class:`list` :param volumes: Dictionary mapping volume names to description dictionaries :type volumes: :class:`dict` :param networks: Dictionary mapping network names to description dictionaries :type networks: :class:`dict` :param secrets: Dictionary mapping secret names to description dictionaries :type secrets: :class:`dict` :param configs: Dictionary mapping config names to description dictionaries :type configs: :class:`dict` """ class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')): @classmethod def with_abs_paths(cls, working_dir, filename, name, config): if not working_dir: raise ValueError("No working_dir for ServiceConfig.") return cls( os.path.abspath(working_dir), os.path.abspath(filename) if filename else filename, name, config) def find(base_dir, filenames, environment, override_dir=None): if filenames == ['-']: return ConfigDetails( os.path.abspath(override_dir) if override_dir else os.getcwd(), [ConfigFile(None, yaml.safe_load(sys.stdin))], environment ) if filenames: filenames = [os.path.join(base_dir, f) for f in filenames] else: # search for compose files in the base dir and its parents filenames = get_default_config_files(base_dir) if not filenames and not override_dir: # none found in base_dir and no override_dir defined raise ComposeFileNotFound(SUPPORTED_FILENAMES) if not filenames: # search for compose files in the project directory and its parents filenames = get_default_config_files(override_dir) if not filenames: raise ComposeFileNotFound(SUPPORTED_FILENAMES) log.debug("Using configuration files: {}".format(",".join(filenames))) return ConfigDetails( override_dir if override_dir else os.path.dirname(filenames[0]), [ConfigFile.from_filename(f) for f in filenames], environment ) def validate_config_version(config_files): main_file = config_files[0] validate_top_level_object(main_file) for next_file in config_files[1:]: validate_top_level_object(next_file) if main_file.version != next_file.version: raise ConfigurationError( "Version mismatch: file {} specifies version {} but " "extension file {} uses version {}".format( main_file.filename, main_file.version, next_file.filename, next_file.version)) def get_default_config_files(base_dir): (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir) if not candidates: return None winner = candidates[0] if len(candidates) > 1: log.warning("Found multiple config files with supported names: %s", ", ".join(candidates)) log.warning("Using %s\n", winner) return [os.path.join(path, winner)] + get_default_override_file(path) def get_default_override_file(path): 
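    # At most one default override file may sit beside the main compose file;
    # finding several (e.g. docker-compose.override.yml alongside
    # compose.override.yml) is ambiguous and raises DuplicateOverrideFileFound.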
override_files_in_path = [os.path.join(path, override_filename) for override_filename in DEFAULT_OVERRIDE_FILENAMES if os.path.exists(os.path.join(path, override_filename))] if len(override_files_in_path) > 1: raise DuplicateOverrideFileFound(override_files_in_path) return override_files_in_path def find_candidates_in_parent_dirs(filenames, path): """ Given a directory path to start, looks for filenames in the directory, and then each parent directory successively, until found. Returns tuple (candidates, path). """ candidates = [filename for filename in filenames if os.path.exists(os.path.join(path, filename))] if not candidates: parent_dir = os.path.join(path, '..') if os.path.abspath(parent_dir) != os.path.abspath(path): return find_candidates_in_parent_dirs(filenames, parent_dir) return (candidates, path) def check_swarm_only_config(service_dicts): warning_template = ( "Some services ({services}) use the '{key}' key, which will be ignored. " "Compose does not support '{key}' configuration - use " "`docker stack deploy` to deploy to a swarm." ) key = 'configs' services = [s for s in service_dicts if s.get(key)] if services: log.warning( warning_template.format( services=", ".join(sorted(s['name'] for s in services)), key=key ) ) def load(config_details, interpolate=True): """Load the configuration from a working directory and a list of configuration files. Files are loaded in order, and merged on top of each other to create the final configuration. Return a fully interpolated, extended and validated configuration. """ # validate against latest version and if fails do it against v1 schema validate_config_version(config_details.config_files) processed_files = [ process_config_file(config_file, config_details.environment, interpolate=interpolate) for config_file in config_details.config_files ] config_details = config_details._replace(config_files=processed_files) main_file = config_details.config_files[0] volumes = load_mapping( config_details.config_files, 'get_volumes', 'Volume' ) networks = load_mapping( config_details.config_files, 'get_networks', 'Network' ) secrets = load_mapping( config_details.config_files, 'get_secrets', 'Secret', config_details.working_dir ) configs = load_mapping( config_details.config_files, 'get_configs', 'Config', config_details.working_dir ) service_dicts = load_services(config_details, main_file, interpolate=interpolate) if main_file.version != V1: for service_dict in service_dicts: match_named_volumes(service_dict, volumes) check_swarm_only_config(service_dicts) return Config(main_file.config_version, main_file.version, service_dicts, volumes, networks, secrets, configs) def load_mapping(config_files, get_func, entity_type, working_dir=None): mapping = {} for config_file in config_files: for name, config in getattr(config_file, get_func)().items(): mapping[name] = config or {} if not config: continue external = config.get('external') if external: validate_external(entity_type, name, config, config_file.version) if isinstance(external, dict): config['name'] = external.get('name') elif not config.get('name'): config['name'] = name if 'labels' in config: config['labels'] = parse_labels(config['labels']) if 'file' in config: config['file'] = expand_path(working_dir, config['file']) if 'driver_opts' in config: config['driver_opts'] = build_string_dict( config['driver_opts'] ) device = format_device_option(entity_type, config) if device: config['driver_opts']['device'] = device return mapping def format_device_option(entity_type, config): if entity_type != 
'Volume': return # default driver is 'local' driver = config.get('driver', 'local') if driver != 'local': return o = config['driver_opts'].get('o') device = config['driver_opts'].get('device') if o and o == 'bind' and device: fullpath = os.path.abspath(os.path.expanduser(device)) return fullpath def validate_external(entity_type, name, config, version): for k in config.keys(): if entity_type == 'Network' and k == 'driver': continue if k not in ['external', 'name']: raise ConfigurationError( "{} {} declared as external but specifies additional attributes " "({}).".format( entity_type, name, ', '.join(k for k in config if k != 'external'))) def load_services(config_details, config_file, interpolate=True): def build_service(service_name, service_dict, service_names): service_config = ServiceConfig.with_abs_paths( config_details.working_dir, config_file.filename, service_name, service_dict) resolver = ServiceExtendsResolver( service_config, config_file, environment=config_details.environment ) service_dict = process_service(resolver.run()) service_config = service_config._replace(config=service_dict) validate_service(service_config, service_names, config_file) service_dict = finalize_service( service_config, service_names, config_file.version, config_details.environment, interpolate ) return service_dict def build_services(service_config): service_names = service_config.keys() return sort_service_dicts([ build_service(name, service_dict, service_names) for name, service_dict in service_config.items() ]) def merge_services(base, override): all_service_names = set(base) | set(override) return { name: merge_service_dicts_from_files( base.get(name, {}), override.get(name, {}), config_file.version) for name in all_service_names } service_configs = [ file.get_service_dicts() for file in config_details.config_files ] service_config = functools.reduce(merge_services, service_configs) return build_services(service_config) def interpolate_config_section(config_file, config, section, environment): return interpolate_environment_variables( config_file.version, config, section, environment ) def process_config_section(config_file, config, section, environment, interpolate): validate_config_section(config_file.filename, config, section) if interpolate: return interpolate_environment_variables( config_file.version, config, section, environment) else: return config def process_config_file(config_file, environment, service_name=None, interpolate=True): services = process_config_section( config_file, config_file.get_service_dicts(), 'service', environment, interpolate, ) if config_file.version > V1: processed_config = dict(config_file.config) processed_config['services'] = services processed_config['volumes'] = process_config_section( config_file, config_file.get_volumes(), 'volume', environment, interpolate, ) processed_config['networks'] = process_config_section( config_file, config_file.get_networks(), 'network', environment, interpolate, ) processed_config['secrets'] = process_config_section( config_file, config_file.get_secrets(), 'secret', environment, interpolate, ) processed_config['configs'] = process_config_section( config_file, config_file.get_configs(), 'config', environment, interpolate, ) else: processed_config = services config_file = config_file._replace(config=processed_config) validate_against_config_schema(config_file, config_file.version) if service_name and service_name not in services: raise ConfigurationError( "Cannot extend service '{}' in {}: Service not found".format( service_name, 
config_file.filename)) return config_file class ServiceExtendsResolver: def __init__(self, service_config, config_file, environment, already_seen=None): self.service_config = service_config self.working_dir = service_config.working_dir self.already_seen = already_seen or [] self.config_file = config_file self.environment = environment @property def signature(self): return self.service_config.filename, self.service_config.name def detect_cycle(self): if self.signature in self.already_seen: raise CircularReference(self.already_seen + [self.signature]) def run(self): self.detect_cycle() if 'extends' in self.service_config.config: service_dict = self.resolve_extends(*self.validate_and_construct_extends()) return self.service_config._replace(config=service_dict) return self.service_config def validate_and_construct_extends(self): extends = self.service_config.config['extends'] if not isinstance(extends, dict): extends = {'service': extends} config_path = self.get_extended_config_path(extends) service_name = extends['service'] if config_path == os.path.abspath(self.config_file.filename): try: service_config = self.config_file.get_service(service_name) except KeyError: raise ConfigurationError( "Cannot extend service '{}' in {}: Service not found".format( service_name, config_path) ) else: extends_file = ConfigFile.from_filename(config_path) validate_config_version([self.config_file, extends_file]) extended_file = process_config_file( extends_file, self.environment, service_name=service_name ) service_config = extended_file.get_service(service_name) return config_path, service_config, service_name def resolve_extends(self, extended_config_path, service_dict, service_name): resolver = ServiceExtendsResolver( ServiceConfig.with_abs_paths( os.path.dirname(extended_config_path), extended_config_path, service_name, service_dict), self.config_file, already_seen=self.already_seen + [self.signature], environment=self.environment ) service_config = resolver.run() other_service_dict = process_service(service_config) validate_extended_service_dict( other_service_dict, extended_config_path, service_name) return merge_service_dicts( other_service_dict, self.service_config.config, self.config_file.version) def get_extended_config_path(self, extends_options): """Service we are extending either has a value for 'file' set, which we need to obtain a full path too or we are extending from a service defined in our own file. """ filename = self.service_config.filename validate_extends_file_path( self.service_config.name, extends_options, filename) if 'file' in extends_options: return expand_path(self.working_dir, extends_options['file']) return filename def resolve_environment(service_dict, environment=None, interpolate=True): """Unpack any environment variables from an env_file, if set. Interpolate environment values if set. 
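    Precedence, lowest to highest: values read from `env_file` files, then the
    service's `environment` mapping; entries whose value is left unset are
    resolved against the supplied host environment.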
""" env = {} for env_file in service_dict.get('env_file', []): env.update(env_vars_from_file(env_file, interpolate)) env.update(parse_environment(service_dict.get('environment'))) return dict(resolve_env_var(k, v, environment) for k, v in env.items()) def resolve_build_args(buildargs, environment): args = parse_build_arguments(buildargs) return dict(resolve_env_var(k, v, environment) for k, v in args.items()) def validate_extended_service_dict(service_dict, filename, service): error_prefix = "Cannot extend service '{}' in {}:".format(service, filename) if 'links' in service_dict: raise ConfigurationError( "%s services with 'links' cannot be extended" % error_prefix) if 'volumes_from' in service_dict: raise ConfigurationError( "%s services with 'volumes_from' cannot be extended" % error_prefix) if 'net' in service_dict: if get_container_name_from_network_mode(service_dict['net']): raise ConfigurationError( "%s services with 'net: container' cannot be extended" % error_prefix) if 'network_mode' in service_dict: if get_service_name_from_network_mode(service_dict['network_mode']): raise ConfigurationError( "%s services with 'network_mode: service' cannot be extended" % error_prefix) if 'depends_on' in service_dict: raise ConfigurationError( "%s services with 'depends_on' cannot be extended" % error_prefix) def validate_service(service_config, service_names, config_file): def build_image(): args = sys.argv[1:] if 'pull' in args: return False if '--no-build' in args: return False return True service_dict, service_name = service_config.config, service_config.name validate_service_constraints(service_dict, service_name, config_file) if build_image(): # We only care about valid paths when actually building images validate_paths(service_dict) validate_cpu(service_config) validate_ulimits(service_config) validate_ipc_mode(service_config, service_names) validate_network_mode(service_config, service_names) validate_pid_mode(service_config, service_names) validate_depends_on(service_config, service_names) validate_links(service_config, service_names) validate_healthcheck(service_config) validate_credential_spec(service_config) if not service_dict.get('image') and has_uppercase(service_name): raise ConfigurationError( "Service '{name}' contains uppercase characters which are not valid " "as part of an image name. Either use a lowercase service name or " "use the `image` field to set a custom name for the service image." 
.format(name=service_name)) def process_service(service_config): working_dir = service_config.working_dir service_dict = dict(service_config.config) if 'env_file' in service_dict: service_dict['env_file'] = [ expand_path(working_dir, path) for path in to_list(service_dict['env_file']) ] if 'build' in service_dict: process_build_section(service_dict, working_dir) if 'volumes' in service_dict and service_dict.get('volume_driver') is None: service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict) if 'sysctls' in service_dict: service_dict['sysctls'] = build_string_dict(parse_sysctls(service_dict['sysctls'])) if 'labels' in service_dict: service_dict['labels'] = parse_labels(service_dict['labels']) service_dict = process_depends_on(service_dict) for field in ['dns', 'dns_search', 'tmpfs']: if field in service_dict: service_dict[field] = to_list(service_dict[field]) service_dict = process_security_opt(process_blkio_config(process_ports( process_healthcheck(service_dict) ))) return service_dict def process_build_section(service_dict, working_dir): if isinstance(service_dict['build'], str): service_dict['build'] = resolve_build_path(working_dir, service_dict['build']) elif isinstance(service_dict['build'], dict): if 'context' in service_dict['build']: path = service_dict['build']['context'] service_dict['build']['context'] = resolve_build_path(working_dir, path) if 'labels' in service_dict['build']: service_dict['build']['labels'] = parse_labels(service_dict['build']['labels']) def process_ports(service_dict): if 'ports' not in service_dict: return service_dict ports = [] for port_definition in service_dict['ports']: if isinstance(port_definition, ServicePort): ports.append(port_definition) else: ports.extend(ServicePort.parse(port_definition)) service_dict['ports'] = ports return service_dict def process_depends_on(service_dict): if 'depends_on' in service_dict and not isinstance(service_dict['depends_on'], dict): service_dict['depends_on'] = { svc: {'condition': 'service_started'} for svc in service_dict['depends_on'] } return service_dict def process_blkio_config(service_dict): if not service_dict.get('blkio_config'): return service_dict for field in ['device_read_bps', 'device_write_bps']: if field in service_dict['blkio_config']: for v in service_dict['blkio_config'].get(field, []): rate = v.get('rate', 0) v['rate'] = parse_bytes(rate) if v['rate'] is None: raise ConfigurationError('Invalid format for bytes value: "{}"'.format(rate)) for field in ['device_read_iops', 'device_write_iops']: if field in service_dict['blkio_config']: for v in service_dict['blkio_config'].get(field, []): try: v['rate'] = int(v.get('rate', 0)) except ValueError: raise ConfigurationError( 'Invalid IOPS value: "{}". 
Must be a positive integer.'.format(v.get('rate')) ) return service_dict def process_healthcheck(service_dict): if 'healthcheck' not in service_dict: return service_dict hc = service_dict['healthcheck'] if 'disable' in hc: del hc['disable'] hc['test'] = ['NONE'] for field in ['interval', 'timeout', 'start_period']: if field not in hc or isinstance(hc[field], int): continue hc[field] = parse_nanoseconds_int(hc[field]) return service_dict def finalize_service_volumes(service_dict, environment): if 'volumes' in service_dict: finalized_volumes = [] normalize = environment.get_boolean('COMPOSE_CONVERT_WINDOWS_PATHS') win_host = environment.get_boolean('COMPOSE_FORCE_WINDOWS_HOST') for v in service_dict['volumes']: if isinstance(v, dict): finalized_volumes.append(MountSpec.parse(v, normalize, win_host)) else: finalized_volumes.append(VolumeSpec.parse(v, normalize, win_host)) duplicate_mounts = [] mounts = [v.as_volume_spec() if isinstance(v, MountSpec) else v for v in finalized_volumes] for mount in mounts: if list(map(attrgetter('internal'), mounts)).count(mount.internal) > 1: duplicate_mounts.append(mount.repr()) if duplicate_mounts: raise ConfigurationError("Duplicate mount points: [%s]" % ( ', '.join(duplicate_mounts))) service_dict['volumes'] = finalized_volumes return service_dict def finalize_service(service_config, service_names, version, environment, interpolate=True): service_dict = dict(service_config.config) if 'environment' in service_dict or 'env_file' in service_dict: service_dict['environment'] = resolve_environment(service_dict, environment, interpolate) service_dict.pop('env_file', None) if 'volumes_from' in service_dict: service_dict['volumes_from'] = [ VolumeFromSpec.parse(vf, service_names, version) for vf in service_dict['volumes_from'] ] service_dict = finalize_service_volumes(service_dict, environment) if 'net' in service_dict: network_mode = service_dict.pop('net') container_name = get_container_name_from_network_mode(network_mode) if container_name and container_name in service_names: service_dict['network_mode'] = 'service:{}'.format(container_name) else: service_dict['network_mode'] = network_mode if 'networks' in service_dict: service_dict['networks'] = parse_networks(service_dict['networks']) if 'restart' in service_dict: service_dict['restart'] = parse_restart_spec(service_dict['restart']) if 'secrets' in service_dict: service_dict['secrets'] = [ types.ServiceSecret.parse(s) for s in service_dict['secrets'] ] if 'configs' in service_dict: service_dict['configs'] = [ types.ServiceConfig.parse(c) for c in service_dict['configs'] ] normalize_build(service_dict, service_config.working_dir, environment) service_dict['name'] = service_config.name return normalize_v1_service_format(service_dict) def normalize_v1_service_format(service_dict): if 'log_driver' in service_dict or 'log_opt' in service_dict: if 'logging' not in service_dict: service_dict['logging'] = {} if 'log_driver' in service_dict: service_dict['logging']['driver'] = service_dict['log_driver'] del service_dict['log_driver'] if 'log_opt' in service_dict: service_dict['logging']['options'] = service_dict['log_opt'] del service_dict['log_opt'] if 'dockerfile' in service_dict: service_dict['build'] = service_dict.get('build', {}) service_dict['build'].update({ 'dockerfile': service_dict.pop('dockerfile') }) return service_dict def merge_service_dicts_from_files(base, override, version): """When merging services from multiple files we need to merge the `extends` field. 
This is not handled by `merge_service_dicts()` which is used to perform the `extends`. """ new_service = merge_service_dicts(base, override, version) if 'extends' in override: new_service['extends'] = override['extends'] elif 'extends' in base: new_service['extends'] = base['extends'] return new_service class MergeDict(dict): """A dict-like object responsible for merging two dicts into one.""" def __init__(self, base, override): self.base = base self.override = override def needs_merge(self, field): return field in self.base or field in self.override def merge_field(self, field, merge_func, default=None): if not self.needs_merge(field): return self[field] = merge_func( self.base.get(field, default), self.override.get(field, default)) def merge_mapping(self, field, parse_func=None): if not self.needs_merge(field): return if parse_func is None: def parse_func(m): return m or {} self[field] = parse_func(self.base.get(field)) self[field].update(parse_func(self.override.get(field))) def merge_sequence(self, field, parse_func): def parse_sequence_func(seq): return to_mapping((parse_func(item) for item in seq), 'merge_field') if not self.needs_merge(field): return merged = parse_sequence_func(self.base.get(field, [])) merged.update(parse_sequence_func(self.override.get(field, []))) self[field] = [item.repr() for item in sorted(merged.values())] def merge_scalar(self, field): if self.needs_merge(field): self[field] = self.override.get(field, self.base.get(field)) def merge_service_dicts(base, override, version): md = MergeDict(base, override) md.merge_mapping('environment', parse_environment) md.merge_mapping('labels', parse_labels) md.merge_mapping('ulimits', parse_flat_dict) md.merge_mapping('sysctls', parse_sysctls) md.merge_mapping('depends_on', parse_depends_on) md.merge_mapping('storage_opt', parse_flat_dict) md.merge_sequence('links', ServiceLink.parse) md.merge_sequence('secrets', types.ServiceSecret.parse) md.merge_sequence('configs', types.ServiceConfig.parse) md.merge_sequence('security_opt', types.SecurityOpt.parse) md.merge_mapping('extra_hosts', parse_extra_hosts) md.merge_field('networks', merge_networks, default={}) for field in ['volumes', 'devices']: md.merge_field(field, merge_path_mappings) for field in [ 'cap_add', 'cap_drop', 'expose', 'external_links', 'volumes_from', 'device_cgroup_rules', 'profiles', ]: md.merge_field(field, merge_unique_items_lists, default=[]) for field in ['dns', 'dns_search', 'env_file', 'tmpfs']: md.merge_field(field, merge_list_or_string) md.merge_field('logging', merge_logging, default={}) merge_ports(md, base, override) md.merge_field('blkio_config', merge_blkio_config, default={}) md.merge_field('healthcheck', merge_healthchecks, default={}) md.merge_field('deploy', merge_deploy, default={}) for field in set(ALLOWED_KEYS) - set(md): md.merge_scalar(field) if version == V1: legacy_v1_merge_image_or_build(md, base, override) elif md.needs_merge('build'): md['build'] = merge_build(md, base, override) return dict(md) def merge_unique_items_lists(base, override): override = (str(o) for o in override) base = (str(b) for b in base) return sorted(set(chain(base, override))) def merge_healthchecks(base, override): if override.get('disabled') is True: return override result = base.copy() result.update(override) return result def merge_ports(md, base, override): def parse_sequence_func(seq): acc = [s for item in seq for s in ServicePort.parse(item)] return to_mapping(acc, 'merge_field') field = 'ports' if not md.needs_merge(field): return merged = 
parse_sequence_func(md.base.get(field, [])) merged.update(parse_sequence_func(md.override.get(field, []))) md[field] = [item for item in sorted(merged.values(), key=attrgetter("target"))] def merge_build(output, base, override): def to_dict(service): build_config = service.get('build', {}) if isinstance(build_config, str): return {'context': build_config} return build_config md = MergeDict(to_dict(base), to_dict(override)) md.merge_scalar('context') md.merge_scalar('dockerfile') md.merge_scalar('network') md.merge_scalar('target') md.merge_scalar('shm_size') md.merge_scalar('isolation') md.merge_mapping('args', parse_build_arguments) md.merge_field('cache_from', merge_unique_items_lists, default=[]) md.merge_mapping('labels', parse_labels) md.merge_mapping('extra_hosts', parse_extra_hosts) return dict(md) def merge_deploy(base, override): md = MergeDict(base or {}, override or {}) md.merge_scalar('mode') md.merge_scalar('endpoint_mode') md.merge_scalar('replicas') md.merge_mapping('labels', parse_labels) md.merge_mapping('update_config') md.merge_mapping('rollback_config') md.merge_mapping('restart_policy') if md.needs_merge('resources'): resources_md = MergeDict(md.base.get('resources') or {}, md.override.get('resources') or {}) resources_md.merge_mapping('limits') resources_md.merge_field('reservations', merge_reservations, default={}) md['resources'] = dict(resources_md) if md.needs_merge('placement'): placement_md = MergeDict(md.base.get('placement') or {}, md.override.get('placement') or {}) placement_md.merge_scalar('max_replicas_per_node') placement_md.merge_field('constraints', merge_unique_items_lists, default=[]) placement_md.merge_field('preferences', merge_unique_objects_lists, default=[]) md['placement'] = dict(placement_md) return dict(md) def merge_networks(base, override): merged_networks = {} all_network_names = set(base) | set(override) base = {k: {} for k in base} if isinstance(base, list) else base override = {k: {} for k in override} if isinstance(override, list) else override for network_name in all_network_names: md = MergeDict(base.get(network_name) or {}, override.get(network_name) or {}) md.merge_field('aliases', merge_unique_items_lists, []) md.merge_field('link_local_ips', merge_unique_items_lists, []) md.merge_scalar('priority') md.merge_scalar('ipv4_address') md.merge_scalar('ipv6_address') merged_networks[network_name] = dict(md) return merged_networks def merge_reservations(base, override): md = MergeDict(base, override) md.merge_scalar('cpus') md.merge_scalar('memory') md.merge_sequence('generic_resources', types.GenericResource.parse) md.merge_field('devices', merge_unique_objects_lists, default=[]) return dict(md) def merge_unique_objects_lists(base, override): result = {json_hash(i): i for i in base + override} return [i[1] for i in sorted(((k, v) for k, v in result.items()), key=itemgetter(0))] def merge_blkio_config(base, override): md = MergeDict(base, override) md.merge_scalar('weight') def merge_blkio_limits(base, override): get_path = itemgetter('path') index = {get_path(b): b for b in base} index.update((get_path(o), o) for o in override) return sorted(index.values(), key=get_path) for field in [ "device_read_bps", "device_read_iops", "device_write_bps", "device_write_iops", "weight_device", ]: md.merge_field(field, merge_blkio_limits, default=[]) return dict(md) def merge_logging(base, override): md = MergeDict(base, override) md.merge_scalar('driver') if md.get('driver') == base.get('driver') or base.get('driver') is None: 
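        # The drivers match (or the base sets none): merge the option mappings
        # key by key. Otherwise the override's options replace the base's.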
md.merge_mapping('options', lambda m: m or {}) elif override.get('options'): md['options'] = override.get('options', {}) return dict(md) def legacy_v1_merge_image_or_build(output, base, override): output.pop('image', None) output.pop('build', None) if 'image' in override: output['image'] = override['image'] elif 'build' in override: output['build'] = override['build'] elif 'image' in base: output['image'] = base['image'] elif 'build' in base: output['build'] = base['build'] def merge_environment(base, override): env = parse_environment(base) env.update(parse_environment(override)) return env def merge_labels(base, override): labels = parse_labels(base) labels.update(parse_labels(override)) return labels def split_kv(kvpair): if '=' in kvpair: return kvpair.split('=', 1) else: return kvpair, '' def parse_dict_or_list(split_func, type_name, arguments): if not arguments: return {} if isinstance(arguments, list): return dict(split_func(e) for e in arguments) if isinstance(arguments, dict): return dict(arguments) raise ConfigurationError( "%s \"%s\" must be a list or mapping," % (type_name, arguments) ) parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments') parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment') parse_labels = functools.partial(parse_dict_or_list, split_kv, 'labels') parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks') parse_sysctls = functools.partial(parse_dict_or_list, split_kv, 'sysctls') parse_depends_on = functools.partial( parse_dict_or_list, lambda k: (k, {'condition': 'service_started'}), 'depends_on' ) def parse_flat_dict(d): if not d: return {} if isinstance(d, dict): return dict(d) raise ConfigurationError("Invalid type: expected mapping") def resolve_env_var(key, val, environment): if val is not None: return key, val elif environment and key in environment: return key, environment[key] else: return key, None def resolve_volume_paths(working_dir, service_dict): return [ resolve_volume_path(working_dir, volume) for volume in service_dict['volumes'] ] def resolve_volume_path(working_dir, volume): if isinstance(volume, dict): if volume.get('source', '').startswith(('.', '~')) and volume['type'] == 'bind': volume['source'] = expand_path(working_dir, volume['source']) return volume mount_params = None container_path, mount_params = split_path_mapping(volume) if mount_params is not None: host_path, mode = mount_params if host_path is None: return container_path if host_path.startswith('.'): host_path = expand_path(working_dir, host_path) host_path = os.path.expanduser(host_path) return "{}:{}{}".format(host_path, container_path, (':' + mode if mode else '')) return container_path def normalize_build(service_dict, working_dir, environment): if 'build' in service_dict: build = {} # Shortcut where specifying a string is treated as the build context if isinstance(service_dict['build'], str): build['context'] = service_dict.pop('build') else: build.update(service_dict['build']) if 'args' in build: build['args'] = build_string_dict( resolve_build_args(build.get('args'), environment) ) service_dict['build'] = build def resolve_build_path(working_dir, build_path): if is_url(build_path): return build_path return expand_path(working_dir, build_path) def is_url(build_path): return build_path.startswith(DOCKER_VALID_URL_PREFIXES) def validate_paths(service_dict): if 'build' in service_dict: build = service_dict.get('build', {}) if isinstance(build, str): build_path = build elif 
isinstance(build, dict) and 'context' in build: build_path = build['context'] else: # We have a build section but no context, so nothing to validate return if ( not is_url(build_path) and (not os.path.exists(build_path) or not os.access(build_path, os.R_OK)) ): raise ConfigurationError( "build path %s either does not exist, is not accessible, " "or is not a valid URL." % build_path) def merge_path_mappings(base, override): d = dict_from_path_mappings(base) d.update(dict_from_path_mappings(override)) return path_mappings_from_dict(d) def dict_from_path_mappings(path_mappings): if path_mappings: return dict(split_path_mapping(v) for v in path_mappings) else: return {} def path_mappings_from_dict(d): return [join_path_mapping(v) for v in sorted(d.items())] def split_path_mapping(volume_path): """ Ascertain if the volume_path contains a host path as well as a container path. Using splitdrive so windows absolute paths won't cause issues with splitting on ':'. """ if isinstance(volume_path, dict): return (volume_path.get('target'), volume_path) drive, volume_config = splitdrive(volume_path) if ':' in volume_config: (host, container) = volume_config.split(':', 1) container_drive, container_path = splitdrive(container) mode = None if ':' in container_path: container_path, mode = container_path.rsplit(':', 1) return (container_drive + container_path, (drive + host, mode)) else: return (volume_path, None) def process_security_opt(service_dict): security_opts = service_dict.get('security_opt', []) result = [] for value in security_opts: result.append(SecurityOpt.parse(value)) if result: service_dict['security_opt'] = result return service_dict def join_path_mapping(pair): (container, host) = pair if isinstance(host, dict): return host elif host is None: return container else: host, mode = host result = ":".join((host, container)) if mode: result += ":" + mode return result def expand_path(working_dir, path): return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path))) def merge_list_or_string(base, override): return to_list(base) + to_list(override) def to_list(value): if value is None: return [] elif isinstance(value, str): return [value] else: return value def to_mapping(sequence, key_field): return {getattr(item, key_field): item for item in sequence} def has_uppercase(name): return any(char in string.ascii_uppercase for char in name) def load_yaml(filename, encoding=None, binary=True): try: with open(filename, 'rb' if binary else 'r', encoding=encoding) as fh: return yaml.safe_load(fh) except (OSError, yaml.YAMLError, UnicodeDecodeError) as e: if encoding is None: # Sometimes the user's locale sets an encoding that doesn't match # the YAML files. Im such cases, retry once with the "default" # UTF-8 encoding return load_yaml(filename, encoding='utf-8-sig', binary=False) error_name = getattr(e, '__module__', '') + '.' 
+ e.__class__.__name__ raise ConfigurationError("{}: {}".format(error_name, e)) compose-1.29.2/compose/config/config_schema_v1.json000066400000000000000000000133211404620552300222530ustar00rootroot00000000000000{ "$schema": "http://json-schema.org/draft-04/schema#", "id": "config_schema_v1.json", "type": "object", "patternProperties": { "^[a-zA-Z0-9._-]+$": { "$ref": "#/definitions/service" } }, "additionalProperties": false, "definitions": { "service": { "id": "#/definitions/service", "type": "object", "properties": { "build": {"type": "string"}, "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cgroup_parent": {"type": "string"}, "command": { "oneOf": [ {"type": "string"}, {"type": "array", "items": {"type": "string"}} ] }, "container_name": {"type": "string"}, "cpu_shares": {"type": ["number", "string"]}, "cpu_quota": {"type": ["number", "string"]}, "cpuset": {"type": "string"}, "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "dns": {"$ref": "#/definitions/string_or_list"}, "dns_search": {"$ref": "#/definitions/string_or_list"}, "dockerfile": {"type": "string"}, "domainname": {"type": "string"}, "entrypoint": { "oneOf": [ {"type": "string"}, {"type": "array", "items": {"type": "string"}} ] }, "env_file": {"$ref": "#/definitions/string_or_list"}, "environment": {"$ref": "#/definitions/list_or_dict"}, "expose": { "type": "array", "items": { "type": ["string", "number"], "format": "expose" }, "uniqueItems": true }, "extends": { "oneOf": [ { "type": "string" }, { "type": "object", "properties": { "service": {"type": "string"}, "file": {"type": "string"} }, "required": ["service"], "additionalProperties": false } ] }, "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "hostname": {"type": "string"}, "image": {"type": "string"}, "ipc": {"type": "string"}, "labels": {"$ref": "#/definitions/labels"}, "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "log_driver": {"type": "string"}, "log_opt": {"type": "object"}, "mac_address": {"type": "string"}, "mem_limit": {"type": ["number", "string"]}, "memswap_limit": {"type": ["number", "string"]}, "mem_swappiness": {"type": "integer"}, "net": {"type": "string"}, "pid": {"type": ["string", "null"]}, "ports": { "type": "array", "items": { "type": ["string", "number"], "format": "ports" }, "uniqueItems": true }, "privileged": {"type": "boolean"}, "read_only": {"type": "boolean"}, "restart": {"type": "string"}, "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "shm_size": {"type": ["number", "string"]}, "stdin_open": {"type": "boolean"}, "stop_signal": {"type": "string"}, "tty": {"type": "boolean"}, "ulimits": { "type": "object", "patternProperties": { "^[a-z]+$": { "oneOf": [ {"type": "integer"}, { "type":"object", "properties": { "hard": {"type": "integer"}, "soft": {"type": "integer"} }, "required": ["soft", "hard"], "additionalProperties": false } ] } } }, "user": {"type": "string"}, "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "volume_driver": {"type": "string"}, "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "working_dir": {"type": "string"} }, "dependencies": { "memswap_limit": ["mem_limit"] }, "additionalProperties": false }, "string_or_list": { "oneOf": [ {"type": "string"}, {"$ref": 
"#/definitions/list_of_strings"} ] }, "list_of_strings": { "type": "array", "items": {"type": "string"}, "uniqueItems": true }, "list_or_dict": { "oneOf": [ { "type": "object", "patternProperties": { ".+": { "type": ["string", "number", "null"] } }, "additionalProperties": false }, {"type": "array", "items": {"type": "string"}, "uniqueItems": true} ] }, "labels": { "oneOf": [ { "type": "object", "patternProperties": { ".+": { "type": "string" } }, "additionalProperties": false }, {"type": "array", "items": {"type": "string"}, "uniqueItems": true} ] }, "constraints": { "service": { "id": "#/definitions/constraints/service", "anyOf": [ { "required": ["build"], "not": {"required": ["image"]} }, { "required": ["image"], "not": {"anyOf": [ {"required": ["build"]}, {"required": ["dockerfile"]} ]} } ] } } } } compose-1.29.2/compose/config/environment.py000066400000000000000000000074311404620552300211100ustar00rootroot00000000000000import logging import os import re import dotenv from ..const import IS_WINDOWS_PLATFORM from .errors import ConfigurationError from .errors import EnvFileNotFound log = logging.getLogger(__name__) def split_env(env): if isinstance(env, bytes): env = env.decode('utf-8', 'replace') key = value = None if '=' in env: key, value = env.split('=', 1) else: key = env if re.search(r'\s', key): raise ConfigurationError( "environment variable name '{}' may not contain whitespace.".format(key) ) return key, value def env_vars_from_file(filename, interpolate=True): """ Read in a line delimited file of environment variables. """ if not os.path.exists(filename): raise EnvFileNotFound("Couldn't find env file: {}".format(filename)) elif not os.path.isfile(filename): raise EnvFileNotFound("{} is not a file.".format(filename)) env = dotenv.dotenv_values(dotenv_path=filename, encoding='utf-8-sig', interpolate=interpolate) for k, v in env.items(): env[k] = v if interpolate else v.replace('$', '$$') return env class Environment(dict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.missing_keys = [] self.silent = False @classmethod def from_env_file(cls, base_dir, env_file=None): def _initialize(): result = cls() if base_dir is None: return result if env_file: env_file_path = os.path.join(os.getcwd(), env_file) return cls(env_vars_from_file(env_file_path)) env_file_path = os.path.join(base_dir, '.env') try: return cls(env_vars_from_file(env_file_path)) except EnvFileNotFound: pass return result instance = _initialize() instance.update(os.environ) return instance @classmethod def from_command_line(cls, parsed_env_opts): result = cls() for k, v in parsed_env_opts.items(): # Values from the command line take priority, unless they're unset # in which case they take the value from the system's environment if v is None and k in os.environ: result[k] = os.environ[k] else: result[k] = v return result def __getitem__(self, key): try: return super().__getitem__(key) except KeyError: if IS_WINDOWS_PLATFORM: try: return super().__getitem__(key.upper()) except KeyError: pass if not self.silent and key not in self.missing_keys: log.warning( "The {} variable is not set. Defaulting to a blank string." 
.format(key) ) self.missing_keys.append(key) return "" def __contains__(self, key): result = super().__contains__(key) if IS_WINDOWS_PLATFORM: return ( result or super().__contains__(key.upper()) ) return result def get(self, key, *args, **kwargs): if IS_WINDOWS_PLATFORM: return super().get( key, super().get(key.upper(), *args, **kwargs) ) return super().get(key, *args, **kwargs) def get_boolean(self, key, default=False): # Convert a value to a boolean using "common sense" rules. # Unset, empty, "0" and "false" (i-case) yield False. # All other values yield True. value = self.get(key) if not value: return default if value.lower() in ['0', 'false']: return False return True compose-1.29.2/compose/config/errors.py000066400000000000000000000032751404620552300200620ustar00rootroot00000000000000VERSION_EXPLANATION = ( 'You might be seeing this error because you\'re using the wrong Compose file version. ' 'Either specify a supported version (e.g "2.2" or "3.3") and place ' 'your service definitions under the `services` key, or omit the `version` key ' 'and place your service definitions at the root of the file to use ' 'version 1.\nFor more on the Compose file format versions, see ' 'https://docs.docker.com/compose/compose-file/') class ConfigurationError(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class EnvFileNotFound(ConfigurationError): pass class DependencyError(ConfigurationError): pass class CircularReference(ConfigurationError): def __init__(self, trail): self.trail = trail @property def msg(self): lines = [ "{} in {}".format(service_name, filename) for (filename, service_name) in self.trail ] return "Circular reference:\n {}".format("\n extends ".join(lines)) class ComposeFileNotFound(ConfigurationError): def __init__(self, supported_filenames): super().__init__(""" Can't find a suitable configuration file in this directory or any parent. Are you in the right directory? Supported filenames: %s """ % ", ".join(supported_filenames)) class DuplicateOverrideFileFound(ConfigurationError): def __init__(self, override_filenames): self.override_filenames = override_filenames super().__init__( "Multiple override files found: {}. 
You may only use a single " "override file.".format(", ".join(override_filenames)) ) compose-1.29.2/compose/config/interpolation.py000066400000000000000000000235051404620552300214330ustar00rootroot00000000000000import logging import re from string import Template from .errors import ConfigurationError from compose.const import COMPOSEFILE_V1 as V1 from compose.utils import parse_bytes from compose.utils import parse_nanoseconds_int log = logging.getLogger(__name__) class Interpolator: def __init__(self, templater, mapping): self.templater = templater self.mapping = mapping def interpolate(self, string): try: return self.templater(string).substitute(self.mapping) except ValueError: raise InvalidInterpolation(string) def interpolate_environment_variables(version, config, section, environment): if version == V1: interpolator = Interpolator(Template, environment) else: interpolator = Interpolator(TemplateWithDefaults, environment) def process_item(name, config_dict): return { key: interpolate_value(name, key, val, section, interpolator) for key, val in (config_dict or {}).items() } return { name: process_item(name, config_dict or {}) for name, config_dict in config.items() } def get_config_path(config_key, section, name): return '{}/{}/{}'.format(section, name, config_key) def interpolate_value(name, config_key, value, section, interpolator): try: return recursive_interpolate(value, interpolator, get_config_path(config_key, section, name)) except InvalidInterpolation as e: raise ConfigurationError( 'Invalid interpolation format for "{config_key}" option ' 'in {section} "{name}": "{string}"'.format( config_key=config_key, name=name, section=section, string=e.string)) except UnsetRequiredSubstitution as e: raise ConfigurationError( 'Missing mandatory value for "{config_key}" option interpolating {value} ' 'in {section} "{name}": {err}'.format(config_key=config_key, value=value, name=name, section=section, err=e.err) ) def recursive_interpolate(obj, interpolator, config_path): def append(config_path, key): return '{}/{}'.format(config_path, key) if isinstance(obj, str): return converter.convert(config_path, interpolator.interpolate(obj)) if isinstance(obj, dict): return { key: recursive_interpolate(val, interpolator, append(config_path, key)) for key, val in obj.items() } if isinstance(obj, list): return [recursive_interpolate(val, interpolator, config_path) for val in obj] return converter.convert(config_path, obj) class TemplateWithDefaults(Template): pattern = r""" {delim}(?: (?P{delim}) | (?P{id}) | {{(?P{bid})}} | (?P) ) """.format( delim=re.escape('$'), id=r'[_a-z][_a-z0-9]*', bid=r'[_a-z][_a-z0-9]*(?:(?P:?[-?])[^}]*)?', ) @staticmethod def process_braced_group(braced, sep, mapping): if ':-' == sep: var, _, default = braced.partition(':-') return mapping.get(var) or default elif '-' == sep: var, _, default = braced.partition('-') return mapping.get(var, default) elif ':?' == sep: var, _, err = braced.partition(':?') result = mapping.get(var) if not result: err = err or var raise UnsetRequiredSubstitution(err) return result elif '?' 
== sep: var, _, err = braced.partition('?') if var in mapping: return mapping.get(var) err = err or var raise UnsetRequiredSubstitution(err) # Modified from python2.7/string.py def substitute(self, mapping): # Helper function for .sub() def convert(mo): named = mo.group('named') or mo.group('braced') braced = mo.group('braced') if braced is not None: sep = mo.group('sep') if sep: return self.process_braced_group(braced, sep, mapping) if named is not None: val = mapping[named] if isinstance(val, bytes): val = val.decode('utf-8') return '{}'.format(val) if mo.group('escaped') is not None: return self.delimiter if mo.group('invalid') is not None: self._invalid(mo) raise ValueError('Unrecognized named group in pattern', self.pattern) return self.pattern.sub(convert, self.template) class InvalidInterpolation(Exception): def __init__(self, string): self.string = string class UnsetRequiredSubstitution(Exception): def __init__(self, custom_err_msg): self.err = custom_err_msg PATH_JOKER = '[^/]+' FULL_JOKER = '.+' def re_path(*args): return re.compile('^{}$'.format('/'.join(args))) def re_path_basic(section, name): return re_path(section, PATH_JOKER, name) def service_path(*args): return re_path('service', PATH_JOKER, *args) def to_boolean(s): if not isinstance(s, str): return s s = s.lower() if s in ['y', 'yes', 'true', 'on']: return True elif s in ['n', 'no', 'false', 'off']: return False raise ValueError('"{}" is not a valid boolean value'.format(s)) def to_int(s): if not isinstance(s, str): return s # We must be able to handle octal representation for `mode` values notably if re.match('^0[0-9]+$', s.strip()): s = '0o' + s[1:] try: return int(s, base=0) except ValueError: raise ValueError('"{}" is not a valid integer'.format(s)) def to_float(s): if not isinstance(s, str): return s try: return float(s) except ValueError: raise ValueError('"{}" is not a valid float'.format(s)) def to_str(o): if isinstance(o, (bool, float, int)): return '{}'.format(o) return o def bytes_to_int(s): v = parse_bytes(s) if v is None: raise ValueError('"{}" is not a valid byte value'.format(s)) return v def to_microseconds(v): if not isinstance(v, str): return v return int(parse_nanoseconds_int(v) / 1000) class ConversionMap: map = { service_path('blkio_config', 'weight'): to_int, service_path('blkio_config', 'weight_device', 'weight'): to_int, service_path('build', 'labels', FULL_JOKER): to_str, service_path('cpus'): to_float, service_path('cpu_count'): to_int, service_path('cpu_quota'): to_microseconds, service_path('cpu_period'): to_microseconds, service_path('cpu_rt_period'): to_microseconds, service_path('cpu_rt_runtime'): to_microseconds, service_path('configs', 'mode'): to_int, service_path('secrets', 'mode'): to_int, service_path('healthcheck', 'retries'): to_int, service_path('healthcheck', 'disable'): to_boolean, service_path('deploy', 'labels', PATH_JOKER): to_str, service_path('deploy', 'replicas'): to_int, service_path('deploy', 'placement', 'max_replicas_per_node'): to_int, service_path('deploy', 'resources', 'limits', "cpus"): to_float, service_path('deploy', 'update_config', 'parallelism'): to_int, service_path('deploy', 'update_config', 'max_failure_ratio'): to_float, service_path('deploy', 'rollback_config', 'parallelism'): to_int, service_path('deploy', 'rollback_config', 'max_failure_ratio'): to_float, service_path('deploy', 'restart_policy', 'max_attempts'): to_int, service_path('mem_swappiness'): to_int, service_path('labels', FULL_JOKER): to_str, service_path('oom_kill_disable'): to_boolean, 
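        # Interpolated values arrive as strings; these converters restore the
        # types the schema expects, e.g. "false" -> False via to_boolean and
        # octal-looking mode strings such as "0755" handled by to_int.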
service_path('oom_score_adj'): to_int, service_path('ports', 'target'): to_int, service_path('ports', 'published'): to_int, service_path('scale'): to_int, service_path('ulimits', PATH_JOKER): to_int, service_path('ulimits', PATH_JOKER, 'soft'): to_int, service_path('ulimits', PATH_JOKER, 'hard'): to_int, service_path('privileged'): to_boolean, service_path('read_only'): to_boolean, service_path('stdin_open'): to_boolean, service_path('tty'): to_boolean, service_path('volumes', 'read_only'): to_boolean, service_path('volumes', 'volume', 'nocopy'): to_boolean, service_path('volumes', 'tmpfs', 'size'): bytes_to_int, re_path_basic('network', 'attachable'): to_boolean, re_path_basic('network', 'external'): to_boolean, re_path_basic('network', 'internal'): to_boolean, re_path('network', PATH_JOKER, 'labels', FULL_JOKER): to_str, re_path_basic('volume', 'external'): to_boolean, re_path('volume', PATH_JOKER, 'labels', FULL_JOKER): to_str, re_path_basic('secret', 'external'): to_boolean, re_path('secret', PATH_JOKER, 'labels', FULL_JOKER): to_str, re_path_basic('config', 'external'): to_boolean, re_path('config', PATH_JOKER, 'labels', FULL_JOKER): to_str, } def convert(self, path, value): for rexp in self.map.keys(): if rexp.match(path): try: return self.map[rexp](value) except ValueError as e: raise ConfigurationError( 'Error while attempting to convert {} to appropriate type: {}'.format( path.replace('/', '.'), e ) ) return value converter = ConversionMap() compose-1.29.2/compose/config/serialize.py000066400000000000000000000117451404620552300205360ustar00rootroot00000000000000import yaml from compose.config import types from compose.const import COMPOSE_SPEC as VERSION from compose.const import COMPOSEFILE_V1 as V1 def serialize_config_type(dumper, data): representer = dumper.represent_str return representer(data.repr()) def serialize_dict_type(dumper, data): return dumper.represent_dict(data.repr()) def serialize_string(dumper, data): """ Ensure boolean-like strings are quoted in the output """ representer = dumper.represent_str if isinstance(data, bytes): data = data.decode('utf-8') if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'): # Empirically only y/n appears to be an issue, but this might change # depending on which PyYaml version is being used. Err on safe side. 
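        # e.g. an environment value of "yes" must be emitted as the quoted
        # scalar "yes", or a round trip through a YAML 1.1 loader would turn
        # it back into a boolean.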
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
    return representer(data)


def serialize_string_escape_dollar(dumper, data):
    """
    Ensure boolean-like strings are quoted in the output and escape $ characters
    """
    data = data.replace('$', '$$')
    return serialize_string(dumper, data)


yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)


def denormalize_config(config, image_digests=None):
    result = {'version': str(config.config_version)}
    denormalized_services = [
        denormalize_service_dict(
            service_dict,
            config.version,
            image_digests[service_dict['name']] if image_digests else None)
        for service_dict in config.services
    ]
    result['services'] = {
        service_dict.pop('name'): service_dict
        for service_dict in denormalized_services
    }

    for key in ('networks', 'volumes', 'secrets', 'configs'):
        config_dict = getattr(config, key)
        if not config_dict:
            continue
        result[key] = config_dict.copy()
        for name, conf in result[key].items():
            if 'external_name' in conf:
                del conf['external_name']

            if 'name' in conf:
                if 'external' in conf:
                    conf['external'] = bool(conf['external'])
    return result


def serialize_config(config, image_digests=None, escape_dollar=True):
    # Register the str representer once per call; the duplicated
    # registrations in each branch were redundant (same call twice).
    if escape_dollar:
        yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
    else:
        yaml.SafeDumper.add_representer(str, serialize_string)
    return yaml.safe_dump(
        denormalize_config(config, image_digests),
        default_flow_style=False,
        indent=2,
        width=80,
        allow_unicode=True
    )


def serialize_ns_time_value(value):
    result = (value, 'ns')
    table = [
        (1000., 'us'),
        (1000., 'ms'),
        (1000., 's'),
        (60., 'm'),
        (60., 'h')
    ]
    for stage in table:
        tmp = value / stage[0]
        if tmp == int(value / stage[0]):
            value = tmp
            result = (int(value), stage[1])
        else:
            break
    return '{}{}'.format(*result)


def denormalize_service_dict(service_dict, version, image_digest=None):
    service_dict = service_dict.copy()

    if image_digest:
        service_dict['image'] = image_digest

    if 'restart' in service_dict:
        service_dict['restart'] = types.serialize_restart_spec(
            service_dict['restart']
        )

    if version == V1 and 'network_mode' not in service_dict:
        service_dict['network_mode'] = 'bridge'

    if 'healthcheck' in service_dict:
        if 'interval' in service_dict['healthcheck']:
            service_dict['healthcheck']['interval'] = serialize_ns_time_value(
                service_dict['healthcheck']['interval']
            )
        if 'timeout' in service_dict['healthcheck']:
            service_dict['healthcheck']['timeout'] = serialize_ns_time_value(
                service_dict['healthcheck']['timeout']
            )

        if 'start_period' in service_dict['healthcheck']:
            service_dict['healthcheck']['start_period'] = serialize_ns_time_value(
                service_dict['healthcheck']['start_period']
            )

    if 'ports' in service_dict:
        service_dict['ports'] = [
            p.legacy_repr() if p.external_ip or version < VERSION else p
            for p in service_dict['ports']
        ]
    if 'volumes' in service_dict and (version == V1):
        service_dict['volumes'] = [
            v.legacy_repr() if isinstance(v, types.MountSpec) else v
            for v in service_dict['volumes']
        ]
    return service_dict
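# Editor's note -- an illustrative sketch, not part of the original module:
# serialize_ns_time_value() keeps promoting the value to the next unit in
# the table as long as it divides evenly, and stops at the first unit that
# would leave a fraction:
#
#     serialize_ns_time_value(90000000000)  # -> '90s'    (90e9 ns == 90 s)
#     serialize_ns_time_value(1500000000)   # -> '1500ms' (1.5 s is fractional)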
compose-1.29.2/compose/config/sort_services.py000066400000000000000000000046671404620552300214460ustar00rootroot00000000000000from compose.config.errors import DependencyError def get_service_name_from_network_mode(network_mode): return get_source_name_from_network_mode(network_mode, 'service') def get_container_name_from_network_mode(network_mode): return get_source_name_from_network_mode(network_mode, 'container') def get_source_name_from_network_mode(network_mode, source_type): if not network_mode: return if not network_mode.startswith(source_type+':'): return _, net_name = network_mode.split(':', 1) return net_name def get_service_names(links): return [link.split(':', 1)[0] for link in links] def get_service_names_from_volumes_from(volumes_from): return [volume_from.source for volume_from in volumes_from] def get_service_dependents(service_dict, services): name = service_dict['name'] return [ service for service in services if (name in get_service_names(service.get('links', [])) or name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or name == get_service_name_from_network_mode(service.get('network_mode')) or name == get_service_name_from_network_mode(service.get('pid')) or name == get_service_name_from_network_mode(service.get('ipc')) or name in service.get('depends_on', [])) ] def sort_service_dicts(services): # Topological sort (Cormen/Tarjan algorithm). unmarked = services[:] temporary_marked = set() sorted_services = [] def visit(n): if n['name'] in temporary_marked: if n['name'] in get_service_names(n.get('links', [])): raise DependencyError('A service can not link to itself: %s' % n['name']) if n['name'] in n.get('volumes_from', []): raise DependencyError('A service can not mount itself as volume: %s' % n['name']) if n['name'] in n.get('depends_on', []): raise DependencyError('A service can not depend on itself: %s' % n['name']) raise DependencyError('Circular dependency between %s' % ' and '.join(temporary_marked)) if n in unmarked: temporary_marked.add(n['name']) for m in get_service_dependents(n, services): visit(m) temporary_marked.remove(n['name']) unmarked.remove(n) sorted_services.insert(0, n) while unmarked: visit(unmarked[-1]) return sorted_services compose-1.29.2/compose/config/types.py000066400000000000000000000367051404620552300177160ustar00rootroot00000000000000""" Types for objects parsed from the configuration. 
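Editor's illustration (hypothetical values, not in the original docstring):
ServiceLink.parse('db:database') returns ServiceLink(target='db', alias='database');
most classes below follow the same parse()/repr() round-trip pattern.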
""" import json import ntpath import os import re from collections import namedtuple from docker.utils.ports import build_port_bindings from ..const import COMPOSEFILE_V1 as V1 from ..utils import unquote_path from .errors import ConfigurationError from compose.const import IS_WINDOWS_PLATFORM from compose.utils import splitdrive win32_root_path_pattern = re.compile(r'^[A-Za-z]\:\\.*') class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')): # TODO: drop service_names arg when v1 is removed @classmethod def parse(cls, volume_from_config, service_names, version): func = cls.parse_v1 if version == V1 else cls.parse_v2 return func(service_names, volume_from_config) @classmethod def parse_v1(cls, service_names, volume_from_config): parts = volume_from_config.split(':') if len(parts) > 2: raise ConfigurationError( "volume_from {} has incorrect format, should be " "service[:mode]".format(volume_from_config)) if len(parts) == 1: source = parts[0] mode = 'rw' else: source, mode = parts type = 'service' if source in service_names else 'container' return cls(source, mode, type) @classmethod def parse_v2(cls, service_names, volume_from_config): parts = volume_from_config.split(':') if len(parts) > 3: raise ConfigurationError( "volume_from {} has incorrect format, should be one of " "'[:]' or " "'container:[:]'".format(volume_from_config)) if len(parts) == 1: source = parts[0] return cls(source, 'rw', 'service') if len(parts) == 2: if parts[0] == 'container': type, source = parts return cls(source, 'rw', type) source, mode = parts return cls(source, mode, 'service') if len(parts) == 3: type, source, mode = parts if type not in ('service', 'container'): raise ConfigurationError( "Unknown volumes_from type '{}' in '{}'".format( type, volume_from_config)) return cls(source, mode, type) def repr(self): return '{v.type}:{v.source}:{v.mode}'.format(v=self) def parse_restart_spec(restart_config): if not restart_config: return None parts = restart_config.split(':') if len(parts) > 2: raise ConfigurationError( "Restart %s has incorrect format, should be " "mode[:max_retry]" % restart_config) if len(parts) == 2: name, max_retry_count = parts else: name, = parts max_retry_count = 0 return {'Name': name, 'MaximumRetryCount': int(max_retry_count)} def serialize_restart_spec(restart_spec): if not restart_spec: return '' parts = [restart_spec['Name']] if restart_spec['MaximumRetryCount']: parts.append(str(restart_spec['MaximumRetryCount'])) return ':'.join(parts) def parse_extra_hosts(extra_hosts_config): if not extra_hosts_config: return {} if isinstance(extra_hosts_config, dict): return dict(extra_hosts_config) if isinstance(extra_hosts_config, list): extra_hosts_dict = {} for extra_hosts_line in extra_hosts_config: # TODO: validate string contains ':' ? host, ip = extra_hosts_line.split(':', 1) extra_hosts_dict[host.strip()] = ip.strip() return extra_hosts_dict def normalize_path_for_engine(path): """Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with the Engine. Volume paths are expected to be linux style /c/my/path/shiny/ """ drive, tail = splitdrive(path) if drive: path = '/' + drive.lower().rstrip(':') + tail return path.replace('\\', '/') def normpath(path, win_host=False): """ Custom path normalizer that handles Compose-specific edge cases like UNIX paths on Windows hosts and vice-versa. 
""" sysnorm = ntpath.normpath if win_host else os.path.normpath # If a path looks like a UNIX absolute path on Windows, it probably is; # we'll need to revert the backslashes to forward slashes after normalization flip_slashes = path.startswith('/') and IS_WINDOWS_PLATFORM path = sysnorm(path) if flip_slashes: path = path.replace('\\', '/') return path class MountSpec: options_map = { 'volume': { 'nocopy': 'no_copy' }, 'bind': { 'propagation': 'propagation' }, 'tmpfs': { 'size': 'tmpfs_size' } } _fields = ['type', 'source', 'target', 'read_only', 'consistency'] @classmethod def parse(cls, mount_dict, normalize=False, win_host=False): if mount_dict.get('source'): if mount_dict['type'] == 'tmpfs': raise ConfigurationError('tmpfs mounts can not specify a source') mount_dict['source'] = normpath(mount_dict['source'], win_host) if normalize: mount_dict['source'] = normalize_path_for_engine(mount_dict['source']) return cls(**mount_dict) def __init__(self, type, source=None, target=None, read_only=None, consistency=None, **kwargs): self.type = type self.source = source self.target = target self.read_only = read_only self.consistency = consistency self.options = None if self.type in kwargs: self.options = kwargs[self.type] def as_volume_spec(self): mode = 'ro' if self.read_only else 'rw' return VolumeSpec(external=self.source, internal=self.target, mode=mode) def legacy_repr(self): return self.as_volume_spec().repr() def repr(self): res = {} for field in self._fields: if getattr(self, field, None): res[field] = getattr(self, field) if self.options: res[self.type] = self.options return res @property def is_named_volume(self): return self.type == 'volume' and self.source @property def is_tmpfs(self): return self.type == 'tmpfs' @property def external(self): return self.source class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')): win32 = False @classmethod def _parse_unix(cls, volume_config): parts = volume_config.split(':') if len(parts) > 3: raise ConfigurationError( "Volume %s has incorrect format, should be " "external:internal[:mode]" % volume_config) if len(parts) == 1: external = None internal = os.path.normpath(parts[0]) else: external = os.path.normpath(parts[0]) internal = os.path.normpath(parts[1]) mode = 'rw' if len(parts) == 3: mode = parts[2] return cls(external, internal, mode) @classmethod def _parse_win32(cls, volume_config, normalize): # relative paths in windows expand to include the drive, eg C:\ # so we join the first 2 parts back together to count as one mode = 'rw' def separate_next_section(volume_config): drive, tail = splitdrive(volume_config) parts = tail.split(':', 1) if drive: parts[0] = drive + parts[0] return parts parts = separate_next_section(volume_config) if len(parts) == 1: internal = parts[0] external = None else: external = parts[0] parts = separate_next_section(parts[1]) external = normpath(external, True) internal = parts[0] if len(parts) > 1: if ':' in parts[1]: raise ConfigurationError( "Volume %s has incorrect format, should be " "external:internal[:mode]" % volume_config ) mode = parts[1] if normalize: external = normalize_path_for_engine(external) if external else None result = cls(external, internal, mode) result.win32 = True return result @classmethod def parse(cls, volume_config, normalize=False, win_host=False): """Parse a volume_config path and split it into external:internal[:mode] parts to be returned as a valid VolumeSpec. 
""" if IS_WINDOWS_PLATFORM or win_host: return cls._parse_win32(volume_config, normalize) else: return cls._parse_unix(volume_config) def repr(self): external = self.external + ':' if self.external else '' mode = ':' + self.mode if self.external else '' return '{ext}{v.internal}{mode}'.format(mode=mode, ext=external, v=self) @property def is_named_volume(self): res = self.external and not self.external.startswith(('.', '/', '~')) if not self.win32: return res return ( res and not self.external.startswith('\\') and not win32_root_path_pattern.match(self.external) ) class ServiceLink(namedtuple('_ServiceLink', 'target alias')): @classmethod def parse(cls, link_spec): target, _, alias = link_spec.partition(':') if not alias: alias = target return cls(target, alias) def repr(self): if self.target == self.alias: return self.target return '{s.target}:{s.alias}'.format(s=self) @property def merge_field(self): return self.alias class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode name')): @classmethod def parse(cls, spec): if isinstance(spec, str): return cls(spec, None, None, None, None, None) return cls( spec.get('source'), spec.get('target'), spec.get('uid'), spec.get('gid'), spec.get('mode'), spec.get('name') ) @property def merge_field(self): return self.source def repr(self): return { k: v for k, v in zip(self._fields, self) if v is not None } class ServiceSecret(ServiceConfigBase): pass class ServiceConfig(ServiceConfigBase): pass class ServicePort(namedtuple('_ServicePort', 'target published protocol mode external_ip')): def __new__(cls, target, published, *args, **kwargs): try: if target: target = int(target) except ValueError: raise ConfigurationError('Invalid target port: {}'.format(target)) if published: if isinstance(published, str) and '-' in published: # "x-y:z" format a, b = published.split('-', 1) if not a.isdigit() or not b.isdigit(): raise ConfigurationError('Invalid published port: {}'.format(published)) else: try: published = int(published) except ValueError: raise ConfigurationError('Invalid published port: {}'.format(published)) return super().__new__( cls, target, published, *args, **kwargs ) @classmethod def parse(cls, spec): if isinstance(spec, cls): # When extending a service with ports, the port definitions have already been parsed return [spec] if not isinstance(spec, dict): result = [] try: for k, v in build_port_bindings([spec]).items(): if '/' in k: target, proto = k.split('/', 1) else: target, proto = (k, None) for pub in v: if pub is None: result.append( cls(target, None, proto, None, None) ) elif isinstance(pub, tuple): result.append( cls(target, pub[1], proto, None, pub[0]) ) else: result.append( cls(target, pub, proto, None, None) ) except ValueError as e: raise ConfigurationError(str(e)) return result return [cls( spec.get('target'), spec.get('published'), spec.get('protocol'), spec.get('mode'), None )] @property def merge_field(self): return (self.target, self.published, self.external_ip, self.protocol) def repr(self): return { k: v for k, v in zip(self._fields, self) if v is not None } def legacy_repr(self): return normalize_port_dict(self.repr()) class GenericResource(namedtuple('_GenericResource', 'kind value')): @classmethod def parse(cls, dct): if 'discrete_resource_spec' not in dct: raise ConfigurationError( 'generic_resource entry must include a discrete_resource_spec key' ) if 'kind' not in dct['discrete_resource_spec']: raise ConfigurationError( 'generic_resource entry must include a discrete_resource_spec.kind subkey' 
) return cls( dct['discrete_resource_spec']['kind'], dct['discrete_resource_spec'].get('value') ) def repr(self): return { 'discrete_resource_spec': { 'kind': self.kind, 'value': self.value, } } @property def merge_field(self): return self.kind def normalize_port_dict(port): return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format( published=port.get('published', ''), is_pub=(':' if port.get('published') is not None or port.get('external_ip') else ''), target=port.get('target'), protocol=port.get('protocol', 'tcp'), external_ip=port.get('external_ip', ''), has_ext_ip=(':' if port.get('external_ip') else ''), ) class SecurityOpt(namedtuple('_SecurityOpt', 'value src_file')): @classmethod def parse(cls, value): if not isinstance(value, str): return value # based on https://github.com/docker/cli/blob/9de1b162f/cli/command/container/opts.go#L673-L697 con = value.split('=', 2) if len(con) == 1 and con[0] != 'no-new-privileges': if ':' not in value: raise ConfigurationError('Invalid security_opt: {}'.format(value)) con = value.split(':', 2) if con[0] == 'seccomp' and con[1] != 'unconfined': try: with open(unquote_path(con[1])) as f: seccomp_data = json.load(f) except (OSError, ValueError) as e: raise ConfigurationError('Error reading seccomp profile: {}'.format(e)) return cls( 'seccomp={}'.format(json.dumps(seccomp_data)), con[1] ) return cls(value, None) def repr(self): if self.src_file is not None: return 'seccomp:{}'.format(self.src_file) return self.value @property def merge_field(self): return self.value compose-1.29.2/compose/config/validation.py000066400000000000000000000466641404620552300207110ustar00rootroot00000000000000import json import logging import os import re import sys from docker.utils.ports import split_port from jsonschema import Draft4Validator from jsonschema import FormatChecker from jsonschema import RefResolver from jsonschema import ValidationError from ..const import COMPOSEFILE_V1 as V1 from ..const import NANOCPUS_SCALE from .errors import ConfigurationError from .errors import VERSION_EXPLANATION from .sort_services import get_service_name_from_network_mode log = logging.getLogger(__name__) DOCKER_CONFIG_HINTS = { 'cpu_share': 'cpu_shares', 'add_host': 'extra_hosts', 'hosts': 'extra_hosts', 'extra_host': 'extra_hosts', 'device': 'devices', 'link': 'links', 'memory_swap': 'memswap_limit', 'port': 'ports', 'privilege': 'privileged', 'priviliged': 'privileged', 'privilige': 'privileged', 'volume': 'volumes', 'workdir': 'working_dir', } VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]' VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$' VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])' VALID_IPV4_ADDR = r"({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG) VALID_REGEX_IPV4_CIDR = r"^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR) VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}' VALID_REGEX_IPV6_CIDR = "".join(r""" ^ ( (({IPV6_SEG}:){{7}}{IPV6_SEG})| (({IPV6_SEG}:){{1,7}}:)| (({IPV6_SEG}:){{1,6}}(:{IPV6_SEG}){{1,1}})| (({IPV6_SEG}:){{1,5}}(:{IPV6_SEG}){{1,2}})| (({IPV6_SEG}:){{1,4}}(:{IPV6_SEG}){{1,3}})| (({IPV6_SEG}:){{1,3}}(:{IPV6_SEG}){{1,4}})| (({IPV6_SEG}:){{1,2}}(:{IPV6_SEG}){{1,5}})| (({IPV6_SEG}:){{1,1}}(:{IPV6_SEG}){{1,6}})| (:((:{IPV6_SEG}){{1,7}}|:))| (fe80:(:{IPV6_SEG}){{0,4}}%[0-9a-zA-Z]{{1,}})| (::(ffff(:0{{1,4}}){{0,1}}:){{0,1}}{IPV4_ADDR})| (({IPV6_SEG}:){{1,4}}:{IPV4_ADDR}) ) /(\d|[1-9]\d|1[0-1]\d|12[0-8]) $ """.format(IPV6_SEG=VALID_IPV6_SEG, IPV4_ADDR=VALID_IPV4_ADDR).split()) 
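# Editor's note -- illustrative matches for the patterns above (not part of
# the original source):
#
#     re.match(VALID_REGEX_IPV4_CIDR, '172.16.0.0/16')  # matches
#     re.match(VALID_REGEX_IPV6_CIDR, '2001:db8::/64')  # matches
#     re.match(VALID_REGEX_IPV4_CIDR, '256.0.0.0/8')    # None: octet out of range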
@FormatChecker.cls_checks(format="ports", raises=ValidationError) def format_ports(instance): try: split_port(instance) except ValueError as e: raise ValidationError(str(e)) return True @FormatChecker.cls_checks(format="expose", raises=ValidationError) def format_expose(instance): if isinstance(instance, str): if not re.match(VALID_EXPOSE_FORMAT, instance): raise ValidationError( "should be of the format 'PORT[/PROTOCOL]'") return True @FormatChecker.cls_checks("subnet_ip_address", raises=ValidationError) def format_subnet_ip_address(instance): if isinstance(instance, str): if not re.match(VALID_REGEX_IPV4_CIDR, instance) and \ not re.match(VALID_REGEX_IPV6_CIDR, instance): raise ValidationError("should use the CIDR format") return True def match_named_volumes(service_dict, project_volumes): service_volumes = service_dict.get('volumes', []) for volume_spec in service_volumes: if volume_spec.is_named_volume and volume_spec.external not in project_volumes: raise ConfigurationError( 'Named volume "{}" is used in service "{}" but no' ' declaration was found in the volumes section.'.format( volume_spec.repr(), service_dict.get('name') ) ) def python_type_to_yaml_type(type_): type_name = type(type_).__name__ return { 'dict': 'mapping', 'list': 'array', 'int': 'number', 'float': 'number', 'bool': 'boolean', 'unicode': 'string', 'str': 'string', 'bytes': 'string', }.get(type_name, type_name) def validate_config_section(filename, config, section): """Validate the structure of a configuration section. This must be done before interpolation so it's separate from schema validation. """ if not isinstance(config, dict): raise ConfigurationError( "In file '{filename}', {section} must be a mapping, not " "{type}.".format( filename=filename, section=section, type=anglicize_json_type(python_type_to_yaml_type(config)))) for key, value in config.items(): if not isinstance(key, str): raise ConfigurationError( "In file '{filename}', the {section} name {name} must be a " "quoted string, i.e. '{name}'.".format( filename=filename, section=section, name=key)) if not isinstance(value, (dict, type(None))): raise ConfigurationError( "In file '{filename}', {section} '{name}' must be a mapping not " "{type}.".format( filename=filename, section=section, name=key, type=anglicize_json_type(python_type_to_yaml_type(value)))) def validate_top_level_object(config_file): if not isinstance(config_file.config, dict): raise ConfigurationError( "Top level object in '{}' needs to be an object not '{}'.".format( config_file.filename, type(config_file.config))) def validate_ulimits(service_config): ulimit_config = service_config.config.get('ulimits', {}) for limit_name, soft_hard_values in ulimit_config.items(): if isinstance(soft_hard_values, dict): if not soft_hard_values['soft'] <= soft_hard_values['hard']: raise ConfigurationError( "Service '{s.name}' has invalid ulimit '{ulimit}'. " "'soft' value can not be greater than 'hard' value ".format( s=service_config, ulimit=ulimit_config)) def validate_extends_file_path(service_name, extends_options, filename): """ The service to be extended must either be defined in the config key 'file', or within 'filename'. """ error_prefix = "Invalid 'extends' configuration for %s:" % service_name if 'file' not in extends_options and filename is None: raise ConfigurationError( "%s you need to specify a 'file', e.g. 
'file: something.yml'" % error_prefix ) def validate_network_mode(service_config, service_names): network_mode = service_config.config.get('network_mode') if not network_mode: return if 'networks' in service_config.config: raise ConfigurationError("'network_mode' and 'networks' cannot be combined") dependency = get_service_name_from_network_mode(network_mode) if not dependency: return if dependency not in service_names: raise ConfigurationError( "Service '{s.name}' uses the network stack of service '{dep}' which " "is undefined.".format(s=service_config, dep=dependency)) def validate_pid_mode(service_config, service_names): pid_mode = service_config.config.get('pid') if not pid_mode: return dependency = get_service_name_from_network_mode(pid_mode) if not dependency: return if dependency not in service_names: raise ConfigurationError( "Service '{s.name}' uses the PID namespace of service '{dep}' which " "is undefined.".format(s=service_config, dep=dependency) ) def validate_ipc_mode(service_config, service_names): ipc_mode = service_config.config.get('ipc') if not ipc_mode: return dependency = get_service_name_from_network_mode(ipc_mode) if not dependency: return if dependency not in service_names: raise ConfigurationError( "Service '{s.name}' uses the IPC namespace of service '{dep}' which " "is undefined.".format(s=service_config, dep=dependency) ) def validate_links(service_config, service_names): for link in service_config.config.get('links', []): if link.split(':')[0] not in service_names: raise ConfigurationError( "Service '{s.name}' has a link to service '{link}' which is " "undefined.".format(s=service_config, link=link)) def validate_depends_on(service_config, service_names): deps = service_config.config.get('depends_on', {}) for dependency in deps.keys(): if dependency not in service_names: raise ConfigurationError( "Service '{s.name}' depends on service '{dep}' which is " "undefined.".format(s=service_config, dep=dependency) ) def validate_credential_spec(service_config): credential_spec = service_config.config.get('credential_spec') if not credential_spec: return if 'registry' not in credential_spec and 'file' not in credential_spec: raise ConfigurationError( "Service '{s.name}' is missing 'credential_spec.file' or " "credential_spec.registry'".format(s=service_config) ) def get_unsupported_config_msg(path, error_key): msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key) if error_key in DOCKER_CONFIG_HINTS: msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key]) return msg def anglicize_json_type(json_type): if json_type.startswith(('a', 'e', 'i', 'o', 'u')): return 'an ' + json_type return 'a ' + json_type def is_service_dict_schema(schema_id): return schema_id in ('config_schema_v1.json', '#/properties/services') def handle_error_for_schema_with_id(error, path): schema_id = error.schema['id'] if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties': return "Invalid service name '{}' - only {} characters are allowed".format( # The service_name is one of the keys in the json object [i for i in list(error.instance) if not i or any(filter( lambda c: not re.match(VALID_NAME_CHARS, c), i ))][0], VALID_NAME_CHARS ) if error.validator == 'additionalProperties': if schema_id == '#/definitions/service': invalid_config_key = parse_key_from_error_msg(error) return get_unsupported_config_msg(path, invalid_config_key) if schema_id.startswith('config_schema_'): invalid_config_key = parse_key_from_error_msg(error) return 
('Invalid top-level property "{key}". Valid top-level ' 'sections for this Compose file are: {properties}, and ' 'extensions starting with "x-".\n\n{explanation}').format( key=invalid_config_key, properties=', '.join(error.schema['properties'].keys()), explanation=VERSION_EXPLANATION ) if not error.path: return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION) def handle_generic_error(error, path): msg_format = None error_msg = error.message if error.validator == 'oneOf': msg_format = "{path} {msg}" config_key, error_msg = _parse_oneof_validator(error) if config_key: path.append(config_key) elif error.validator == 'type': msg_format = "{path} contains an invalid type, it should be {msg}" error_msg = _parse_valid_types_from_validator(error.validator_value) elif error.validator == 'required': error_msg = ", ".join(error.validator_value) msg_format = "{path} is invalid, {msg} is required." elif error.validator == 'dependencies': config_key = list(error.validator_value.keys())[0] required_keys = ",".join(error.validator_value[config_key]) msg_format = "{path} is invalid: {msg}" path.append(config_key) error_msg = "when defining '{}' you must set '{}' as well".format( config_key, required_keys) elif error.cause: error_msg = str(error.cause) msg_format = "{path} is invalid: {msg}" elif error.path: msg_format = "{path} value {msg}" if msg_format: return msg_format.format(path=path_string(path), msg=error_msg) return error.message def parse_key_from_error_msg(error): try: return error.message.split("'")[1] except IndexError: return error.message.split('(')[1].split(' ')[0].strip("'") def path_string(path): return ".".join(c for c in path if isinstance(c, str)) def _parse_valid_types_from_validator(validator): """A validator value can be either an array of valid types or a string of a valid type. Parse the valid types and prefix with the correct article. """ if not isinstance(validator, list): return anglicize_json_type(validator) if len(validator) == 1: return anglicize_json_type(validator[0]) return "{}, or {}".format( ", ".join([anglicize_json_type(validator[0])] + validator[1:-1]), anglicize_json_type(validator[-1])) def _parse_oneof_validator(error): """oneOf has multiple schemas, so we need to reason about which schema, sub schema or constraint the validation is failing on. Inspecting the context value of a ValidationError gives us information about which sub schema failed and which kind of error it is. 
""" types = [] for context in error.context: if context.validator == 'oneOf': _, error_msg = _parse_oneof_validator(context) return path_string(context.path), error_msg if context.validator == 'required': return (None, context.message) if context.validator == 'additionalProperties': invalid_config_key = parse_key_from_error_msg(context) return (None, "contains unsupported option: '{}'".format(invalid_config_key)) if context.validator == 'uniqueItems': return ( path_string(context.path) if context.path else None, "contains non-unique items, please remove duplicates from {}".format( context.instance), ) if context.path: return ( path_string(context.path), "contains {}, which is an invalid type, it should be {}".format( json.dumps(context.instance), _parse_valid_types_from_validator(context.validator_value)), ) if context.validator == 'type': types.append(context.validator_value) valid_types = _parse_valid_types_from_validator(types) return (None, "contains an invalid type, it should be {}".format(valid_types)) def process_service_constraint_errors(error, service_name, version): if version == V1: if 'image' in error.instance and 'build' in error.instance: return ( "Service {} has both an image and build path specified. " "A service can either be built to image or use an existing " "image, not both.".format(service_name)) if 'image' in error.instance and 'dockerfile' in error.instance: return ( "Service {} has both an image and alternate Dockerfile. " "A service can either be built to image or use an existing " "image, not both.".format(service_name)) if 'image' not in error.instance and 'build' not in error.instance: return ( "Service {} has neither an image nor a build context specified. " "At least one must be provided.".format(service_name)) def process_config_schema_errors(error): path = list(error.path) if 'id' in error.schema: error_msg = handle_error_for_schema_with_id(error, path) if error_msg: return error_msg return handle_generic_error(error, path) def keys_to_str(config_file): """ Non-string keys may break validator with patterned fields. 
""" d = {} for k, v in config_file.items(): d[str(k)] = v if isinstance(v, dict): d[str(k)] = keys_to_str(v) return d def validate_against_config_schema(config_file, version): schema = load_jsonschema(version) config = keys_to_str(config_file.config) format_checker = FormatChecker(["ports", "expose", "subnet_ip_address"]) validator = Draft4Validator( schema, resolver=RefResolver(get_resolver_path(), schema), format_checker=format_checker) handle_errors( validator.iter_errors(config), process_config_schema_errors, config_file.filename) def validate_service_constraints(config, service_name, config_file): def handler(errors): return process_service_constraint_errors( errors, service_name, config_file.version) schema = load_jsonschema(config_file.version) validator = Draft4Validator(schema['definitions']['constraints']['service']) handle_errors(validator.iter_errors(config), handler, None) def validate_cpu(service_config): cpus = service_config.config.get('cpus') if not cpus: return nano_cpus = cpus * NANOCPUS_SCALE if isinstance(nano_cpus, float) and not nano_cpus.is_integer(): raise ConfigurationError( "cpus must have nine or less digits after decimal point") def get_schema_path(): return os.path.dirname(os.path.abspath(__file__)) def load_jsonschema(version): name = "compose_spec" if version == V1: name = "config_schema_v1" filename = os.path.join( get_schema_path(), "{}.json".format(name)) if not os.path.exists(filename): raise ConfigurationError( 'Version in "{}" is unsupported. {}' .format(filename, VERSION_EXPLANATION)) with open(filename) as fh: return json.load(fh) def get_resolver_path(): schema_path = get_schema_path() if sys.platform == "win32": scheme = "///" # TODO: why is this necessary? schema_path = schema_path.replace('\\', '/') else: scheme = "//" return "file:{}{}/".format(scheme, schema_path) def handle_errors(errors, format_error_func, filename): """jsonschema returns an error tree full of information to explain what has gone wrong. Process each error and pull out relevant information and re-write helpful error messages that are relevant. 
""" errors = sorted(errors, key=str) if not errors: return error_msg = '\n'.join(format_error_func(error) for error in errors) raise ConfigurationError( "The Compose file{file_msg} is invalid because:\n{error_msg}".format( file_msg=" '{}'".format(filename) if filename else "", error_msg=error_msg)) def validate_healthcheck(service_config): healthcheck = service_config.config.get('healthcheck', {}) if 'test' in healthcheck and isinstance(healthcheck['test'], list): if len(healthcheck['test']) == 0: raise ConfigurationError( 'Service "{}" defines an invalid healthcheck: ' '"test" is an empty list' .format(service_config.name)) # when disable is true config.py::process_healthcheck adds "test: ['NONE']" to service_config elif healthcheck['test'][0] == 'NONE' and len(healthcheck) > 1: raise ConfigurationError( 'Service "{}" defines an invalid healthcheck: ' '"disable: true" cannot be combined with other options' .format(service_config.name)) elif healthcheck['test'][0] not in ('NONE', 'CMD', 'CMD-SHELL'): raise ConfigurationError( 'Service "{}" defines an invalid healthcheck: ' 'when "test" is a list the first item must be either NONE, CMD or CMD-SHELL' .format(service_config.name)) compose-1.29.2/compose/const.py000066400000000000000000000024031404620552300164170ustar00rootroot00000000000000import sys from .version import ComposeVersion DEFAULT_TIMEOUT = 10 HTTP_TIMEOUT = 60 IS_WINDOWS_PLATFORM = (sys.platform == "win32") IS_LINUX_PLATFORM = (sys.platform == "linux") LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number' LABEL_ONE_OFF = 'com.docker.compose.oneoff' LABEL_PROJECT = 'com.docker.compose.project' LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir' LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files' LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file' LABEL_SERVICE = 'com.docker.compose.service' LABEL_NETWORK = 'com.docker.compose.network' LABEL_VERSION = 'com.docker.compose.version' LABEL_SLUG = 'com.docker.compose.slug' LABEL_VOLUME = 'com.docker.compose.volume' LABEL_CONFIG_HASH = 'com.docker.compose.config-hash' NANOCPUS_SCALE = 1000000000 PARALLEL_LIMIT = 64 SECRETS_PATH = '/run/secrets' WINDOWS_LONGPATH_PREFIX = '\\\\?\\' COMPOSEFILE_V1 = ComposeVersion('1') COMPOSE_SPEC = ComposeVersion('3.9') # minimum DOCKER ENGINE API version needed to support # features for each compose schema version API_VERSIONS = { COMPOSEFILE_V1: '1.21', COMPOSE_SPEC: '1.38', } API_VERSION_TO_ENGINE_VERSION = { API_VERSIONS[COMPOSEFILE_V1]: '1.9.0', API_VERSIONS[COMPOSE_SPEC]: '18.06.0', } compose-1.29.2/compose/container.py000066400000000000000000000222011404620552300172510ustar00rootroot00000000000000from functools import reduce from docker.errors import ImageNotFound from .const import LABEL_CONTAINER_NUMBER from .const import LABEL_ONE_OFF from .const import LABEL_PROJECT from .const import LABEL_SERVICE from .const import LABEL_SLUG from .const import LABEL_VERSION from .utils import truncate_id from .version import ComposeVersion class Container: """ Represents a Docker container, constructed from the output of GET /containers/:id:/json. """ def __init__(self, client, dictionary, has_been_inspected=False): self.client = client self.dictionary = dictionary self.has_been_inspected = has_been_inspected self.log_stream = None @classmethod def from_ps(cls, client, dictionary, **kwargs): """ Construct a container object from the output of GET /containers/json. 
""" name = get_container_name(dictionary) if name is None: return None new_dictionary = { 'Id': dictionary['Id'], 'Image': dictionary['Image'], 'Name': '/' + name, } return cls(client, new_dictionary, **kwargs) @classmethod def from_id(cls, client, id): return cls(client, client.inspect_container(id), has_been_inspected=True) @classmethod def create(cls, client, **options): response = client.create_container(**options) return cls.from_id(client, response['Id']) @property def id(self): return self.dictionary['Id'] @property def image(self): return self.dictionary['Image'] @property def image_config(self): return self.client.inspect_image(self.image) @property def short_id(self): return self.id[:12] @property def name(self): return self.dictionary['Name'][1:] @property def project(self): return self.labels.get(LABEL_PROJECT) @property def service(self): return self.labels.get(LABEL_SERVICE) @property def name_without_project(self): if self.name.startswith('{}_{}'.format(self.project, self.service)): return '{}_{}'.format(self.service, self.number if self.number is not None else self.slug) else: return self.name @property def number(self): if self.one_off: # One-off containers are no longer assigned numbers and use slugs instead. return None number = self.labels.get(LABEL_CONTAINER_NUMBER) if not number: raise ValueError("Container {} does not have a {} label".format( self.short_id, LABEL_CONTAINER_NUMBER)) return int(number) @property def slug(self): if not self.full_slug: return None return truncate_id(self.full_slug) @property def full_slug(self): return self.labels.get(LABEL_SLUG) @property def one_off(self): return self.labels.get(LABEL_ONE_OFF) == 'True' @property def ports(self): self.inspect_if_not_inspected() return self.get('NetworkSettings.Ports') or {} @property def human_readable_ports(self): def format_port(private, public): if not public: return [private] return [ '{HostIp}:{HostPort}->{private}'.format(private=private, **pub) for pub in public ] return ', '.join( ','.join(format_port(*item)) for item in sorted(self.ports.items()) ) @property def labels(self): return self.get('Config.Labels') or {} @property def stop_signal(self): return self.get('Config.StopSignal') @property def log_config(self): return self.get('HostConfig.LogConfig') or None @property def human_readable_state(self): if self.is_paused: return 'Paused' if self.is_restarting: return 'Restarting' if self.is_running: return 'Ghost' if self.get('State.Ghost') else self.human_readable_health_status else: return 'Exit %s' % self.get('State.ExitCode') @property def human_readable_command(self): entrypoint = self.get('Config.Entrypoint') or [] cmd = self.get('Config.Cmd') or [] return ' '.join(entrypoint + cmd) @property def environment(self): def parse_env(var): if '=' in var: return var.split("=", 1) return var, None return dict(parse_env(var) for var in self.get('Config.Env') or []) @property def exit_code(self): return self.get('State.ExitCode') @property def is_running(self): return self.get('State.Running') @property def is_restarting(self): return self.get('State.Restarting') @property def is_paused(self): return self.get('State.Paused') @property def log_driver(self): return self.get('HostConfig.LogConfig.Type') @property def human_readable_health_status(self): """ Generate UP status string with up time and health """ status_string = 'Up' container_status = self.get('State.Health.Status') if container_status == 'starting': status_string += ' (health: starting)' elif container_status is not None: status_string 
+= ' (%s)' % container_status return status_string def attach_log_stream(self): self.log_stream = self.attach(stdout=True, stderr=True, stream=True) def get(self, key): """Return a value from the container or None if the value is not set. :param key: a string using dotted notation for nested dictionary lookups """ self.inspect_if_not_inspected() def get_value(dictionary, key): return (dictionary or {}).get(key) return reduce(get_value, key.split('.'), self.dictionary) def get_local_port(self, port, protocol='tcp'): port = self.ports.get("{}/{}".format(port, protocol)) return "{HostIp}:{HostPort}".format(**port[0]) if port else None def get_mount(self, mount_dest): for mount in self.get('Mounts'): if mount['Destination'] == mount_dest: return mount return None def start(self, **options): return self.client.start(self.id, **options) def stop(self, **options): return self.client.stop(self.id, **options) def pause(self, **options): return self.client.pause(self.id, **options) def unpause(self, **options): return self.client.unpause(self.id, **options) def kill(self, **options): return self.client.kill(self.id, **options) def restart(self, **options): return self.client.restart(self.id, **options) def remove(self, **options): return self.client.remove_container(self.id, **options) def create_exec(self, command, **options): return self.client.exec_create(self.id, command, **options) def start_exec(self, exec_id, **options): return self.client.exec_start(exec_id, **options) def rename_to_tmp_name(self): """Rename the container to a hopefully unique temporary container name by prepending the short id. """ if not self.name.startswith(self.short_id): self.client.rename( self.id, '{}_{}'.format(self.short_id, self.name) ) def inspect_if_not_inspected(self): if not self.has_been_inspected: self.inspect() def wait(self): return self.client.wait(self.id).get('StatusCode', 127) def logs(self, *args, **kwargs): return self.client.logs(self.id, *args, **kwargs) def inspect(self): self.dictionary = self.client.inspect_container(self.id) self.has_been_inspected = True return self.dictionary def image_exists(self): try: self.client.inspect_image(self.image) except ImageNotFound: return False return True def reset_image(self, img_id): """ If this container's image has been removed, temporarily replace the old image ID with `img_id`. 
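    Editor's note (hedged): callers in the recreate path typically pass a
    freshly-built image ID here so container comparison keeps working after
    the original image has been removed.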
""" if not self.image_exists(): self.dictionary['Image'] = img_id def attach(self, *args, **kwargs): return self.client.attach(self.id, *args, **kwargs) def has_legacy_proj_name(self, project_name): return ( ComposeVersion(self.labels.get(LABEL_VERSION)) < ComposeVersion('1.21.0') and self.project != project_name ) def __repr__(self): return ''.format(self.name, self.id[:6]) def __eq__(self, other): if type(self) != type(other): return False return self.id == other.id def __hash__(self): return self.id.__hash__() def get_container_name(container): if not container.get('Name') and not container.get('Names'): return None # inspect if 'Name' in container: return container['Name'] # ps shortest_name = min(container['Names'], key=lambda n: len(n.split('/'))) return shortest_name.split('/')[-1] compose-1.29.2/compose/errors.py000066400000000000000000000016151404620552300166110ustar00rootroot00000000000000class OperationFailedError(Exception): def __init__(self, reason): self.msg = reason class StreamParseError(RuntimeError): def __init__(self, reason): self.msg = reason class HealthCheckException(Exception): def __init__(self, reason): self.msg = reason class HealthCheckFailed(HealthCheckException): def __init__(self, container_id): super().__init__( 'Container "{}" is unhealthy.'.format(container_id) ) class NoHealthCheckConfigured(HealthCheckException): def __init__(self, service_name): super().__init__( 'Service "{}" is missing a healthcheck configuration'.format( service_name ) ) class CompletedUnsuccessfully(Exception): def __init__(self, container_id, exit_code): self.msg = 'Container "{}" exited with code {}.'.format(container_id, exit_code) compose-1.29.2/compose/metrics/000077500000000000000000000000001404620552300163665ustar00rootroot00000000000000compose-1.29.2/compose/metrics/__init__.py000066400000000000000000000000001404620552300204650ustar00rootroot00000000000000compose-1.29.2/compose/metrics/client.py000066400000000000000000000034741404620552300202260ustar00rootroot00000000000000import os from enum import Enum import requests from docker import ContextAPI from docker.transport import UnixHTTPAdapter from compose.const import IS_WINDOWS_PLATFORM if IS_WINDOWS_PLATFORM: from docker.transport import NpipeHTTPAdapter class Status(Enum): SUCCESS = "success" FAILURE = "failure" CANCELED = "canceled" class MetricsSource: CLI = "docker-compose" if IS_WINDOWS_PLATFORM: METRICS_SOCKET_FILE = 'npipe://\\\\.\\pipe\\docker_cli' else: METRICS_SOCKET_FILE = 'http+unix:///var/run/docker-cli.sock' class MetricsCommand(requests.Session): """ Representation of a command in the metrics. 
""" def __init__(self, command, context_type=None, status=Status.SUCCESS, source=MetricsSource.CLI, uri=None): super().__init__() self.command = ("compose " + command).strip() if command else "compose --help" self.context = context_type or ContextAPI.get_current_context().context_type or 'moby' self.source = source self.status = status.value self.uri = uri or os.environ.get("METRICS_SOCKET_FILE", METRICS_SOCKET_FILE) if IS_WINDOWS_PLATFORM: self.mount("http+unix://", NpipeHTTPAdapter(self.uri)) else: self.mount("http+unix://", UnixHTTPAdapter(self.uri)) def send_metrics(self): try: return self.post("http+unix://localhost/usage", json=self.to_map(), timeout=.05, headers={'Content-Type': 'application/json'}) except Exception as e: return e def to_map(self): return { 'command': self.command, 'context': self.context, 'source': self.source, 'status': self.status, } compose-1.29.2/compose/metrics/decorator.py000066400000000000000000000012641404620552300207250ustar00rootroot00000000000000import functools from compose.metrics.client import MetricsCommand from compose.metrics.client import Status class metrics: def __init__(self, command_name=None): self.command_name = command_name def __call__(self, fn): @functools.wraps(fn, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES) def wrapper(*args, **kwargs): if not self.command_name: self.command_name = fn.__name__ result = fn(*args, **kwargs) MetricsCommand(self.command_name, status=Status.SUCCESS).send_metrics() return result return wrapper compose-1.29.2/compose/network.py000066400000000000000000000267241404620552300167760ustar00rootroot00000000000000import logging import re from collections import OrderedDict from operator import itemgetter from docker.errors import NotFound from docker.types import IPAMConfig from docker.types import IPAMPool from docker.utils import version_gte from docker.utils import version_lt from . import __version__ from .config import ConfigurationError from .const import LABEL_NETWORK from .const import LABEL_PROJECT from .const import LABEL_VERSION log = logging.getLogger(__name__) OPTS_EXCEPTIONS = [ 'com.docker.network.driver.overlay.vxlanid_list', 'com.docker.network.windowsshim.hnsid', 'com.docker.network.windowsshim.networkname' ] class Network: def __init__(self, client, project, name, driver=None, driver_opts=None, ipam=None, external=False, internal=False, enable_ipv6=False, labels=None, custom_name=False): self.client = client self.project = project self.name = name self.driver = driver self.driver_opts = driver_opts self.ipam = create_ipam_config_from_dict(ipam) self.external = external self.internal = internal self.enable_ipv6 = enable_ipv6 self.labels = labels self.custom_name = custom_name self.legacy = None def ensure(self): if self.external: if self.driver == 'overlay': # Swarm nodes do not register overlay networks that were # created on a different node unless they're in use. # See docker/compose#4399 return try: self.inspect() log.debug( 'Network {} declared as external. No new ' 'network will be created.'.format(self.name) ) except NotFound: raise ConfigurationError( 'Network {name} declared as external, but could' ' not be found. 
Please create the network manually'
                    ' using `{command} {name}` and try again.'.format(
                        name=self.full_name,
                        command='docker network create'
                    )
                )
            return

        self._set_legacy_flag()
        try:
            data = self.inspect(legacy=self.legacy)
            check_remote_network_config(data, self)
        except NotFound:
            driver_name = 'the default driver'
            if self.driver:
                driver_name = 'driver "{}"'.format(self.driver)

            log.info(
                'Creating network "{}" with {}'.format(self.full_name, driver_name)
            )

            self.client.create_network(
                name=self.full_name,
                driver=self.driver,
                options=self.driver_opts,
                ipam=self.ipam,
                internal=self.internal,
                enable_ipv6=self.enable_ipv6,
                labels=self._labels,
                attachable=version_gte(self.client._version, '1.24') or None,
                check_duplicate=True,
            )

    def remove(self):
        if self.external:
            log.info("Network %s is external, skipping", self.true_name)
            return

        log.info("Removing network {}".format(self.true_name))
        self.client.remove_network(self.true_name)

    def inspect(self, legacy=False):
        if legacy:
            return self.client.inspect_network(self.legacy_full_name)
        return self.client.inspect_network(self.full_name)

    @property
    def legacy_full_name(self):
        if self.custom_name:
            return self.name
        return '{}_{}'.format(
            re.sub(r'[_-]', '', self.project), self.name
        )

    @property
    def full_name(self):
        if self.custom_name:
            return self.name
        return '{}_{}'.format(self.project, self.name)

    @property
    def true_name(self):
        self._set_legacy_flag()
        if self.legacy:
            return self.legacy_full_name
        return self.full_name

    @property
    def _labels(self):
        if version_lt(self.client._version, '1.23'):
            return None
        labels = self.labels.copy() if self.labels else {}
        labels.update({
            LABEL_PROJECT: self.project,
            LABEL_NETWORK: self.name,
            LABEL_VERSION: __version__,
        })
        return labels

    def _set_legacy_flag(self):
        if self.legacy is not None:
            return
        try:
            data = self.inspect(legacy=True)
            self.legacy = data is not None
        except NotFound:
            self.legacy = False


def create_ipam_config_from_dict(ipam_dict):
    if not ipam_dict:
        return None

    return IPAMConfig(
        driver=ipam_dict.get('driver') or 'default',
        pool_configs=[
            IPAMPool(
                subnet=config.get('subnet'),
                iprange=config.get('ip_range'),
                gateway=config.get('gateway'),
                aux_addresses=config.get('aux_addresses'),
            )
            for config in ipam_dict.get('config', [])
        ],
        options=ipam_dict.get('options')
    )


class NetworkConfigChangedError(ConfigurationError):
    def __init__(self, net_name, property_name):
        super().__init__(
            'Network "{}" needs to be recreated - {} has changed'.format(
                net_name, property_name
            )
        )


def check_remote_ipam_config(remote, local):
    remote_ipam = remote.get('IPAM')
    ipam_dict = create_ipam_config_from_dict(local.ipam)
    if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
        raise NetworkConfigChangedError(local.true_name, 'IPAM driver')
    if len(ipam_dict['Config']) != 0:
        if len(ipam_dict['Config']) != len(remote_ipam['Config']):
            raise NetworkConfigChangedError(local.true_name, 'IPAM configs')
        # sorted() needs a callable key; itemgetter pulls the 'Subnet' field
        # (passing the bare string 'Subnet' raises a TypeError at runtime).
        remote_configs = sorted(remote_ipam['Config'], key=itemgetter('Subnet'))
        local_configs = sorted(ipam_dict['Config'], key=itemgetter('Subnet'))
        while local_configs:
            lc = local_configs.pop()
            rc = remote_configs.pop()
            if lc.get('Subnet') != rc.get('Subnet'):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config subnet')
            if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config gateway')
            if lc.get('IPRange') != rc.get('IPRange'):
                raise NetworkConfigChangedError(local.true_name, 'IPAM config ip_range')
            if sorted(lc.get('AuxiliaryAddresses')) !=
sorted(rc.get('AuxiliaryAddresses')): raise NetworkConfigChangedError(local.true_name, 'IPAM config aux_addresses') remote_opts = remote_ipam.get('Options') or {} local_opts = local.ipam.get('Options') or {} for k in set.union(set(remote_opts.keys()), set(local_opts.keys())): if remote_opts.get(k) != local_opts.get(k): raise NetworkConfigChangedError(local.true_name, 'IPAM option "{}"'.format(k)) def check_remote_network_config(remote, local): if local.driver and remote.get('Driver') != local.driver: raise NetworkConfigChangedError(local.true_name, 'driver') local_opts = local.driver_opts or {} remote_opts = remote.get('Options') or {} for k in set.union(set(remote_opts.keys()), set(local_opts.keys())): if k in OPTS_EXCEPTIONS: continue if remote_opts.get(k) != local_opts.get(k): raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k)) if local.ipam is not None: check_remote_ipam_config(remote, local) if local.internal is not None and local.internal != remote.get('Internal', False): raise NetworkConfigChangedError(local.true_name, 'internal') if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False): raise NetworkConfigChangedError(local.true_name, 'enable_ipv6') local_labels = local.labels or {} remote_labels = remote.get('Labels') or {} for k in set.union(set(remote_labels.keys()), set(local_labels.keys())): if k.startswith('com.docker.'): # We are only interested in user-specified labels continue if remote_labels.get(k) != local_labels.get(k): log.warning( 'Network {}: label "{}" has changed. It may need to be' ' recreated.'.format(local.true_name, k) ) def build_networks(name, config_data, client): network_config = config_data.networks or {} networks = { network_name: Network( client=client, project=name, name=data.get('name', network_name), driver=data.get('driver'), driver_opts=data.get('driver_opts'), ipam=data.get('ipam'), external=bool(data.get('external', False)), internal=data.get('internal'), enable_ipv6=data.get('enable_ipv6'), labels=data.get('labels'), custom_name=data.get('name') is not None, ) for network_name, data in network_config.items() } if 'default' not in networks: networks['default'] = Network(client, name, 'default') return networks class ProjectNetworks: def __init__(self, networks, use_networking): self.networks = networks or {} self.use_networking = use_networking @classmethod def from_services(cls, services, networks, use_networking): service_networks = { network: networks.get(network) for service in services for network in get_network_names_for_service(service) } unused = set(networks) - set(service_networks) - {'default'} if unused: log.warning( "Some networks were defined but are not used by any service: " "{}".format(", ".join(unused))) return cls(service_networks, use_networking) def remove(self): if not self.use_networking: return for network in self.networks.values(): try: network.remove() except NotFound: log.warning("Network %s not found.", network.true_name) def initialize(self): if not self.use_networking: return for network in self.networks.values(): network.ensure() def get_network_defs_for_service(service_dict): if 'network_mode' in service_dict: return {} networks = service_dict.get('networks', {'default': None}) return { net: (config or {}) for net, config in networks.items() } def get_network_names_for_service(service_dict): return get_network_defs_for_service(service_dict).keys() def get_networks(service_dict, network_definitions): networks = {} for name, netdef in 
get_network_defs_for_service(service_dict).items(): network = network_definitions.get(name) if network: networks[network.true_name] = netdef else: raise ConfigurationError( 'Service "{}" uses an undefined network "{}"' .format(service_dict['name'], name)) if any([v.get('priority') for v in networks.values()]): return OrderedDict(sorted( networks.items(), key=lambda t: t[1].get('priority') or 0, reverse=True )) else: # Ensure Compose will pick a consistent primary network if no # priority is set return OrderedDict(sorted(networks.items(), key=itemgetter(0))) compose-1.29.2/compose/parallel.py000066400000000000000000000264541404620552300171010ustar00rootroot00000000000000import _thread as thread import logging import operator import sys from queue import Empty from queue import Queue from threading import Lock from threading import Semaphore from threading import Thread from docker.errors import APIError from docker.errors import ImageNotFound from compose.cli.colors import AnsiMode from compose.cli.colors import green from compose.cli.colors import red from compose.cli.signals import ShutdownException from compose.const import PARALLEL_LIMIT from compose.errors import CompletedUnsuccessfully from compose.errors import HealthCheckFailed from compose.errors import NoHealthCheckConfigured from compose.errors import OperationFailedError log = logging.getLogger(__name__) STOP = object() class GlobalLimit: """Simple class to hold a global semaphore limiter for a project. This class should be treated as a singleton that is instantiated when the project is. """ global_limiter = Semaphore(PARALLEL_LIMIT) @classmethod def set_global_limit(cls, value): if value is None: value = PARALLEL_LIMIT cls.global_limiter = Semaphore(value) def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check): """ Watch events from a parallel execution, update status and fill errors and results. Returns exception to re-raise. """ error_to_reraise = None for obj, result, exception in events: if exception is None: if fail_check is not None and fail_check(obj): writer.write(msg, get_name(obj), 'failed', red) else: writer.write(msg, get_name(obj), 'done', green) results.append(result) elif isinstance(exception, ImageNotFound): # This is to bubble up ImageNotFound exceptions to the client so we # can prompt the user if they want to rebuild. errors[get_name(obj)] = exception.explanation writer.write(msg, get_name(obj), 'error', red) error_to_reraise = exception elif isinstance(exception, APIError): errors[get_name(obj)] = exception.explanation writer.write(msg, get_name(obj), 'error', red) elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured, CompletedUnsuccessfully)): errors[get_name(obj)] = exception.msg writer.write(msg, get_name(obj), 'error', red) elif isinstance(exception, UpstreamError): writer.write(msg, get_name(obj), 'error', red) else: errors[get_name(obj)] = exception error_to_reraise = exception return error_to_reraise def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None): """Runs func on objects in parallel while ensuring that func is ran on object only after it is ran on all its dependencies. get_deps called on object must return a collection with its dependencies. get_name called on object must return its name. 
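    Editor's illustration (hypothetical callables, not in the original docstring):

        results, errors = parallel_execute(
            objects=containers,
            func=lambda c: c.start(),
            get_name=lambda c: c.name,
            msg='Starting',
        )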
fail_check is an additional failure check for cases that should display as a failure in the CLI logs, but don't raise an exception (such as attempting to start 0 containers) """ objects = list(objects) stream = sys.stderr writer = ParallelStreamWriter.get_or_assign_instance(ParallelStreamWriter(stream)) for obj in objects: writer.add_object(msg, get_name(obj)) for obj in objects: writer.write_initial(msg, get_name(obj)) events = parallel_execute_iter(objects, func, get_deps, limit) errors = {} results = [] error_to_reraise = parallel_execute_watch( events, writer, errors, results, msg, get_name, fail_check ) for obj_name, error in errors.items(): stream.write("\nERROR: for {} {}\n".format(obj_name, error)) if error_to_reraise: raise error_to_reraise return results, errors def _no_deps(x): return [] class State: """ Holds the state of a partially-complete parallel operation. state.started: objects being processed state.finished: objects which have been processed state.failed: objects which either failed or whose dependencies failed """ def __init__(self, objects): self.objects = objects self.started = set() self.finished = set() self.failed = set() def is_done(self): return len(self.finished) + len(self.failed) >= len(self.objects) def pending(self): return set(self.objects) - self.started - self.finished - self.failed class NoLimit: def __enter__(self): pass def __exit__(self, *ex): pass def parallel_execute_iter(objects, func, get_deps, limit): """ Runs func on objects in parallel while ensuring that func is run on an object only after it has been run on all its dependencies. Returns an iterator of tuples which look like: # if func returned normally when run on object (object, result, None) # if func raised an exception when run on object (object, None, exception) # if func raised an exception when run on one of object's dependencies (object, None, UpstreamError()) """ if get_deps is None: get_deps = _no_deps if limit is None: limiter = NoLimit() else: limiter = Semaphore(limit) results = Queue() state = State(objects) while True: feed_queue(objects, func, get_deps, results, state, limiter) try: event = results.get(timeout=0.1) except Empty: continue # See https://github.com/docker/compose/issues/189 except thread.error: raise ShutdownException() if event is STOP: break obj, _, exception = event if exception is None: log.debug('Finished processing: {}'.format(obj)) state.finished.add(obj) else: log.debug('Failed: {}'.format(obj)) state.failed.add(obj) yield event def producer(obj, func, results, limiter): """ The entry point for a producer thread which runs func on a single object. Places a tuple on the results queue once func has either returned or raised. """ with limiter, GlobalLimit.global_limiter: try: result = func(obj) results.put((obj, result, None)) except Exception as e: results.put((obj, None, e)) def feed_queue(objects, func, get_deps, results, state, limiter): """ Starts producer threads for any objects which are ready to be processed (i.e. they have no dependencies which haven't been successfully processed). Shortcuts any objects whose dependencies have failed and places an (object, None, UpstreamError()) tuple on the results queue.
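Each dependency produced by get_deps is a (dependency, ready_check)
    pair; a hypothetical sketch of the shape this function consumes:

        deps = {(db_service, lambda s: s.is_healthy()), (cache_service, None)}

    An object is only started once every dependency has finished and its
    ready_check, when present, returns True.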
""" pending = state.pending() log.debug('Pending: {}'.format(pending)) for obj in pending: deps = get_deps(obj) try: if any(dep[0] in state.failed for dep in deps): log.debug('{} has upstream errors - not processing'.format(obj)) results.put((obj, None, UpstreamError())) state.failed.add(obj) elif all( dep not in objects or ( dep in state.finished and (not ready_check or ready_check(dep)) ) for dep, ready_check in deps ): log.debug('Starting producer thread for {}'.format(obj)) t = Thread(target=producer, args=(obj, func, results, limiter)) t.daemon = True t.start() state.started.add(obj) except (HealthCheckFailed, NoHealthCheckConfigured) as e: log.debug( 'Healthcheck for service(s) upstream of {} failed - ' 'not processing'.format(obj) ) results.put((obj, None, e)) except CompletedUnsuccessfully as e: log.debug( 'Service(s) upstream of {} did not completed successfully - ' 'not processing'.format(obj) ) results.put((obj, None, e)) if state.is_done(): results.put(STOP) class UpstreamError(Exception): pass class ParallelStreamWriter: """Write out messages for operations happening in parallel. Each operation has its own line, and ANSI code characters are used to jump to the correct line, and write over the line. """ default_ansi_mode = AnsiMode.AUTO write_lock = Lock() instance = None instance_lock = Lock() @classmethod def get_instance(cls): return cls.instance @classmethod def get_or_assign_instance(cls, writer): cls.instance_lock.acquire() try: if cls.instance is None: cls.instance = writer return cls.instance finally: cls.instance_lock.release() @classmethod def set_default_ansi_mode(cls, ansi_mode): cls.default_ansi_mode = ansi_mode def __init__(self, stream, ansi_mode=None): if ansi_mode is None: ansi_mode = self.default_ansi_mode self.stream = stream self.use_ansi_codes = ansi_mode.use_ansi_codes(stream) self.lines = [] self.width = 0 def add_object(self, msg, obj_index): if msg is None: return self.lines.append(msg + obj_index) self.width = max(self.width, len(msg + ' ' + obj_index)) def write_initial(self, msg, obj_index): if msg is None: return return self._write_noansi(msg, obj_index, '') def _write_ansi(self, msg, obj_index, status): self.write_lock.acquire() position = self.lines.index(msg + obj_index) diff = len(self.lines) - position # move up self.stream.write("%c[%dA" % (27, diff)) # erase self.stream.write("%c[2K\r" % 27) self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index, status, width=self.width)) # move back down self.stream.write("%c[%dB" % (27, diff)) self.stream.flush() self.write_lock.release() def _write_noansi(self, msg, obj_index, status): self.stream.write( "{:<{width}} ... 
{}\r\n".format( msg + ' ' + obj_index, status, width=self.width ) ) self.stream.flush() def write(self, msg, obj_index, status, color_func): if msg is None: return if self.use_ansi_codes: self._write_ansi(msg, obj_index, color_func(status)) else: self._write_noansi(msg, obj_index, status) def parallel_operation(containers, operation, options, message): parallel_execute( containers, operator.methodcaller(operation, **options), operator.attrgetter('name'), message, ) def parallel_remove(containers, options): stopped_containers = [c for c in containers if not c.is_running] parallel_operation(stopped_containers, 'remove', options, 'Removing') def parallel_pause(containers, options): parallel_operation(containers, 'pause', options, 'Pausing') def parallel_unpause(containers, options): parallel_operation(containers, 'unpause', options, 'Unpausing') def parallel_kill(containers, options): parallel_operation(containers, 'kill', options, 'Killing') compose-1.29.2/compose/progress_stream.py000066400000000000000000000067611404620552300205230ustar00rootroot00000000000000from compose import utils class StreamOutputError(Exception): pass def write_to_stream(s, stream): try: stream.write(s) except UnicodeEncodeError: encoding = getattr(stream, 'encoding', 'ascii') stream.write(s.encode(encoding, errors='replace').decode(encoding)) def stream_output(output, stream): is_terminal = hasattr(stream, 'isatty') and stream.isatty() stream = stream lines = {} diff = 0 for event in utils.json_stream(output): yield event is_progress_event = 'progress' in event or 'progressDetail' in event if not is_progress_event: print_output_event(event, stream, is_terminal) stream.flush() continue if not is_terminal: continue # if it's a progress event and we have a terminal, then display the progress bars image_id = event.get('id') if not image_id: continue if image_id not in lines: lines[image_id] = len(lines) write_to_stream("\n", stream) diff = len(lines) - lines[image_id] # move cursor up `diff` rows write_to_stream("%c[%dA" % (27, diff), stream) print_output_event(event, stream, is_terminal) if 'id' in event: # move cursor back down write_to_stream("%c[%dB" % (27, diff), stream) stream.flush() def print_output_event(event, stream, is_terminal): if 'errorDetail' in event: raise StreamOutputError(event['errorDetail']['message']) terminator = '' if is_terminal and 'stream' not in event: # erase current line write_to_stream("%c[2K\r" % 27, stream) terminator = "\r" elif 'progressDetail' in event: return if 'time' in event: write_to_stream("[%s] " % event['time'], stream) if 'id' in event: write_to_stream("%s: " % event['id'], stream) if 'from' in event: write_to_stream("(from %s) " % event['from'], stream) status = event.get('status', '') if 'progress' in event: write_to_stream("{} {}{}".format(status, event['progress'], terminator), stream) elif 'progressDetail' in event: detail = event['progressDetail'] total = detail.get('total') if 'current' in detail and total: percentage = float(detail['current']) / float(total) * 100 write_to_stream('{} ({:.1f}%){}'.format(status, percentage, terminator), stream) else: write_to_stream('{}{}'.format(status, terminator), stream) elif 'stream' in event: write_to_stream("{}{}".format(event['stream'], terminator), stream) else: write_to_stream("{}{}\n".format(status, terminator), stream) def get_digest_from_pull(events): digest = None for event in events: status = event.get('status') if not status or 'Digest' not in status: continue else: digest = status.split(':', 1)[1].strip() return 
digest def get_digest_from_push(events): for event in events: digest = event.get('aux', {}).get('Digest') if digest: return digest return None def read_status(event): status = event['status'].lower() if 'progressDetail' in event: detail = event['progressDetail'] if 'current' in detail and 'total' in detail: percentage = float(detail['current']) / float(detail['total']) status = '{} ({:.1%})'.format(status, percentage) return status compose-1.29.2/compose/project.py000066400000000000000000001225671404620552300167550ustar00rootroot00000000000000import datetime import enum import logging import operator import re from functools import reduce from os import path from docker.errors import APIError from docker.errors import ImageNotFound from docker.errors import NotFound from docker.utils import version_lt from . import parallel from .cli.errors import UserError from .config import ConfigurationError from .config.config import V1 from .config.sort_services import get_container_name_from_network_mode from .config.sort_services import get_service_name_from_network_mode from .const import LABEL_ONE_OFF from .const import LABEL_PROJECT from .const import LABEL_SERVICE from .container import Container from .network import build_networks from .network import get_networks from .network import ProjectNetworks from .progress_stream import read_status from .service import BuildAction from .service import ContainerIpcMode from .service import ContainerNetworkMode from .service import ContainerPidMode from .service import ConvergenceStrategy from .service import IpcMode from .service import NetworkMode from .service import NoSuchImageError from .service import parse_repository_tag from .service import PidMode from .service import Service from .service import ServiceIpcMode from .service import ServiceNetworkMode from .service import ServicePidMode from .utils import filter_attached_for_up from .utils import microseconds_from_time_nano from .utils import truncate_string from .volume import ProjectVolumes log = logging.getLogger(__name__) @enum.unique class OneOffFilter(enum.Enum): include = 0 exclude = 1 only = 2 @classmethod def update_labels(cls, value, labels): if value == cls.only: labels.append('{}={}'.format(LABEL_ONE_OFF, "True")) elif value == cls.exclude: labels.append('{}={}'.format(LABEL_ONE_OFF, "False")) elif value == cls.include: pass else: raise ValueError("Invalid value for one_off: {}".format(repr(value))) class Project: """ A collection of services. """ def __init__(self, name, services, client, networks=None, volumes=None, config_version=None, enabled_profiles=None): self.name = name self.services = services self.client = client self.volumes = volumes or ProjectVolumes({}) self.networks = networks or ProjectNetworks({}, False) self.config_version = config_version self.enabled_profiles = enabled_profiles or [] def labels(self, one_off=OneOffFilter.exclude, legacy=False): name = self.name if legacy: name = re.sub(r'[_-]', '', name) labels = ['{}={}'.format(LABEL_PROJECT, name)] OneOffFilter.update_labels(one_off, labels) return labels @classmethod def from_config(cls, name, config_data, client, default_platform=None, extra_labels=None, enabled_profiles=None): """ Construct a Project from a config.Config object. 
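A minimal usage sketch (assuming ``config_data`` was produced by the
        config loader and ``client`` is an initialized Docker API client;
        the names are illustrative):

            project = Project.from_config('myapp', config_data, client)
            project.up(detached=True)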
""" extra_labels = extra_labels or [] use_networking = (config_data.version and config_data.version != V1) networks = build_networks(name, config_data, client) project_networks = ProjectNetworks.from_services( config_data.services, networks, use_networking) volumes = ProjectVolumes.from_config(name, config_data, client) project = cls(name, [], client, project_networks, volumes, config_data.version, enabled_profiles) for service_dict in config_data.services: service_dict = dict(service_dict) if use_networking: service_networks = get_networks(service_dict, networks) else: service_networks = {} service_dict.pop('networks', None) links = project.get_links(service_dict) ipc_mode = project.get_ipc_mode(service_dict) network_mode = project.get_network_mode( service_dict, list(service_networks.keys()) ) pid_mode = project.get_pid_mode(service_dict) volumes_from = get_volumes_from(project, service_dict) if config_data.version != V1: service_dict['volumes'] = [ volumes.namespace_spec(volume_spec) for volume_spec in service_dict.get('volumes', []) ] secrets = get_secrets( service_dict['name'], service_dict.pop('secrets', None) or [], config_data.secrets) service_dict['scale'] = project.get_service_scale(service_dict) service_dict['device_requests'] = project.get_device_requests(service_dict) service_dict = translate_credential_spec_to_security_opt(service_dict) service_dict, ignored_keys = translate_deploy_keys_to_container_config( service_dict ) if ignored_keys: log.warning( 'The following deploy sub-keys are not supported and have' ' been ignored: {}'.format(', '.join(ignored_keys)) ) project.services.append( Service( service_dict.pop('name'), client=client, project=name, use_networking=use_networking, networks=service_networks, links=links, network_mode=network_mode, volumes_from=volumes_from, secrets=secrets, pid_mode=pid_mode, ipc_mode=ipc_mode, platform=service_dict.pop('platform', None), default_platform=default_platform, extra_labels=extra_labels, **service_dict) ) return project @property def service_names(self): return [service.name for service in self.services] def get_service(self, name): """ Retrieve a service by name. Raises NoSuchService if the named service does not exist. """ for service in self.services: if service.name == name: return service raise NoSuchService(name) def validate_service_names(self, service_names): """ Validate that the given list of service names only contains valid services. Raises NoSuchService if one of the names is invalid. """ valid_names = self.service_names for name in service_names: if name not in valid_names: raise NoSuchService(name) def get_services(self, service_names=None, include_deps=False, auto_enable_profiles=True): """ Returns a list of this project's services filtered by the provided list of names, or all services if service_names is None or []. If include_deps is specified, returns a list including the dependencies for service_names, in order of dependency. Preserves the original order of self.services where possible, reordering as needed to resolve dependencies. Raises NoSuchService if any of the named services do not exist. 
Raises ConfigurationError if any service depended on is not enabled by active profiles """ # create a copy so we can *locally* add auto-enabled profiles later enabled_profiles = self.enabled_profiles.copy() if service_names is None or len(service_names) == 0: auto_enable_profiles = False service_names = [ service.name for service in self.services if service.enabled_for_profiles(enabled_profiles) ] unsorted = [self.get_service(name) for name in service_names] services = [s for s in self.services if s in unsorted] if auto_enable_profiles: # enable profiles of explicitly targeted services for service in services: for profile in service.get_profiles(): if profile not in enabled_profiles: enabled_profiles.append(profile) if include_deps: services = reduce( lambda acc, s: self._inject_deps(acc, s, enabled_profiles), services, [] ) uniques = [] for s in services: if s not in uniques: uniques.append(s) return uniques def get_services_without_duplicate(self, service_names=None, include_deps=False): services = self.get_services(service_names, include_deps) for service in services: service.remove_duplicate_containers() return services def get_links(self, service_dict): links = [] if 'links' in service_dict: for link in service_dict.get('links', []): if ':' in link: service_name, link_name = link.split(':', 1) else: service_name, link_name = link, None try: links.append((self.get_service(service_name), link_name)) except NoSuchService: raise ConfigurationError( 'Service "%s" has a link to service "%s" which does not ' 'exist.' % (service_dict['name'], service_name)) del service_dict['links'] return links def get_network_mode(self, service_dict, networks): network_mode = service_dict.pop('network_mode', None) if not network_mode: if self.networks.use_networking: return NetworkMode(networks[0]) if networks else NetworkMode('none') return NetworkMode(None) service_name = get_service_name_from_network_mode(network_mode) if service_name: return ServiceNetworkMode(self.get_service(service_name)) container_name = get_container_name_from_network_mode(network_mode) if container_name: try: return ContainerNetworkMode(Container.from_id(self.client, container_name)) except APIError: raise ConfigurationError( "Service '{name}' uses the network stack of container '{dep}' which " "does not exist.".format(name=service_dict['name'], dep=container_name)) return NetworkMode(network_mode) def get_pid_mode(self, service_dict): pid_mode = service_dict.pop('pid', None) if not pid_mode: return PidMode(None) service_name = get_service_name_from_network_mode(pid_mode) if service_name: return ServicePidMode(self.get_service(service_name)) container_name = get_container_name_from_network_mode(pid_mode) if container_name: try: return ContainerPidMode(Container.from_id(self.client, container_name)) except APIError: raise ConfigurationError( "Service '{name}' uses the PID namespace of container '{dep}' which " "does not exist.".format(name=service_dict['name'], dep=container_name) ) return PidMode(pid_mode) def get_ipc_mode(self, service_dict): ipc_mode = service_dict.pop('ipc', None) if not ipc_mode: return IpcMode(None) service_name = get_service_name_from_network_mode(ipc_mode) if service_name: return ServiceIpcMode(self.get_service(service_name)) container_name = get_container_name_from_network_mode(ipc_mode) if container_name: try: return ContainerIpcMode(Container.from_id(self.client, container_name)) except APIError: raise ConfigurationError( "Service '{name}' uses the IPC namespace of container '{dep}' which " "does not
exist.".format(name=service_dict['name'], dep=container_name) ) return IpcMode(ipc_mode) def get_service_scale(self, service_dict): # service.scale for v2 and deploy.replicas for v3 scale = service_dict.get('scale', None) deploy_dict = service_dict.get('deploy', None) if not deploy_dict: return 1 if scale is None else scale if deploy_dict.get('mode', 'replicated') != 'replicated': return 1 if scale is None else scale replicas = deploy_dict.get('replicas', None) if scale is not None and replicas is not None: raise ConfigurationError( "Both service.scale and service.deploy.replicas are set." " Only one of them must be set." ) if replicas is not None: scale = replicas if scale is None: return 1 # deploy may contain placement constraints introduced in v3.8 max_replicas = deploy_dict.get('placement', {}).get( 'max_replicas_per_node', scale) scale = min(scale, max_replicas) if max_replicas < scale: log.warning("Scale is limited to {} ('max_replicas_per_node' field).".format( max_replicas)) return scale def get_device_requests(self, service_dict): deploy_dict = service_dict.get('deploy', None) if not deploy_dict: return resources = deploy_dict.get('resources', None) if not resources or not resources.get('reservations', None): return devices = resources['reservations'].get('devices') if not devices: return for dev in devices: count = dev.get("count", -1) if not isinstance(count, int): if count != "all": raise ConfigurationError( 'Invalid value "{}" for devices count'.format(dev["count"]), '(expected integer or "all")') dev["count"] = -1 if 'capabilities' in dev: dev['capabilities'] = [dev['capabilities']] return devices def start(self, service_names=None, **options): containers = [] def start_service(service): service_containers = service.start(quiet=True, **options) containers.extend(service_containers) services = self.get_services(service_names) def get_deps(service): return { (self.get_service(dep), config) for dep, config in service.get_dependency_configs().items() } parallel.parallel_execute( services, start_service, operator.attrgetter('name'), 'Starting', get_deps, fail_check=lambda obj: not obj.containers(), ) return containers def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options): containers = self.containers(service_names, one_off=one_off) def get_deps(container): # actually returning inversed dependencies return {(other, None) for other in containers if container.service in self.get_service(other.service).get_dependency_names()} parallel.parallel_execute( containers, self.build_container_operation_with_timeout_func('stop', options), operator.attrgetter('name'), 'Stopping', get_deps, ) def pause(self, service_names=None, **options): containers = self.containers(service_names) parallel.parallel_pause(reversed(containers), options) return containers def unpause(self, service_names=None, **options): containers = self.containers(service_names) parallel.parallel_unpause(containers, options) return containers def kill(self, service_names=None, **options): parallel.parallel_kill(self.containers(service_names), options) def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options): parallel.parallel_remove(self.containers( service_names, stopped=True, one_off=one_off ), options) def down( self, remove_image_type, include_volumes, remove_orphans=False, timeout=None, ignore_orphans=False): self.stop(one_off=OneOffFilter.include, timeout=timeout) if not ignore_orphans: self.find_orphan_containers(remove_orphans) self.remove_stopped(v=include_volumes, 
one_off=OneOffFilter.include) self.networks.remove() if include_volumes: self.volumes.remove() self.remove_images(remove_image_type) def remove_images(self, remove_image_type): for service in self.services: service.remove_image(remove_image_type) def restart(self, service_names=None, **options): # filter service_names by enabled profiles service_names = [s.name for s in self.get_services(service_names)] containers = self.containers(service_names, stopped=True) parallel.parallel_execute( containers, self.build_container_operation_with_timeout_func('restart', options), operator.attrgetter('name'), 'Restarting', ) return containers def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, memory=None, build_args=None, gzip=False, parallel_build=False, rm=True, silent=False, cli=False, progress=None): services = [] for service in self.get_services(service_names): if service.can_be_built(): services.append(service) elif not silent: log.info('%s uses an image, skipping' % service.name) if cli: if parallel_build: log.warning("Flag '--parallel' is ignored when building with " "COMPOSE_DOCKER_CLI_BUILD=1") if gzip: log.warning("Flag '--compress' is ignored when building with " "COMPOSE_DOCKER_CLI_BUILD=1") def build_service(service): service.build(no_cache, pull, force_rm, memory, build_args, gzip, rm, silent, cli, progress) if parallel_build: _, errors = parallel.parallel_execute( services, build_service, operator.attrgetter('name'), 'Building', limit=5, ) if len(errors): combined_errors = '\n'.join([ e.decode('utf-8') if isinstance(e, bytes) else e for e in errors.values() ]) raise ProjectError(combined_errors) else: for service in services: build_service(service) def create( self, service_names=None, strategy=ConvergenceStrategy.changed, do_build=BuildAction.none, ): services = self.get_services_without_duplicate(service_names, include_deps=True) for svc in services: svc.ensure_image_exists(do_build=do_build) plans = self._get_convergence_plans(services, strategy) for service in services: service.execute_convergence_plan( plans[service.name], detached=True, start=False) def _legacy_event_processor(self, service_names): # Only for v1 files or when Compose is forced to use an older API version def build_container_event(event, container): time = datetime.datetime.fromtimestamp(event['time']) time = time.replace( microsecond=microseconds_from_time_nano(event['timeNano']) ) return { 'time': time, 'type': 'container', 'action': event['status'], 'id': container.id, 'service': container.service, 'attributes': { 'name': container.name, 'image': event['from'], }, 'container': container, } service_names = set(service_names or self.service_names) for event in self.client.events( filters={'label': self.labels()}, decode=True ): # This is a guard against some events broadcasted by swarm that # don't have a status field. # See https://github.com/docker/compose/issues/3316 if 'status' not in event: continue try: # this can fail if the container has been removed or if the event # refers to an image container = Container.from_id(self.client, event['id']) except APIError: continue if container.service not in service_names: continue yield build_container_event(event, container) def events(self, service_names=None): if version_lt(self.client.api_version, '1.22'): # New, better event API was introduced in 1.22. 
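# Whichever processor is used, each yielded event is a dict shaped
            # roughly like this (a sketch based on build_container_event in
            # both code paths):
            #   {'time': datetime, 'type': 'container', 'action': <status>,
            #    'id': <container id>, 'service': <service name>,
            #    'attributes': {...}, 'container': Container or None}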
return self._legacy_event_processor(service_names) def build_container_event(event): container_attrs = event['Actor']['Attributes'] time = datetime.datetime.fromtimestamp(event['time']) time = time.replace( microsecond=microseconds_from_time_nano(event['timeNano']) ) container = None try: container = Container.from_id(self.client, event['id']) except APIError: # Container may have been removed (e.g. if this is a destroy event) pass return { 'time': time, 'type': 'container', 'action': event['status'], 'id': event['Actor']['ID'], 'service': container_attrs.get(LABEL_SERVICE), 'attributes': { k: v for k, v in container_attrs.items() if not k.startswith('com.docker.compose.') }, 'container': container, } def yield_loop(service_names): for event in self.client.events( filters={'label': self.labels()}, decode=True ): # TODO: support other event types if event.get('Type') != 'container': continue try: if event['Actor']['Attributes'][LABEL_SERVICE] not in service_names: continue except KeyError: continue yield build_container_event(event) return yield_loop(set(service_names) if service_names else self.service_names) def up(self, service_names=None, start_deps=True, strategy=ConvergenceStrategy.changed, do_build=BuildAction.none, timeout=None, detached=False, remove_orphans=False, ignore_orphans=False, scale_override=None, rescale=True, start=True, always_recreate_deps=False, reset_container_image=False, renew_anonymous_volumes=False, silent=False, cli=False, one_off=False, attach_dependencies=False, override_options=None, ): self.initialize() if not ignore_orphans: self.find_orphan_containers(remove_orphans) if scale_override is None: scale_override = {} services = self.get_services_without_duplicate( service_names, include_deps=start_deps) for svc in services: svc.ensure_image_exists(do_build=do_build, silent=silent, cli=cli) plans = self._get_convergence_plans( services, strategy, always_recreate_deps=always_recreate_deps, one_off=service_names if one_off else [], ) services_to_attach = filter_attached_for_up( services, service_names, attach_dependencies, lambda service: service.name) def do(service): return service.execute_convergence_plan( plans[service.name], timeout=timeout, detached=detached or (service not in services_to_attach), scale_override=scale_override.get(service.name), rescale=rescale, start=start, reset_container_image=reset_container_image, renew_anonymous_volumes=renew_anonymous_volumes, override_options=override_options, ) def get_deps(service): return { (self.get_service(dep), config) for dep, config in service.get_dependency_configs().items() } results, errors = parallel.parallel_execute( services, do, operator.attrgetter('name'), None, get_deps, ) if errors: raise ProjectError( 'Encountered errors while bringing up the project.' 
) return [ container for svc_containers in results if svc_containers is not None for container in svc_containers ] def initialize(self): self.networks.initialize() self.volumes.initialize() def _get_convergence_plans(self, services, strategy, always_recreate_deps=False, one_off=None): plans = {} for service in services: updated_dependencies = [ name for name in service.get_dependency_names() if name in plans and plans[name].action in ('recreate', 'create') ] is_one_off = one_off and service.name in one_off if updated_dependencies and strategy.allows_recreate: log.debug('%s has upstream changes (%s)', service.name, ", ".join(updated_dependencies)) containers_stopped = any( service.containers(stopped=True, filters={'status': ['created', 'exited']})) service_has_links = any(service.get_link_names()) container_has_links = any(c.get('HostConfig.Links') for c in service.containers()) should_recreate_for_links = service_has_links ^ container_has_links if always_recreate_deps or containers_stopped or should_recreate_for_links: plan = service.convergence_plan(ConvergenceStrategy.always, is_one_off) else: plan = service.convergence_plan(strategy, is_one_off) else: plan = service.convergence_plan(strategy, is_one_off) plans[service.name] = plan return plans def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=True, silent=False, include_deps=False): services = self.get_services(service_names, include_deps) if parallel_pull: self.parallel_pull(services, silent=silent) else: must_build = [] for service in services: try: service.pull(ignore_pull_failures, silent=silent) except (ImageNotFound, NotFound): if service.can_be_built(): must_build.append(service.name) else: raise if len(must_build): log.warning('Some service image(s) must be built from source by running:\n' ' docker-compose build {}' .format(' '.join(must_build))) def parallel_pull(self, services, ignore_pull_failures=False, silent=False): msg = 'Pulling' if not silent else None must_build = [] def pull_service(service): strm = service.pull(ignore_pull_failures, True, stream=True) if strm is None: # Attempting to pull service with no `image` key is a no-op return try: writer = parallel.ParallelStreamWriter.get_instance() if writer is None: raise RuntimeError('ParallelStreamWriter has not yet been instantiated') for event in strm: if 'status' not in event: continue status = read_status(event) writer.write( msg, service.name, truncate_string(status), lambda s: s ) except (ImageNotFound, NotFound): if service.can_be_built(): must_build.append(service.name) else: raise _, errors = parallel.parallel_execute( services, pull_service, operator.attrgetter('name'), msg, limit=5, ) if len(must_build): log.warning('Some service image(s) must be built from source by running:\n' ' docker-compose build {}' .format(' '.join(must_build))) if len(errors): combined_errors = '\n'.join([ e.decode('utf-8') if isinstance(e, bytes) else e for e in errors.values() ]) raise ProjectError(combined_errors) def push(self, service_names=None, ignore_push_failures=False): unique_images = set() for service in self.get_services(service_names, include_deps=False): # Considering <image> and <image:latest> as the same repo, tag, sep = parse_repository_tag(service.image_name) service_image_name = sep.join((repo, tag)) if tag else sep.join((repo, 'latest')) if service_image_name not in unique_images: service.push(ignore_push_failures) unique_images.add(service_image_name) def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude): ctnrs = list(filter(None, [
Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, filters={'label': self.labels(one_off=one_off)})]) ) if ctnrs: return ctnrs return list(filter(lambda c: c.has_legacy_proj_name(self.name), filter(None, [ Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, filters={'label': self.labels(one_off=one_off, legacy=True)})]) )) def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude): if service_names: self.validate_service_names(service_names) else: service_names = self.service_names containers = self._labeled_containers(stopped, one_off) def matches_service_names(container): return container.labels.get(LABEL_SERVICE) in service_names return [c for c in containers if matches_service_names(c)] def find_orphan_containers(self, remove_orphans): def _find(): containers = set(self._labeled_containers() + self._labeled_containers(stopped=True)) for ctnr in containers: service_name = ctnr.labels.get(LABEL_SERVICE) if service_name not in self.service_names: yield ctnr orphans = list(_find()) if not orphans: return if remove_orphans: for ctnr in orphans: log.info('Removing orphan container "{}"'.format(ctnr.name)) try: ctnr.kill() except APIError: pass ctnr.remove(force=True) else: log.warning( 'Found orphan containers ({}) for this project. If ' 'you removed or renamed this service in your compose ' 'file, you can run this command with the ' '--remove-orphans flag to clean it up.'.format( ', '.join(["{}".format(ctnr.name) for ctnr in orphans]) ) ) def _inject_deps(self, acc, service, enabled_profiles): dep_names = service.get_dependency_names() if len(dep_names) > 0: dep_services = self.get_services( service_names=list(set(dep_names)), include_deps=True, auto_enable_profiles=False ) for dep in dep_services: if not dep.enabled_for_profiles(enabled_profiles): raise ConfigurationError( 'Service "{dep_name}" was pulled in as a dependency of ' 'service "{service_name}" but is not enabled by the ' 'active profiles. ' 'You may fix this by adding a common profile to ' '"{dep_name}" and "{service_name}".' 
.format(dep_name=dep.name, service_name=service.name) ) else: dep_services = [] dep_services.append(service) return acc + dep_services def build_container_operation_with_timeout_func(self, operation, options): def container_operation_with_timeout(container): _options = options.copy() if _options.get('timeout') is None: service = self.get_service(container.service) _options['timeout'] = service.stop_timeout(None) return getattr(container, operation)(**_options) return container_operation_with_timeout def translate_credential_spec_to_security_opt(service_dict): result = [] if 'credential_spec' in service_dict: spec = convert_credential_spec_to_security_opt(service_dict['credential_spec']) result.append('credentialspec={spec}'.format(spec=spec)) if result: service_dict['security_opt'] = result return service_dict def translate_resource_keys_to_container_config(resources_dict, service_dict): if 'limits' in resources_dict: service_dict['mem_limit'] = resources_dict['limits'].get('memory') if 'cpus' in resources_dict['limits']: service_dict['cpus'] = float(resources_dict['limits']['cpus']) if 'reservations' in resources_dict: service_dict['mem_reservation'] = resources_dict['reservations'].get('memory') if 'cpus' in resources_dict['reservations']: return ['resources.reservations.cpus'] return [] def convert_restart_policy(name): try: return { 'any': 'always', 'none': 'no', 'on-failure': 'on-failure' }[name] except KeyError: raise ConfigurationError('Invalid restart policy "{}"'.format(name)) def convert_credential_spec_to_security_opt(credential_spec): if 'file' in credential_spec: return 'file://{file}'.format(file=credential_spec['file']) return 'registry://{registry}'.format(registry=credential_spec['registry']) def translate_deploy_keys_to_container_config(service_dict): if 'credential_spec' in service_dict: del service_dict['credential_spec'] if 'configs' in service_dict: del service_dict['configs'] if 'deploy' not in service_dict: return service_dict, [] deploy_dict = service_dict['deploy'] ignored_keys = [ k for k in ['endpoint_mode', 'labels', 'update_config', 'rollback_config'] if k in deploy_dict ] if 'restart_policy' in deploy_dict: service_dict['restart'] = { 'Name': convert_restart_policy(deploy_dict['restart_policy'].get('condition', 'any')), 'MaximumRetryCount': deploy_dict['restart_policy'].get('max_attempts', 0) } for k in deploy_dict['restart_policy'].keys(): if k != 'condition' and k != 'max_attempts': ignored_keys.append('restart_policy.{}'.format(k)) ignored_keys.extend( translate_resource_keys_to_container_config( deploy_dict.get('resources', {}), service_dict ) ) del service_dict['deploy'] return service_dict, ignored_keys def get_volumes_from(project, service_dict): volumes_from = service_dict.pop('volumes_from', None) if not volumes_from: return [] def build_volume_from(spec): if spec.type == 'service': try: return spec._replace(source=project.get_service(spec.source)) except NoSuchService: pass if spec.type == 'container': try: container = Container.from_id(project.client, spec.source) return spec._replace(source=container) except APIError: pass raise ConfigurationError( "Service \"{}\" mounts volumes from \"{}\", which is not the name " "of a service or container.".format( service_dict['name'], spec.source)) return [build_volume_from(vf) for vf in volumes_from] def get_secrets(service, service_secrets, secret_defs): secrets = [] for secret in service_secrets: secret_def = secret_defs.get(secret.source) if not secret_def: raise ConfigurationError( "Service \"{service}\" 
uses an undefined secret \"{secret}\" " .format(service=service, secret=secret.source)) if secret_def.get('external'): log.warning('Service "{service}" uses secret "{secret}" which is external. ' 'External secrets are not available to containers created by ' 'docker-compose.'.format(service=service, secret=secret.source)) continue if secret.uid or secret.gid or secret.mode: log.warning( 'Service "{service}" uses secret "{secret}" with uid, ' 'gid, or mode. These fields are not supported by this ' 'implementation of the Compose file'.format( service=service, secret=secret.source ) ) secret_file = secret_def.get('file') if not path.isfile(str(secret_file)): log.warning( 'Service "{service}" uses an undefined secret file "{secret_file}", ' 'the following file should be created "{secret_file}"'.format( service=service, secret_file=secret_file ) ) secrets.append({'secret': secret, 'file': secret_file}) return secrets def get_image_digests(project): digests = {} needs_push = set() needs_pull = set() for service in project.services: try: digests[service.name] = get_image_digest(service) except NeedsPush as e: needs_push.add(e.image_name) except NeedsPull as e: needs_pull.add(e.service_name) if needs_push or needs_pull: raise MissingDigests(needs_push, needs_pull) return digests def get_image_digest(service): if 'image' not in service.options: raise UserError( "Service '{s.name}' doesn't define an image tag. An image name is " "required to generate a proper image digest. Specify an image repo " "and tag with the 'image' option.".format(s=service)) _, _, separator = parse_repository_tag(service.options['image']) # Compose file already uses a digest, no lookup required if separator == '@': return service.options['image'] digest = get_digest(service) if digest: return digest if 'build' not in service.options: raise NeedsPull(service.image_name, service.name) raise NeedsPush(service.image_name) def get_digest(service): digest = None try: image = service.image() # TODO: pick a digest based on the image tag if there are multiple # digests if image['RepoDigests']: digest = image['RepoDigests'][0] except NoSuchImageError: try: # Fetch the image digest from the registry distribution = service.get_image_registry_data() if distribution['Descriptor']['digest']: digest = '{image_name}@{digest}'.format( image_name=service.image_name, digest=distribution['Descriptor']['digest'] ) except NoSuchImageError: raise UserError( "Digest not found for service '{service}'. 
" "Repository does not exist or may require 'docker login'" .format(service=service.name)) return digest class MissingDigests(Exception): def __init__(self, needs_push, needs_pull): self.needs_push = needs_push self.needs_pull = needs_pull class NeedsPush(Exception): def __init__(self, image_name): self.image_name = image_name class NeedsPull(Exception): def __init__(self, image_name, service_name): self.image_name = image_name self.service_name = service_name class NoSuchService(Exception): def __init__(self, name): if isinstance(name, bytes): name = name.decode('utf-8') self.name = name self.msg = "No such service: %s" % self.name def __str__(self): return self.msg class ProjectError(Exception): def __init__(self, msg): self.msg = msg compose-1.29.2/compose/service.py000066400000000000000000002076371404620552300167510ustar00rootroot00000000000000import enum import itertools import logging import os import re import subprocess import sys import tempfile from collections import namedtuple from collections import OrderedDict from operator import attrgetter from docker.errors import APIError from docker.errors import ImageNotFound from docker.errors import NotFound from docker.types import LogConfig from docker.types import Mount from docker.utils import version_gte from docker.utils import version_lt from docker.utils.ports import build_port_bindings from docker.utils.ports import split_port from docker.utils.utils import convert_tmpfs_mounts from . import __version__ from . import const from . import progress_stream from .config import DOCKER_CONFIG_KEYS from .config import is_url from .config import merge_environment from .config import merge_labels from .config.errors import DependencyError from .config.types import MountSpec from .config.types import ServicePort from .config.types import VolumeSpec from .const import DEFAULT_TIMEOUT from .const import IS_WINDOWS_PLATFORM from .const import LABEL_CONFIG_HASH from .const import LABEL_CONTAINER_NUMBER from .const import LABEL_ONE_OFF from .const import LABEL_PROJECT from .const import LABEL_SERVICE from .const import LABEL_SLUG from .const import LABEL_VERSION from .const import NANOCPUS_SCALE from .const import WINDOWS_LONGPATH_PREFIX from .container import Container from .errors import CompletedUnsuccessfully from .errors import HealthCheckFailed from .errors import NoHealthCheckConfigured from .errors import OperationFailedError from .parallel import parallel_execute from .progress_stream import stream_output from .progress_stream import StreamOutputError from .utils import generate_random_id from .utils import json_hash from .utils import parse_bytes from .utils import parse_seconds_float from .utils import truncate_id from .utils import unique_everseen from compose.cli.utils import binarystr_to_unicode log = logging.getLogger(__name__) HOST_CONFIG_KEYS = [ 'cap_add', 'cap_drop', 'cgroup_parent', 'cpu_count', 'cpu_percent', 'cpu_period', 'cpu_quota', 'cpu_rt_period', 'cpu_rt_runtime', 'cpu_shares', 'cpus', 'cpuset', 'device_cgroup_rules', 'devices', 'device_requests', 'dns', 'dns_search', 'dns_opt', 'env_file', 'extra_hosts', 'group_add', 'init', 'ipc', 'isolation', 'read_only', 'log_driver', 'log_opt', 'mem_limit', 'mem_reservation', 'memswap_limit', 'mem_swappiness', 'oom_kill_disable', 'oom_score_adj', 'pid', 'pids_limit', 'privileged', 'restart', 'runtime', 'security_opt', 'shm_size', 'storage_opt', 'sysctls', 'userns_mode', 'volumes_from', 'volume_driver', ] CONDITION_STARTED = 'service_started' CONDITION_HEALTHY = 
'service_healthy' CONDITION_COMPLETED_SUCCESSFULLY = 'service_completed_successfully' class BuildError(Exception): def __init__(self, service, reason): self.service = service self.reason = reason class NeedsBuildError(Exception): def __init__(self, service): self.service = service class NoSuchImageError(Exception): pass ServiceName = namedtuple('ServiceName', 'project service number') ConvergencePlan = namedtuple('ConvergencePlan', 'action containers') @enum.unique class ConvergenceStrategy(enum.Enum): """Enumeration for all possible convergence strategies. Values refer to when containers should be recreated. """ changed = 1 always = 2 never = 3 @property def allows_recreate(self): return self is not type(self).never @enum.unique class ImageType(enum.Enum): """Enumeration for the types of images known to compose.""" none = 0 local = 1 all = 2 @enum.unique class BuildAction(enum.Enum): """Enumeration for the possible build actions.""" none = 0 force = 1 skip = 2 class Service: def __init__( self, name, client=None, project='default', use_networking=False, links=None, volumes_from=None, network_mode=None, networks=None, secrets=None, scale=1, ipc_mode=None, pid_mode=None, default_platform=None, extra_labels=None, **options ): self.name = name self.client = client self.project = project self.use_networking = use_networking self.links = links or [] self.volumes_from = volumes_from or [] self.ipc_mode = ipc_mode or IpcMode(None) self.network_mode = network_mode or NetworkMode(None) self.pid_mode = pid_mode or PidMode(None) self.networks = networks or {} self.secrets = secrets or [] self.scale_num = scale self.default_platform = default_platform self.options = options self.extra_labels = extra_labels or [] def __repr__(self): return '<Service: {}>'.format(self.name) def containers(self, stopped=False, one_off=False, filters=None, labels=None): if filters is None: filters = {} filters.update({'label': self.labels(one_off=one_off) + (labels or [])}) result = list(filter(None, [ Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, filters=filters)]) ) if result: return result filters.update({'label': self.labels(one_off=one_off, legacy=True) + (labels or [])}) return list( filter( lambda c: c.has_legacy_proj_name(self.project), filter(None, [ Container.from_ps(self.client, container) for container in self.client.containers( all=stopped, filters=filters)]) ) ) def get_container(self, number=1): """Return a :class:`compose.container.Container` for this service. The container must be active, and match `number`. """ for container in self.containers(labels=['{}={}'.format(LABEL_CONTAINER_NUMBER, number)]): return container raise ValueError("No container found for {}_{}".format(self.name, number)) def start(self, **options): containers = self.containers(stopped=True) for c in containers: self.start_container_if_stopped(c, **options) return containers def show_scale_warnings(self, desired_num): if self.custom_container_name and desired_num > 1: log.warning('The "%s" service is using the custom container name "%s". ' 'Docker requires each container to have a unique name. ' 'Remove the custom name to scale the service.' % (self.name, self.custom_container_name)) if self.specifies_host_port() and desired_num > 1: log.warning('The "%s" service specifies a port on the host. If multiple containers ' 'for this service are created on a single host, the port will clash.'
% self.name) def scale(self, desired_num, timeout=None): """ Adjusts the number of containers to the specified number and ensures they are running. - creates containers until there are at least `desired_num` - stops containers until there are at most `desired_num` running - starts containers until there are at least `desired_num` running - removes all stopped containers """ self.show_scale_warnings(desired_num) running_containers = self.containers(stopped=False) num_running = len(running_containers) for c in running_containers: if not c.has_legacy_proj_name(self.project): continue log.info('Recreating container with legacy name %s' % c.name) self.recreate_container(c, timeout, start_new_container=False) if desired_num == num_running: # do nothing as we already have the desired number log.info('Desired container number already achieved') return if desired_num > num_running: all_containers = self.containers(stopped=True) if num_running != len(all_containers): # we have some stopped containers, check for divergences stopped_containers = [ c for c in all_containers if not c.is_running ] # Remove containers that have diverged divergent_containers = [ c for c in stopped_containers if self._containers_have_diverged([c]) ] for c in divergent_containers: c.remove() all_containers = list(set(all_containers) - set(divergent_containers)) sorted_containers = sorted(all_containers, key=attrgetter('number')) self._execute_convergence_start( sorted_containers, desired_num, timeout, True, True ) if desired_num < num_running: num_to_stop = num_running - desired_num sorted_running_containers = sorted( running_containers, key=attrgetter('number')) self._downscale(sorted_running_containers[-num_to_stop:], timeout) def create_container(self, one_off=False, previous_container=None, number=None, quiet=False, **override_options): """ Create a container for this service. If the image doesn't exist, attempt to pull it. """ # This is only necessary for `scale` and `volumes_from` # auto-creating containers to satisfy the dependency. self.ensure_image_exists() container_options = self._get_container_create_options( override_options, number or self._next_container_number(one_off=one_off), one_off=one_off, previous_container=previous_container, ) if 'name' in container_options and not quiet: log.info("Creating %s" % container_options['name']) try: return Container.create(self.client, **container_options) except APIError as ex: raise OperationFailedError("Cannot create container for service %s: %s" % (self.name, binarystr_to_unicode(ex.explanation))) def ensure_image_exists(self, do_build=BuildAction.none, silent=False, cli=False): if self.can_be_built() and do_build == BuildAction.force: self.build(cli=cli) return try: self.image() return except NoSuchImageError: pass if not self.can_be_built(): self.pull(silent=silent) return if do_build == BuildAction.skip: raise NeedsBuildError(self) self.build(cli=cli) log.warning( "Image for service {} was built because it did not already exist. 
To " "rebuild this image you must use `docker-compose build` or " "`docker-compose up --build`.".format(self.name)) def get_image_registry_data(self): try: return self.client.inspect_distribution(self.image_name) except APIError: raise NoSuchImageError("Image '{}' not found".format(self.image_name)) def image(self): try: return self.client.inspect_image(self.image_name) except ImageNotFound: raise NoSuchImageError("Image '{}' not found".format(self.image_name)) @property def image_name(self): return self.options.get('image', '{project}_{s.name}'.format( s=self, project=self.project.lstrip('_-') )) @property def platform(self): platform = self.options.get('platform') if not platform and version_gte(self.client.api_version, '1.35'): platform = self.default_platform return platform def convergence_plan(self, strategy=ConvergenceStrategy.changed, one_off=False): containers = self.containers(stopped=True) if one_off: return ConvergencePlan('one_off', []) if not containers: return ConvergencePlan('create', []) if strategy is ConvergenceStrategy.never: return ConvergencePlan('start', containers) if ( strategy is ConvergenceStrategy.always or self._containers_have_diverged(containers) ): return ConvergencePlan('recreate', containers) stopped = [c for c in containers if not c.is_running] if stopped: return ConvergencePlan('start', containers) return ConvergencePlan('noop', containers) def _containers_have_diverged(self, containers): config_hash = None try: config_hash = self.config_hash except NoSuchImageError as e: log.debug( 'Service %s has diverged: %s', self.name, str(e), ) return True has_diverged = False for c in containers: if c.has_legacy_proj_name(self.project): log.debug('%s has diverged: Legacy project name' % c.name) has_diverged = True continue container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None) if container_config_hash != config_hash: log.debug( '%s has diverged: %s != %s', c.name, container_config_hash, config_hash, ) has_diverged = True return has_diverged def _execute_convergence_create(self, scale, detached, start, one_off=False, override_options=None): i = self._next_container_number() def create_and_start(service, n): if one_off: container = service.create_container(one_off=True, quiet=True, **override_options) else: container = service.create_container(number=n, quiet=True) if not detached: container.attach_log_stream() if start and not one_off: self.start_container(container) return container def get_name(service_name): if one_off: return "_".join([ service_name.project, service_name.service, "run", ]) return self.get_container_name(service_name.service, service_name.number) containers, errors = parallel_execute( [ ServiceName(self.project, self.name, index) for index in range(i, i + scale) ], lambda service_name: create_and_start(self, service_name.number), get_name, "Creating" ) for error in errors.values(): raise OperationFailedError(error) return containers def _execute_convergence_recreate(self, containers, scale, timeout, detached, start, renew_anonymous_volumes): if scale is not None and len(containers) > scale: self._downscale(containers[scale:], timeout) containers = containers[:scale] def recreate(container): return self.recreate_container( container, timeout=timeout, attach_logs=not detached, start_new_container=start, renew_anonymous_volumes=renew_anonymous_volumes ) containers, errors = parallel_execute( containers, recreate, lambda c: c.name, "Recreating", ) for error in errors.values(): raise OperationFailedError(error) if scale is not None and 
len(containers) < scale: containers.extend(self._execute_convergence_create( scale - len(containers), detached, start )) return containers def _execute_convergence_start(self, containers, scale, timeout, detached, start): if scale is not None and len(containers) > scale: self._downscale(containers[scale:], timeout) containers = containers[:scale] if start: stopped = [c for c in containers if not c.is_running] _, errors = parallel_execute( stopped, lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True), lambda c: c.name, "Starting", ) for error in errors.values(): raise OperationFailedError(error) if scale is not None and len(containers) < scale: containers.extend(self._execute_convergence_create( scale - len(containers), detached, start )) return containers def _downscale(self, containers, timeout=None): def stop_and_remove(container): container.stop(timeout=self.stop_timeout(timeout)) container.remove() parallel_execute( containers, stop_and_remove, lambda c: c.name, "Stopping and removing", ) def execute_convergence_plan(self, plan, timeout=None, detached=False, start=True, scale_override=None, rescale=True, reset_container_image=False, renew_anonymous_volumes=False, override_options=None): (action, containers) = plan scale = scale_override if scale_override is not None else self.scale_num containers = sorted(containers, key=attrgetter('number')) self.show_scale_warnings(scale) if action in ['create', 'one_off']: return self._execute_convergence_create( scale, detached, start, one_off=(action == 'one_off'), override_options=override_options ) # The create action always needs an initial scale, but otherwise, # we set scale to None in no-rescale scenarios (`run` dependencies) if not rescale: scale = None if action == 'recreate': if reset_container_image: # Updating the image ID on the container object lets us recover old volumes if # the new image uses them as well img_id = self.image()['Id'] for c in containers: c.reset_image(img_id) return self._execute_convergence_recreate( containers, scale, timeout, detached, start, renew_anonymous_volumes, ) if action == 'start': return self._execute_convergence_start( containers, scale, timeout, detached, start ) if action == 'noop': if scale != len(containers): return self._execute_convergence_start( containers, scale, timeout, detached, start ) for c in containers: log.info("%s is up-to-date" % c.name) return containers raise Exception("Invalid action: {}".format(action)) def recreate_container(self, container, timeout=None, attach_logs=False, start_new_container=True, renew_anonymous_volumes=False): """Recreate a container. The original container is renamed to a temporary name so that data volumes can be copied to the new container, before the original container is removed.
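A sketch of the sequence implemented below (``old`` is a hypothetical
        existing container):

            old.stop(); old.rename_to_tmp_name()
            new = self.create_container(previous_container=old,
                                        number=old.number, quiet=True)
            self.start_container(new)
            old.remove()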
""" container.stop(timeout=self.stop_timeout(timeout)) container.rename_to_tmp_name() new_container = self.create_container( previous_container=container if not renew_anonymous_volumes else None, number=container.number, quiet=True, ) if attach_logs: new_container.attach_log_stream() if start_new_container: self.start_container(new_container) container.remove() return new_container def stop_timeout(self, timeout): if timeout is not None: return timeout timeout = parse_seconds_float(self.options.get('stop_grace_period')) if timeout is not None: return timeout return DEFAULT_TIMEOUT def start_container_if_stopped(self, container, attach_logs=False, quiet=False): if not container.is_running: if not quiet: log.info("Starting %s" % container.name) if attach_logs: container.attach_log_stream() return self.start_container(container) def start_container(self, container, use_network_aliases=True): self.connect_container_to_networks(container, use_network_aliases) try: container.start() except APIError as ex: expl = binarystr_to_unicode(ex.explanation) if "driver failed programming external connectivity" in expl: log.warn("Host is already in use by another container") raise OperationFailedError("Cannot start service {}: {}".format(self.name, expl)) return container @property def prioritized_networks(self): return OrderedDict( sorted( self.networks.items(), key=lambda t: t[1].get('priority') or 0, reverse=True ) ) def connect_container_to_networks(self, container, use_network_aliases=True): connected_networks = container.get('NetworkSettings.Networks') for network, netdefs in self.prioritized_networks.items(): if network in connected_networks: if short_id_alias_exists(container, network): continue self.client.disconnect_container_from_network(container.id, network) aliases = self._get_aliases(netdefs, container) if use_network_aliases else [] self.client.connect_container_to_network( container.id, network, aliases=aliases, ipv4_address=netdefs.get('ipv4_address', None), ipv6_address=netdefs.get('ipv6_address', None), links=self._get_links(False), link_local_ips=netdefs.get('link_local_ips', None), ) def remove_duplicate_containers(self, timeout=None): for c in self.duplicate_containers(): log.info('Removing %s' % c.name) c.stop(timeout=self.stop_timeout(timeout)) c.remove() def duplicate_containers(self): containers = sorted( self.containers(stopped=True), key=lambda c: c.get('Created'), ) numbers = set() for c in containers: if c.number in numbers: yield c else: numbers.add(c.number) @property def config_hash(self): return json_hash(self.config_dict()) def config_dict(self): def image_id(): try: return self.image()['Id'] except NoSuchImageError: return None return { 'options': self.options, 'image_id': image_id(), 'links': self.get_link_names(), 'net': self.network_mode.id, 'ipc_mode': self.ipc_mode.mode, 'networks': self.networks, 'secrets': self.secrets, 'volumes_from': [ (v.source.name, v.mode) for v in self.volumes_from if isinstance(v.source, Service) ] } def get_dependency_names(self): net_name = self.network_mode.service_name pid_namespace = self.pid_mode.service_name ipc_namespace = self.ipc_mode.service_name return ( self.get_linked_service_names() + self.get_volumes_from_names() + ([net_name] if net_name else []) + ([pid_namespace] if pid_namespace else []) + ([ipc_namespace] if ipc_namespace else []) + list(self.options.get('depends_on', {}).keys()) ) def get_dependency_configs(self): net_name = self.network_mode.service_name pid_namespace = self.pid_mode.service_name ipc_namespace = 
self.ipc_mode.service_name configs = { name: None for name in self.get_linked_service_names() } configs.update( (name, None) for name in self.get_volumes_from_names() ) configs.update({net_name: None} if net_name else {}) configs.update({pid_namespace: None} if pid_namespace else {}) configs.update({ipc_namespace: None} if ipc_namespace else {}) configs.update(self.options.get('depends_on', {})) for svc, config in self.options.get('depends_on', {}).items(): if config['condition'] == CONDITION_STARTED: configs[svc] = lambda s: True elif config['condition'] == CONDITION_HEALTHY: configs[svc] = lambda s: s.is_healthy() elif config['condition'] == CONDITION_COMPLETED_SUCCESSFULLY: configs[svc] = lambda s: s.is_completed_successfully() else: # The config schema already prevents this, but it might be # bypassed if Compose is called programmatically. raise ValueError( 'depends_on condition "{}" is invalid.'.format( config['condition'] ) ) return configs def get_linked_service_names(self): return [service.name for (service, _) in self.links] def get_link_names(self): return [(service.name, alias) for service, alias in self.links] def get_volumes_from_names(self): return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)] def _next_container_number(self, one_off=False): if one_off: return None containers = itertools.chain( self._fetch_containers( all=True, filters={'label': self.labels(one_off=False)} ), self._fetch_containers( all=True, filters={'label': self.labels(one_off=False, legacy=True)} ) ) numbers = [c.number for c in containers if c.number is not None] return 1 if not numbers else max(numbers) + 1 def _fetch_containers(self, **fetch_options): # Account for containers that might have been removed since we fetched # the list. 
def soft_inspect(container): try: return Container.from_id(self.client, container['Id']) except NotFound: return None return filter(None, [ soft_inspect(container) for container in self.client.containers(**fetch_options) ]) def _get_aliases(self, network, container=None): return list( {self.name} | ({container.short_id} if container else set()) | set(network.get('aliases', ())) ) def build_default_networking_config(self): if not self.networks: return {} network = self.networks[self.network_mode.id] endpoint = { 'Aliases': self._get_aliases(network), 'IPAMConfig': {}, } if network.get('ipv4_address'): endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address') if network.get('ipv6_address'): endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address') return {"EndpointsConfig": {self.network_mode.id: endpoint}} def _get_links(self, link_to_self): links = {} for service, link_name in self.links: for container in service.containers(): links[link_name or service.name] = container.name links[container.name] = container.name links[container.name_without_project] = container.name if link_to_self: for container in self.containers(): links[self.name] = container.name links[container.name] = container.name links[container.name_without_project] = container.name for external_link in self.options.get('external_links') or []: if ':' not in external_link: link_name = external_link else: external_link, link_name = external_link.split(':') links[link_name] = external_link return [ (alias, container_name) for (container_name, alias) in links.items() ] def _get_volumes_from(self): return [build_volume_from(spec) for spec in self.volumes_from] def _get_container_create_options( self, override_options, number, one_off=False, previous_container=None): add_config_hash = (not one_off and not override_options) slug = generate_random_id() if one_off else None container_options = { k: self.options[k] for k in DOCKER_CONFIG_KEYS if k in self.options} override_volumes = override_options.pop('volumes', []) container_options.update(override_options) if not container_options.get('name'): container_options['name'] = self.get_container_name(self.name, number, slug) container_options.setdefault('detach', True) # If a qualified hostname was given, split it into an # unqualified hostname and a domainname unless domainname # was also given explicitly. This matches behavior # until Docker Engine 1.11.0 - Docker API 1.23. if (version_lt(self.client.api_version, '1.23') and 'hostname' in container_options and 'domainname' not in container_options and '.' 
in container_options['hostname']): parts = container_options['hostname'].partition('.') container_options['hostname'] = parts[0] container_options['domainname'] = parts[2] if (version_gte(self.client.api_version, '1.25') and 'stop_grace_period' in self.options): container_options['stop_timeout'] = self.stop_timeout(None) if 'ports' in container_options or 'expose' in self.options: container_options['ports'] = build_container_ports( formatted_ports(container_options.get('ports', [])), self.options) if 'volumes' in container_options or override_volumes: container_options['volumes'] = list(set( container_options.get('volumes', []) + override_volumes )) container_options['environment'] = merge_environment( self._parse_proxy_config(), merge_environment( self.options.get('environment'), override_options.get('environment') ) ) container_options['labels'] = merge_labels( self.options.get('labels'), override_options.get('labels')) container_options, override_options = self._build_container_volume_options( previous_container, container_options, override_options ) container_options['image'] = self.image_name container_options['labels'] = build_container_labels( container_options.get('labels', {}), self.labels(one_off=one_off) + self.extra_labels, number, self.config_hash if add_config_hash else None, slug ) # Delete options which are only used in HostConfig for key in HOST_CONFIG_KEYS: container_options.pop(key, None) container_options['host_config'] = self._get_container_host_config( override_options, one_off=one_off) networking_config = self.build_default_networking_config() if networking_config: container_options['networking_config'] = networking_config container_options['environment'] = format_environment( container_options['environment']) return container_options def _build_container_volume_options(self, previous_container, container_options, override_options): container_volumes = [] container_mounts = [] if 'volumes' in container_options: container_volumes = [ v for v in container_options.get('volumes') if isinstance(v, VolumeSpec) ] container_mounts = [v for v in container_options.get('volumes') if isinstance(v, MountSpec)] binds, affinity = merge_volume_bindings( container_volumes, self.options.get('tmpfs') or [], previous_container, container_mounts ) container_options['environment'].update(affinity) container_options['volumes'] = {v.internal: {} for v in container_volumes or {}} if version_gte(self.client.api_version, '1.30'): override_options['mounts'] = [build_mount(v) for v in container_mounts] or None else: # Workaround for 3.2 format override_options['tmpfs'] = self.options.get('tmpfs') or [] for m in container_mounts: if m.is_tmpfs: override_options['tmpfs'].append(m.target) else: binds.append(m.legacy_repr()) container_options['volumes'][m.target] = {} secret_volumes = self.get_secret_volumes() if secret_volumes: if version_lt(self.client.api_version, '1.30'): binds.extend(v.legacy_repr() for v in secret_volumes) container_options['volumes'].update( (v.target, {}) for v in secret_volumes ) else: override_options['mounts'] = override_options.get('mounts') or [] override_options['mounts'].extend([build_mount(v) for v in secret_volumes]) # Remove possible duplicates (see e.g. https://github.com/docker/compose/issues/5885). # unique_everseen preserves order. (see https://github.com/docker/compose/issues/6091). 
override_options['binds'] = list(unique_everseen(binds)) return container_options, override_options def _get_container_host_config(self, override_options, one_off=False): options = dict(self.options, **override_options) logging_dict = options.get('logging', None) blkio_config = convert_blkio_config(options.get('blkio_config', None)) log_config = get_log_config(logging_dict) init_path = None if isinstance(options.get('init'), str): init_path = options.get('init') options['init'] = True security_opt = [ o.value for o in options.get('security_opt') ] if options.get('security_opt') else None nano_cpus = None if 'cpus' in options: nano_cpus = int(options.get('cpus') * NANOCPUS_SCALE) return self.client.create_host_config( links=self._get_links(link_to_self=one_off), port_bindings=build_port_bindings( formatted_ports(options.get('ports', [])) ), binds=options.get('binds'), volumes_from=self._get_volumes_from(), privileged=options.get('privileged', False), network_mode=self.network_mode.mode, devices=options.get('devices'), device_requests=options.get('device_requests'), dns=options.get('dns'), dns_opt=options.get('dns_opt'), dns_search=options.get('dns_search'), restart_policy=options.get('restart'), runtime=options.get('runtime'), cap_add=options.get('cap_add'), cap_drop=options.get('cap_drop'), mem_limit=options.get('mem_limit'), mem_reservation=options.get('mem_reservation'), memswap_limit=options.get('memswap_limit'), ulimits=build_ulimits(options.get('ulimits')), log_config=log_config, extra_hosts=options.get('extra_hosts'), read_only=options.get('read_only'), pid_mode=self.pid_mode.mode, security_opt=security_opt, ipc_mode=self.ipc_mode.mode, cgroup_parent=options.get('cgroup_parent'), cpu_quota=options.get('cpu_quota'), shm_size=options.get('shm_size'), sysctls=options.get('sysctls'), pids_limit=options.get('pids_limit'), tmpfs=options.get('tmpfs'), oom_kill_disable=options.get('oom_kill_disable'), oom_score_adj=options.get('oom_score_adj'), mem_swappiness=options.get('mem_swappiness'), group_add=options.get('group_add'), userns_mode=options.get('userns_mode'), init=options.get('init', None), init_path=init_path, isolation=options.get('isolation'), cpu_count=options.get('cpu_count'), cpu_percent=options.get('cpu_percent'), nano_cpus=nano_cpus, volume_driver=options.get('volume_driver'), cpuset_cpus=options.get('cpuset'), cpu_shares=options.get('cpu_shares'), storage_opt=options.get('storage_opt'), blkio_weight=blkio_config.get('weight'), blkio_weight_device=blkio_config.get('weight_device'), device_read_bps=blkio_config.get('device_read_bps'), device_read_iops=blkio_config.get('device_read_iops'), device_write_bps=blkio_config.get('device_write_bps'), device_write_iops=blkio_config.get('device_write_iops'), mounts=options.get('mounts'), device_cgroup_rules=options.get('device_cgroup_rules'), cpu_period=options.get('cpu_period'), cpu_rt_period=options.get('cpu_rt_period'), cpu_rt_runtime=options.get('cpu_rt_runtime'), ) def get_secret_volumes(self): def build_spec(secret): target = secret['secret'].target if target is None: target = '{}/{}'.format(const.SECRETS_PATH, secret['secret'].source) elif not os.path.isabs(target): target = '{}/{}'.format(const.SECRETS_PATH, target) return MountSpec('bind', secret['file'], target, read_only=True) return [build_spec(secret) for secret in self.secrets] def build(self, no_cache=False, pull=False, force_rm=False, memory=None, build_args_override=None, gzip=False, rm=True, silent=False, cli=False, progress=None): output_stream = open(os.devnull, 'w') if 
not silent: output_stream = sys.stdout log.info('Building %s' % self.name) build_opts = self.options.get('build', {}) build_args = build_opts.get('args', {}).copy() if build_args_override: build_args.update(build_args_override) for k, v in self._parse_proxy_config().items(): build_args.setdefault(k, v) path = rewrite_build_path(build_opts.get('context')) if self.platform and version_lt(self.client.api_version, '1.35'): raise OperationFailedError( 'Impossible to perform platform-targeted builds for API version < 1.35' ) builder = _ClientBuilder(self.client) if not cli else _CLIBuilder(progress) return builder.build( service=self, path=path, tag=self.image_name, rm=rm, forcerm=force_rm, pull=pull, nocache=no_cache, dockerfile=build_opts.get('dockerfile', None), cache_from=self.get_cache_from(build_opts), labels=build_opts.get('labels', None), buildargs=build_args, network_mode=build_opts.get('network', None), target=build_opts.get('target', None), shmsize=parse_bytes(build_opts.get('shm_size')) if build_opts.get('shm_size') else None, extra_hosts=build_opts.get('extra_hosts', None), container_limits={ 'memory': parse_bytes(memory) if memory else None }, gzip=gzip, isolation=build_opts.get('isolation', self.options.get('isolation', None)), platform=self.platform, output_stream=output_stream) def get_cache_from(self, build_opts): cache_from = build_opts.get('cache_from', None) if cache_from is not None: cache_from = [tag for tag in cache_from if tag] return cache_from def can_be_built(self): return 'build' in self.options def labels(self, one_off=False, legacy=False): proj_name = self.project if not legacy else re.sub(r'[_-]', '', self.project) return [ '{}={}'.format(LABEL_PROJECT, proj_name), '{}={}'.format(LABEL_SERVICE, self.name), '{}={}'.format(LABEL_ONE_OFF, "True" if one_off else "False"), ] @property def custom_container_name(self): return self.options.get('container_name') def get_container_name(self, service_name, number, slug=None): if self.custom_container_name and slug is None: return self.custom_container_name container_name = build_container_name( self.project, service_name, number, slug, ) ext_links_origins = [link.split(':')[0] for link in self.options.get('external_links', [])] if container_name in ext_links_origins: raise DependencyError( 'Service {} has a self-referential external link: {}'.format( self.name, container_name ) ) return container_name def remove_image(self, image_type): if not image_type or image_type == ImageType.none: return False if image_type == ImageType.local and self.options.get('image'): return False log.info("Removing image %s", self.image_name) try: self.client.remove_image(self.image_name) return True except ImageNotFound: log.warning("Image %s not found.", self.image_name) return False except APIError as e: log.error("Failed to remove image for service %s: %s", self.name, e) return False def specifies_host_port(self): def has_host_port(binding): if isinstance(binding, dict): external_bindings = binding.get('published') else: _, external_bindings = split_port(binding) # there are no external bindings if external_bindings is None: return False # we only need to check the first binding from the range external_binding = external_bindings[0] # non-tuple binding means there is a host port specified if not isinstance(external_binding, tuple): return True # extract actual host port from tuple of (host_ip, host_port) _, host_port = external_binding if host_port is not None: return True return False return any(has_host_port(binding) for binding in 
self.options.get('ports', []))

    def _do_pull(self, repo, pull_kwargs, silent, ignore_pull_failures):
        try:
            output = self.client.pull(repo, **pull_kwargs)
            if silent:
                with open(os.devnull, 'w') as devnull:
                    yield from stream_output(output, devnull)
            else:
                yield from stream_output(output, sys.stdout)
        except (StreamOutputError, NotFound) as e:
            if not ignore_pull_failures:
                raise
            else:
                log.error(str(e))

    def pull(self, ignore_pull_failures=False, silent=False, stream=False):
        if 'image' not in self.options:
            return

        repo, tag, separator = parse_repository_tag(self.options['image'])
        kwargs = {
            'tag': tag or 'latest',
            'stream': True,
            'platform': self.platform,
        }
        if not silent:
            log.info('Pulling {} ({}{}{})...'.format(self.name, repo, separator, tag))

        if kwargs['platform'] and version_lt(self.client.api_version, '1.35'):
            raise OperationFailedError(
                'Impossible to perform platform-targeted pulls for API version < 1.35'
            )

        event_stream = self._do_pull(repo, kwargs, silent, ignore_pull_failures)
        if stream:
            return event_stream
        return progress_stream.get_digest_from_pull(event_stream)

    def push(self, ignore_push_failures=False):
        if 'image' not in self.options or 'build' not in self.options:
            return

        repo, tag, separator = parse_repository_tag(self.options['image'])
        tag = tag or 'latest'
        log.info('Pushing {} ({}{}{})...'.format(self.name, repo, separator, tag))
        output = self.client.push(repo, tag=tag, stream=True)

        try:
            return progress_stream.get_digest_from_push(
                stream_output(output, sys.stdout))
        except StreamOutputError as e:
            if not ignore_push_failures:
                raise
            else:
                log.error(str(e))

    def is_healthy(self):
        """ Check that all containers for this service report healthy.
            Returns false if at least one healthcheck is pending.
            If an unhealthy container is detected, raise a HealthCheckFailed exception.
        """
        result = True
        for ctnr in self.containers():
            ctnr.inspect()
            status = ctnr.get('State.Health.Status')
            if status is None:
                raise NoHealthCheckConfigured(self.name)
            elif status == 'starting':
                result = False
            elif status == 'unhealthy':
                raise HealthCheckFailed(ctnr.short_id)
        return result
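    # Illustrative note (added for clarity; not part of the original source):
    # is_healthy() above and is_completed_successfully() below back the
    # `depends_on` conditions wired up in get_dependency_configs(), roughly:
    #
    #   condition: service_healthy                 -> lambda s: s.is_healthy()
    #   condition: service_completed_successfully  -> lambda s: s.is_completed_successfully()
    #
    # Both raise (HealthCheckFailed / CompletedUnsuccessfully) instead of
    # returning False once a container has definitively failed, so callers
    # must handle exceptions as well as a pending (False) result.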
""" result = True for ctnr in self.containers(stopped=True): ctnr.inspect() if ctnr.get('State.Status') != 'exited': result = False elif ctnr.exit_code != 0: raise CompletedUnsuccessfully(ctnr.short_id, ctnr.exit_code) return result def _parse_proxy_config(self): client = self.client if 'proxies' not in client._general_configs: return {} docker_host = getattr(client, '_original_base_url', client.base_url) proxy_config = client._general_configs['proxies'].get( docker_host, client._general_configs['proxies'].get('default') ) or {} permitted = { 'ftpProxy': 'FTP_PROXY', 'httpProxy': 'HTTP_PROXY', 'httpsProxy': 'HTTPS_PROXY', 'noProxy': 'NO_PROXY', } result = {} for k, v in proxy_config.items(): if k not in permitted: continue result[permitted[k]] = result[permitted[k].lower()] = v return result def get_profiles(self): if 'profiles' not in self.options: return [] return self.options.get('profiles') def enabled_for_profiles(self, enabled_profiles): # if service has no profiles specified it is always enabled if 'profiles' not in self.options: return True service_profiles = self.options.get('profiles') for profile in enabled_profiles: if profile in service_profiles: return True return False def short_id_alias_exists(container, network): aliases = container.get( 'NetworkSettings.Networks.{net}.Aliases'.format(net=network)) or () return container.short_id in aliases class IpcMode: def __init__(self, mode): self._mode = mode @property def mode(self): return self._mode @property def service_name(self): return None class ServiceIpcMode(IpcMode): def __init__(self, service): self.service = service @property def service_name(self): return self.service.name @property def mode(self): containers = self.service.containers() if containers: return 'container:' + containers[0].id log.warning( "Service %s is trying to use reuse the IPC namespace " "of another service that is not running." % (self.service_name) ) return None class ContainerIpcMode(IpcMode): def __init__(self, container): self.container = container self._mode = 'container:{}'.format(container.id) class PidMode: def __init__(self, mode): self._mode = mode @property def mode(self): return self._mode @property def service_name(self): return None class ServicePidMode(PidMode): def __init__(self, service): self.service = service @property def service_name(self): return self.service.name @property def mode(self): containers = self.service.containers() if containers: return 'container:' + containers[0].id log.warning( "Service %s is trying to use reuse the PID namespace " "of another service that is not running." 
class PidMode:
    def __init__(self, mode):
        self._mode = mode

    @property
    def mode(self):
        return self._mode

    @property
    def service_name(self):
        return None


class ServicePidMode(PidMode):
    def __init__(self, service):
        self.service = service

    @property
    def service_name(self):
        return self.service.name

    @property
    def mode(self):
        containers = self.service.containers()
        if containers:
            return 'container:' + containers[0].id

        log.warning(
            "Service %s is trying to reuse the PID namespace "
            "of another service that is not running." % (self.service_name)
        )
        return None


class ContainerPidMode(PidMode):
    def __init__(self, container):
        self.container = container
        self._mode = 'container:{}'.format(container.id)


class NetworkMode:
    """A `standard` network mode (ex: host, bridge)"""

    service_name = None

    def __init__(self, network_mode):
        self.network_mode = network_mode

    @property
    def id(self):
        return self.network_mode

    mode = id


class ContainerNetworkMode:
    """A network mode that uses a container's network stack."""

    service_name = None

    def __init__(self, container):
        self.container = container

    @property
    def id(self):
        return self.container.id

    @property
    def mode(self):
        return 'container:' + self.container.id


class ServiceNetworkMode:
    """A network mode that uses a service's network stack."""

    def __init__(self, service):
        self.service = service

    @property
    def id(self):
        return self.service.name

    service_name = id

    @property
    def mode(self):
        containers = self.service.containers()
        if containers:
            return 'container:' + containers[0].id

        log.warning("Service %s is trying to reuse the network stack "
                    "of another service that is not running." % (self.id))
        return None


# Names


def build_container_name(project, service, number, slug=None):
    bits = [project.lstrip('-_'), service]
    if slug:
        bits.extend(['run', truncate_id(slug)])
    else:
        bits.append(str(number))
    return '_'.join(bits)


# Images


def parse_repository_tag(repo_path):
    """Splits image identification into base image path, tag/digest and its separator.

    Example:

    >>> parse_repository_tag('user/repo@sha256:digest')
    ('user/repo', 'sha256:digest', '@')
    >>> parse_repository_tag('user/repo:v1')
    ('user/repo', 'v1', ':')
    """
    tag_separator = ":"
    digest_separator = "@"

    if digest_separator in repo_path:
        repo, tag = repo_path.rsplit(digest_separator, 1)
        return repo, tag, digest_separator

    repo, tag = repo_path, ""
    if tag_separator in repo_path:
        repo, tag = repo_path.rsplit(tag_separator, 1)
        if "/" in tag:
            repo, tag = repo_path, ""

    return repo, tag, tag_separator


# Volumes


def merge_volume_bindings(volumes, tmpfs, previous_container, mounts):
    """
        Return a list of volume bindings for a container. Container data volumes
        are replaced by those from the previous container.
        Anonymous mounts are updated in place.
    """
    affinity = {}

    volume_bindings = OrderedDict(
        build_volume_binding(volume)
        for volume in volumes
        if volume.external
    )

    if previous_container:
        old_volumes, old_mounts = get_container_data_volumes(
            previous_container, volumes, tmpfs, mounts
        )
        warn_on_masked_volume(volumes, old_volumes, previous_container.service)
        volume_bindings.update(
            build_volume_binding(volume) for volume in old_volumes
        )

        if old_volumes or old_mounts:
            affinity = {'affinity:container': '=' + previous_container.id}

    return list(volume_bindings.values()), affinity


def get_container_data_volumes(container, volumes_option, tmpfs_option, mounts_option):
    """
        Find the container data volumes that are in `volumes_option`, and return
        a mapping of volume bindings for those volumes.
        Anonymous volume mounts are updated in place instead.
""" volumes = [] volumes_option = volumes_option or [] container_mounts = { mount['Destination']: mount for mount in container.get('Mounts') or {} } image_volumes = [ VolumeSpec.parse(volume) for volume in container.image_config['ContainerConfig'].get('Volumes') or {} ] for volume in set(volumes_option + image_volumes): # No need to preserve host volumes if volume.external: continue # Attempting to rebind tmpfs volumes breaks: https://github.com/docker/compose/issues/4751 if volume.internal in convert_tmpfs_mounts(tmpfs_option).keys(): continue mount = container_mounts.get(volume.internal) # New volume, doesn't exist in the old container if not mount: continue # Volume was previously a host volume, now it's a container volume if not mount.get('Name'): continue # Volume (probably an image volume) is overridden by a mount in the service's config # and would cause a duplicate mountpoint error if volume.internal in [m.target for m in mounts_option]: continue # Copy existing volume from old container volume = volume._replace(external=mount['Name']) volumes.append(volume) updated_mounts = False for mount in mounts_option: if mount.type != 'volume': continue ctnr_mount = container_mounts.get(mount.target) if not ctnr_mount or not ctnr_mount.get('Name'): continue mount.source = ctnr_mount['Name'] updated_mounts = True return volumes, updated_mounts def warn_on_masked_volume(volumes_option, container_volumes, service): container_volumes = { volume.internal: volume.external for volume in container_volumes} for volume in volumes_option: if ( volume.external and volume.internal in container_volumes and container_volumes.get(volume.internal) != volume.external ): log.warning(( "Service \"{service}\" is using volume \"{volume}\" from the " "previous container. Host mapping \"{host_path}\" has no effect. " "Remove the existing containers (with `docker-compose rm {service}`) " "to use the host volume mapping." ).format( service=service, volume=volume.internal, host_path=volume.external)) def build_volume_binding(volume_spec): return volume_spec.internal, volume_spec.repr() def build_volume_from(volume_from_spec): """ volume_from can be either a service or a container. We want to return the container.id and format it into a string complete with the mode. 
""" if isinstance(volume_from_spec.source, Service): containers = volume_from_spec.source.containers(stopped=True) if not containers: return "{}:{}".format( volume_from_spec.source.create_container().id, volume_from_spec.mode) container = containers[0] return "{}:{}".format(container.id, volume_from_spec.mode) elif isinstance(volume_from_spec.source, Container): return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode) def build_mount(mount_spec): kwargs = {} if mount_spec.options: for option, sdk_name in mount_spec.options_map[mount_spec.type].items(): if option in mount_spec.options: kwargs[sdk_name] = mount_spec.options[option] return Mount( type=mount_spec.type, target=mount_spec.target, source=mount_spec.source, read_only=mount_spec.read_only, consistency=mount_spec.consistency, **kwargs ) # Labels def build_container_labels(label_options, service_labels, number, config_hash, slug): labels = dict(label_options or {}) labels.update(label.split('=', 1) for label in service_labels) if number is not None: labels[LABEL_CONTAINER_NUMBER] = str(number) if slug is not None: labels[LABEL_SLUG] = slug labels[LABEL_VERSION] = __version__ if config_hash: log.debug("Added config hash: %s" % config_hash) labels[LABEL_CONFIG_HASH] = config_hash return labels # Ulimits def build_ulimits(ulimit_config): if not ulimit_config: return None ulimits = [] for limit_name, soft_hard_values in ulimit_config.items(): if isinstance(soft_hard_values, int): ulimits.append({'name': limit_name, 'soft': soft_hard_values, 'hard': soft_hard_values}) elif isinstance(soft_hard_values, dict): ulimit_dict = {'name': limit_name} ulimit_dict.update(soft_hard_values) ulimits.append(ulimit_dict) return ulimits def get_log_config(logging_dict): log_driver = logging_dict.get('driver', "") if logging_dict else "" log_options = logging_dict.get('options', None) if logging_dict else None return LogConfig( type=log_driver, config=log_options ) # TODO: remove once fix is available in docker-py def format_environment(environment): def format_env(key, value): if value is None: return key if isinstance(value, bytes): value = value.decode('utf-8') return '{key}={value}'.format(key=key, value=value) return [format_env(*item) for item in environment.items()] # Ports def formatted_ports(ports): result = [] for port in ports: if isinstance(port, ServicePort): result.append(port.legacy_repr()) else: result.append(port) return result def build_container_ports(container_ports, options): ports = [] all_ports = container_ports + options.get('expose', []) for port_range in all_ports: internal_range, _ = split_port(port_range) for port in internal_range: port = str(port) if '/' in port: port = tuple(port.split('/')) ports.append(port) return ports def convert_blkio_config(blkio_config): result = {} if blkio_config is None: return result result['weight'] = blkio_config.get('weight') for field in [ "device_read_bps", "device_read_iops", "device_write_bps", "device_write_iops", "weight_device", ]: if field not in blkio_config: continue arr = [] for item in blkio_config[field]: arr.append({k.capitalize(): v for k, v in item.items()}) result[field] = arr return result def rewrite_build_path(path): if IS_WINDOWS_PLATFORM and not is_url(path) and not path.startswith(WINDOWS_LONGPATH_PREFIX): path = WINDOWS_LONGPATH_PREFIX + os.path.normpath(path) return path class _ClientBuilder: def __init__(self, client): self.client = client def build(self, service, path, tag=None, quiet=False, fileobj=None, nocache=False, rm=False, timeout=None, 
custom_context=False, encoding=None, pull=False, forcerm=False, dockerfile=None, container_limits=None, decode=False, buildargs=None, gzip=False, shmsize=None, labels=None, cache_from=None, target=None, network_mode=None, squash=None, extra_hosts=None, platform=None, isolation=None, use_config_proxy=True, output_stream=sys.stdout): build_output = self.client.build( path=path, tag=tag, nocache=nocache, rm=rm, pull=pull, forcerm=forcerm, dockerfile=dockerfile, labels=labels, cache_from=cache_from, buildargs=buildargs, network_mode=network_mode, target=target, shmsize=shmsize, extra_hosts=extra_hosts, container_limits=container_limits, gzip=gzip, isolation=isolation, platform=platform) try: all_events = list(stream_output(build_output, output_stream)) except StreamOutputError as e: raise BuildError(service, str(e)) # Ensure the HTTP connection is not reused for another # streaming command, as the Docker daemon can sometimes # complain about it self.client.close() image_id = None for event in all_events: if 'stream' in event: match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', '')) if match: image_id = match.group(1) if image_id is None: raise BuildError(service, event if all_events else 'Unknown') return image_id class _CLIBuilder: def __init__(self, progress): self._progress = progress def build(self, service, path, tag=None, quiet=False, fileobj=None, nocache=False, rm=False, timeout=None, custom_context=False, encoding=None, pull=False, forcerm=False, dockerfile=None, container_limits=None, decode=False, buildargs=None, gzip=False, shmsize=None, labels=None, cache_from=None, target=None, network_mode=None, squash=None, extra_hosts=None, platform=None, isolation=None, use_config_proxy=True, output_stream=sys.stdout): """ Args: service (str): Service to be built path (str): Path to the directory containing the Dockerfile buildargs (dict): A dictionary of build arguments cache_from (:py:class:`list`): A list of images used for build cache resolution container_limits (dict): A dictionary of limits applied to each container created by the build process. Valid keys: - memory (int): set memory limit for build - memswap (int): Total memory (memory + swap), -1 to disable swap - cpushares (int): CPU shares (relative weight) - cpusetcpus (str): CPUs in which to allow execution, e.g., ``"0-3"``, ``"0,1"`` custom_context (bool): Optional if using ``fileobj`` decode (bool): If set to ``True``, the returned stream will be decoded into dicts on the fly. Default ``False`` dockerfile (str): path within the build context to the Dockerfile encoding (str): The encoding for a stream. Set to ``gzip`` for compressing extra_hosts (dict): Extra hosts to add to /etc/hosts in building containers, as a mapping of hostname to IP address. fileobj: A file object to use as the Dockerfile. (Or a file-like object) forcerm (bool): Always remove intermediate containers, even after unsuccessful builds isolation (str): Isolation technology used during build. Default: `None`. labels (dict): A dictionary of labels to set on the image network_mode (str): networking mode for the run commands during build nocache (bool): Don't use the cache when set to ``True`` platform (str): Platform in the format ``os[/arch[/variant]]`` pull (bool): Downloads any updates to the FROM image in Dockerfiles quiet (bool): Whether to return the status rm (bool): Remove intermediate containers. 
The ``docker build``
                command now defaults to ``--rm=true``, but we have kept the old
                default of `False` to preserve backward compatibility
            shmsize (int): Size of `/dev/shm` in bytes. The size must be
                greater than 0. If omitted the system uses 64MB
            squash (bool): Squash the resulting images layers into a
                single layer.
            tag (str): A tag to add to the final image
            target (str): Name of the build-stage to build in a multi-stage
                Dockerfile
            timeout (int): HTTP timeout
            use_config_proxy (bool): If ``True``, and if the docker client
                configuration file (``~/.docker/config.json`` by default)
                contains a proxy configuration, the corresponding environment
                variables will be set in the container being built.
            output_stream (writer): stream to use for build logs

        Returns:
            The ID of the built image, read back from the ``--iidfile``
            written by ``docker build``.
        """
        if dockerfile and os.path.isdir(path):
            dockerfile = os.path.join(path, dockerfile)
        iidfile = tempfile.mktemp()

        command_builder = _CommandBuilder()
        command_builder.add_params("--build-arg", buildargs)
        command_builder.add_list("--cache-from", cache_from)
        command_builder.add_arg("--file", dockerfile)
        command_builder.add_flag("--force-rm", forcerm)
        command_builder.add_params("--label", labels)
        command_builder.add_arg("--memory", container_limits.get("memory"))
        command_builder.add_arg("--network", network_mode)
        command_builder.add_flag("--no-cache", nocache)
        command_builder.add_arg("--progress", self._progress)
        command_builder.add_flag("--pull", pull)
        command_builder.add_arg("--tag", tag)
        command_builder.add_arg("--target", target)
        command_builder.add_arg("--iidfile", iidfile)
        command_builder.add_arg("--platform", platform)
        command_builder.add_arg("--isolation", isolation)

        if extra_hosts:
            if isinstance(extra_hosts, dict):
                extra_hosts = ["{}:{}".format(host, ip) for host, ip in extra_hosts.items()]
            for host in extra_hosts:
                command_builder.add_arg("--add-host", "{}".format(host))

        args = command_builder.build([path])

        with subprocess.Popen(args, stdout=output_stream, stderr=sys.stderr,
                              universal_newlines=True) as p:
            p.communicate()
            if p.returncode != 0:
                raise BuildError(service, "Build failed")

        with open(iidfile) as f:
            line = f.readline()
            image_id = line.split(":")[1].strip()
        os.remove(iidfile)

        return image_id


class _CommandBuilder:
    def __init__(self):
        self._args = ["docker", "build"]

    def add_arg(self, name, value):
        if value:
            self._args.extend([name, str(value)])

    def add_flag(self, name, flag):
        if flag:
            self._args.extend([name])

    def add_params(self, name, params):
        if params:
            for key, val in params.items():
                self._args.extend([name, "{}={}".format(key, val)])

    def add_list(self, name, values):
        if values:
            for val in values:
                self._args.extend([name, val])

    def build(self, args):
        return self._args + args
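# Illustrative example (added for clarity; not part of the original source)
# of the argv that _CommandBuilder assembles for the CLI-based builder above.
# The tag and build-arg values are hypothetical:
#
#   builder = _CommandBuilder()
#   builder.add_arg("--tag", "myproj_web")
#   builder.add_flag("--pull", True)
#   builder.add_params("--build-arg", {"HTTP_PROXY": "http://proxy:3128"})
#   builder.build(["."])
#   # ['docker', 'build', '--tag', 'myproj_web', '--pull',
#   #  '--build-arg', 'HTTP_PROXY=http://proxy:3128', '.']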
compose-1.29.2/compose/timeparse.py000066400000000000000000000053311404620552300172650ustar00rootroot00000000000000#!/usr/bin/env python
'''
timeparse.py
(c) Will Roberts 1 February, 2014

This is a vendored and modified copy of:
github.com/wroberts/pytimeparse @ cc0550d

It has been modified to mimic the behaviour of
https://golang.org/pkg/time/#ParseDuration
'''
# MIT LICENSE
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re

HOURS = r'(?P<hours>[\d.]+)h'
MINS = r'(?P<mins>[\d.]+)m'
SECS = r'(?P<secs>[\d.]+)s'
MILLI = r'(?P<milli>[\d.]+)ms'
MICRO = r'(?P<micro>[\d.]+)(?:us|µs)'
NANO = r'(?P<nano>[\d.]+)ns'


def opt(x):
    return r'(?:{x})?'.format(x=x)


TIMEFORMAT = r'{HOURS}{MINS}{SECS}{MILLI}{MICRO}{NANO}'.format(
    HOURS=opt(HOURS),
    MINS=opt(MINS),
    SECS=opt(SECS),
    MILLI=opt(MILLI),
    MICRO=opt(MICRO),
    NANO=opt(NANO),
)

MULTIPLIERS = {
    'hours': 60 * 60,
    'mins': 60,
    'secs': 1,
    'milli': 1.0 / 1000,
    'micro': 1.0 / 1000.0 / 1000,
    'nano': 1.0 / 1000.0 / 1000.0 / 1000.0,
}


def timeparse(sval):
    """Parse a time expression, returning it as a number of seconds. If
    possible, the return value will be an `int`; if this is not
    possible, the return will be a `float`. Returns `None` if a time
    expression cannot be parsed from the given string.

    Arguments:
    - `sval`: the string value to parse

    >>> timeparse('1m24s')
    84
    >>> timeparse('1.2m')
    72.0
    >>> timeparse('1.2s')
    1.2
    """
    match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
    if not match or not match.group(0).strip():
        return

    mdict = match.groupdict()
    return sum(
        MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items()
        if v is not None)


def cast(value):
    return int(value) if value.isdigit() else float(value)
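# A few more illustrative results (added for clarity; not part of the
# original source), matching the Go-style durations accepted by TIMEFORMAT
# above:
#
#   timeparse('1m30s')  ->  90
#   timeparse('500ms')  ->  0.5
#   timeparse('2h')     ->  7200
#   timeparse('90')     ->  None  (a bare number has no unit suffix)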
""" splitter = splitter or line_splitter buffered = '' for data in stream_as_text(stream): buffered += data while True: buffer_split = splitter(buffered) if buffer_split is None: break item, buffered = buffer_split yield item if buffered: try: yield decoder(buffered) except Exception as e: log.error( 'Compose tried decoding the following data chunk, but failed:' '\n%s' % repr(buffered) ) raise StreamParseError(e) def json_splitter(buffer): """Attempt to parse a json object from a buffer. If there is at least one object, return it and the rest of the buffer, otherwise return None. """ buffer = buffer.strip() try: obj, index = json_decoder.raw_decode(buffer) rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():] return obj, rest except ValueError: return None def json_stream(stream): """Given a stream of text, return a stream of json objects. This handles streams which are inconsistently buffered (some entries may be newline delimited, and others are not). """ return split_buffer(stream, json_splitter, json_decoder.decode) def json_hash(obj): dump = json.dumps(obj, sort_keys=True, separators=(',', ':'), default=lambda x: x.repr()) h = hashlib.sha256() h.update(dump.encode('utf8')) return h.hexdigest() def microseconds_from_time_nano(time_nano): return int(time_nano % 1000000000 / 1000) def nanoseconds_from_time_seconds(time_seconds): return int(time_seconds / MULTIPLIERS['nano']) def parse_seconds_float(value): return timeparse(value or '') def parse_nanoseconds_int(value): parsed = timeparse(value or '') if parsed is None: return None return nanoseconds_from_time_seconds(parsed) def build_string_dict(source_dict): return {k: str(v if v is not None else '') for k, v in source_dict.items()} def splitdrive(path): if len(path) == 0: return ('', '') if path[0] in ['.', '\\', '/', '~']: return ('', path) return ntpath.splitdrive(path) def parse_bytes(n): try: return sdk_parse_bytes(n) except DockerException: return None def unquote_path(s): if not s: return s if s[0] == '"' and s[-1] == '"': return s[1:-1] return s def generate_random_id(): while True: val = hex(random.getrandbits(32 * 8))[2:-1] try: int(truncate_id(val)) continue except ValueError: return val def truncate_id(value): if ':' in value: value = value[value.index(':') + 1:] if len(value) > 12: return value[:12] return value def unique_everseen(iterable, key=lambda x: x): "List unique elements, preserving order. Remember all elements ever seen." seen = set() for element in iterable: unique_key = key(element) if unique_key not in seen: seen.add(unique_key) yield element def truncate_string(s, max_chars=35): if len(s) > max_chars: return s[:max_chars - 2] + '...' return s def filter_attached_for_up(items, service_names, attach_dependencies=False, item_to_service_name=lambda x: x): """This function contains the logic of choosing which services to attach when doing docker-compose up. 
It may be used both with containers and services, and any other entities that map to service names - this mapping is provided by item_to_service_name.""" if attach_dependencies or not service_names: return items return [ item for item in items if item_to_service_name(item) in service_names ] compose-1.29.2/compose/version.py000066400000000000000000000002611404620552300167560ustar00rootroot00000000000000from distutils.version import LooseVersion class ComposeVersion(LooseVersion): """ A hashable version object """ def __hash__(self): return hash(self.vstring) compose-1.29.2/compose/volume.py000066400000000000000000000163261404620552300166110ustar00rootroot00000000000000import logging import re from itertools import chain from docker.errors import NotFound from docker.utils import version_lt from . import __version__ from .config import ConfigurationError from .config.types import VolumeSpec from .const import LABEL_PROJECT from .const import LABEL_VERSION from .const import LABEL_VOLUME log = logging.getLogger(__name__) class Volume: def __init__(self, client, project, name, driver=None, driver_opts=None, external=False, labels=None, custom_name=False): self.client = client self.project = project self.name = name self.driver = driver self.driver_opts = driver_opts self.external = external self.labels = labels self.custom_name = custom_name self.legacy = None def create(self): return self.client.create_volume( self.full_name, self.driver, self.driver_opts, labels=self._labels ) def remove(self): if self.external: log.info("Volume %s is external, skipping", self.true_name) return log.info("Removing volume %s", self.true_name) return self.client.remove_volume(self.true_name) def inspect(self, legacy=None): if legacy: return self.client.inspect_volume(self.legacy_full_name) return self.client.inspect_volume(self.full_name) def exists(self): self._set_legacy_flag() try: self.inspect(legacy=self.legacy) except NotFound: return False return True @property def full_name(self): if self.custom_name: return self.name return '{}_{}'.format(self.project.lstrip('-_'), self.name) @property def legacy_full_name(self): if self.custom_name: return self.name return '{}_{}'.format( re.sub(r'[_-]', '', self.project), self.name ) @property def true_name(self): self._set_legacy_flag() if self.legacy: return self.legacy_full_name return self.full_name @property def _labels(self): if version_lt(self.client._version, '1.23'): return None labels = self.labels.copy() if self.labels else {} labels.update({ LABEL_PROJECT: self.project, LABEL_VOLUME: self.name, LABEL_VERSION: __version__, }) return labels def _set_legacy_flag(self): if self.legacy is not None: return try: data = self.inspect(legacy=True) self.legacy = data is not None except NotFound: self.legacy = False class ProjectVolumes: def __init__(self, volumes): self.volumes = volumes @classmethod def from_config(cls, name, config_data, client): config_volumes = config_data.volumes or {} volumes = { vol_name: Volume( client=client, project=name, name=data.get('name', vol_name), driver=data.get('driver'), driver_opts=data.get('driver_opts'), custom_name=data.get('name') is not None, labels=data.get('labels'), external=bool(data.get('external', False)) ) for vol_name, data in config_volumes.items() } return cls(volumes) def remove(self): for volume in self.volumes.values(): try: volume.remove() except NotFound: log.warning("Volume %s not found.", volume.true_name) def initialize(self): try: for volume in self.volumes.values(): volume_exists = volume.exists() if 
volume.external: log.debug( 'Volume {} declared as external. No new ' 'volume will be created.'.format(volume.name) ) if not volume_exists: raise ConfigurationError( 'Volume {name} declared as external, but could' ' not be found. Please create the volume manually' ' using `{command}{name}` and try again.'.format( name=volume.full_name, command='docker volume create --name=' ) ) continue if not volume_exists: log.info( 'Creating volume "{}" with {} driver'.format( volume.full_name, volume.driver or 'default' ) ) volume.create() else: check_remote_volume_config(volume.inspect(legacy=volume.legacy), volume) except NotFound: raise ConfigurationError( 'Volume {} specifies nonexistent driver {}'.format(volume.name, volume.driver) ) def namespace_spec(self, volume_spec): if not volume_spec.is_named_volume: return volume_spec if isinstance(volume_spec, VolumeSpec): volume = self.volumes[volume_spec.external] return volume_spec._replace(external=volume.true_name) else: volume_spec.source = self.volumes[volume_spec.source].true_name return volume_spec class VolumeConfigChangedError(ConfigurationError): def __init__(self, local, property_name, local_value, remote_value): super().__init__( 'Configuration for volume {vol_name} specifies {property_name} ' '{local_value}, but a volume with the same name uses a different ' '{property_name} ({remote_value}). If you wish to use the new ' 'configuration, please remove the existing volume "{full_name}" ' 'first:\n$ docker volume rm {full_name}'.format( vol_name=local.name, property_name=property_name, local_value=local_value, remote_value=remote_value, full_name=local.true_name ) ) def check_remote_volume_config(remote, local): if local.driver and remote.get('Driver') != local.driver: raise VolumeConfigChangedError(local, 'driver', local.driver, remote.get('Driver')) local_opts = local.driver_opts or {} remote_opts = remote.get('Options') or {} for k in set(chain(remote_opts, local_opts)): if k.startswith('com.docker.'): # These options are set internally continue if remote_opts.get(k) != local_opts.get(k): raise VolumeConfigChangedError( local, '"{}" driver_opt'.format(k), local_opts.get(k), remote_opts.get(k), ) local_labels = local.labels or {} remote_labels = remote.get('Labels') or {} for k in set(chain(remote_labels, local_labels)): if k.startswith('com.docker.'): # We are only interested in user-specified labels continue if remote_labels.get(k) != local_labels.get(k): log.warning( 'Volume {}: label "{}" has changed. It may need to be' ' recreated.'.format(local.name, k) ) compose-1.29.2/contrib/000077500000000000000000000000001404620552300147135ustar00rootroot00000000000000compose-1.29.2/contrib/completion/000077500000000000000000000000001404620552300170645ustar00rootroot00000000000000compose-1.29.2/contrib/completion/bash/000077500000000000000000000000001404620552300200015ustar00rootroot00000000000000compose-1.29.2/contrib/completion/bash/docker-compose000066400000000000000000000322741404620552300226460ustar00rootroot00000000000000#!/bin/bash # # bash completion for docker-compose # # This work is based on the completion for the docker command. # # This script provides completion of: # - commands and their options # - service names # - filepaths # # To enable the completions either: # - place this file in /etc/bash_completion.d # or # - copy this file to e.g. ~/.docker-compose-completion.sh and add the line # below to your .bashrc after bash completion features are loaded # . 
~/.docker-compose-completion.sh __docker_compose_previous_extglob_setting=$(shopt -p extglob) shopt -s extglob __docker_compose_q() { docker-compose 2>/dev/null "${top_level_options[@]}" "$@" } # Transforms a multiline list of strings into a single line string # with the words separated by "|". __docker_compose_to_alternatives() { local parts=( $1 ) local IFS='|' echo "${parts[*]}" } # Transforms a multiline list of options into an extglob pattern # suitable for use in case statements. __docker_compose_to_extglob() { local extglob=$( __docker_compose_to_alternatives "$1" ) echo "@($extglob)" } # Determines whether the option passed as the first argument exist on # the commandline. The option may be a pattern, e.g. `--force|-f`. __docker_compose_has_option() { local pattern="$1" for (( i=2; i < $cword; ++i)); do if [[ ${words[$i]} =~ ^($pattern)$ ]] ; then return 0 fi done return 1 } # Returns `key` if we are currently completing the value of a map option (`key=value`) # which matches the extglob passed in as an argument. # This function is needed for key-specific completions. __docker_compose_map_key_of_current_option() { local glob="$1" local key glob_pos if [ "$cur" = "=" ] ; then # key= case key="$prev" glob_pos=$((cword - 2)) elif [[ $cur == *=* ]] ; then # key=value case (OSX) key=${cur%=*} glob_pos=$((cword - 1)) elif [ "$prev" = "=" ] ; then key=${words[$cword - 2]} # key=value case glob_pos=$((cword - 3)) else return fi [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" } # suppress trailing whitespace __docker_compose_nospace() { # compopt is not available in ancient bash versions type compopt &>/dev/null && compopt -o nospace } # Outputs a list of all defined services, regardless of their running state. # Arguments for `docker-compose ps` may be passed in order to filter the service list, # e.g. `status=running`. __docker_compose_services() { __docker_compose_q ps --services "$@" } # Applies completion of services based on the current value of `$cur`. # Arguments for `docker-compose ps` may be passed in order to filter the service list, # see `__docker_compose_services`. 
__docker_compose_complete_services() { COMPREPLY=( $(compgen -W "$(__docker_compose_services "$@")" -- "$cur") ) } # The services for which at least one running container exists __docker_compose_complete_running_services() { local names=$(__docker_compose_services --filter status=running) COMPREPLY=( $(compgen -W "$names" -- "$cur") ) } _docker_compose_build() { case "$prev" in --build-arg) COMPREPLY=( $( compgen -e -- "$cur" ) ) __docker_compose_nospace return ;; --memory|-m) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory -m --no-cache --no-rm --pull --parallel -q --quiet" -- "$cur" ) ) ;; *) __docker_compose_complete_services --filter source=build ;; esac } _docker_compose_config() { case "$prev" in --hash) if [[ $cur == \\* ]] ; then COMPREPLY=( '\*' ) else COMPREPLY=( $(compgen -W "$(__docker_compose_services) \\\* " -- "$cur") ) fi return ;; esac COMPREPLY=( $( compgen -W "--hash --help --no-interpolate --profiles --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) ) } _docker_compose_create() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--build --force-recreate --help --no-build --no-recreate" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_docker_compose() { case "$prev" in --tlscacert|--tlscert|--tlskey) _filedir return ;; --file|-f) _filedir "y?(a)ml" return ;; --ansi) COMPREPLY=( $( compgen -W "never always auto" -- "$cur" ) ) return ;; --log-level) COMPREPLY=( $( compgen -W "debug info warning error critical" -- "$cur" ) ) return ;; --profile) COMPREPLY=( $( compgen -W "$(__docker_compose_q config --profiles)" -- "$cur" ) ) return ;; --project-directory) _filedir -d return ;; --env-file) _filedir return ;; $(__docker_compose_to_extglob "$daemon_options_with_args") ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "$daemon_boolean_options $daemon_options_with_args $top_level_options_with_args --help -h --no-ansi --verbose --version -v" -- "$cur" ) ) ;; *) COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) ;; esac } _docker_compose_down() { case "$prev" in --rmi) COMPREPLY=( $( compgen -W "all local" -- "$cur" ) ) return ;; --timeout|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --rmi --timeout -t --volumes -v --remove-orphans" -- "$cur" ) ) ;; esac } _docker_compose_events() { case "$prev" in --json) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --json" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_exec() { case "$prev" in --index|--user|-u|--workdir|-w) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "-d --detach --help --index --privileged -T --user -u --workdir -w" -- "$cur" ) ) ;; *) __docker_compose_complete_running_services ;; esac } _docker_compose_help() { COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) } _docker_compose_images() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_kill() { case "$prev" in -s) COMPREPLY=( $( compgen -W "SIGHUP SIGINT SIGKILL SIGUSR1 SIGUSR2" -- "$(echo $cur | tr '[:lower:]' '[:upper:]')" ) ) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help -s" -- "$cur" ) ) ;; *) __docker_compose_complete_running_services ;; esac } _docker_compose_logs() { case "$prev" in --tail) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--follow -f --help --no-color --no-log-prefix --tail --timestamps -t" 
-- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_pause() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_compose_complete_running_services ;; esac } _docker_compose_port() { case "$prev" in --protocol) COMPREPLY=( $( compgen -W "tcp udp" -- "$cur" ) ) return; ;; --index) return; ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --index --protocol" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_ps() { local key=$(__docker_compose_map_key_of_current_option '--filter') case "$key" in source) COMPREPLY=( $( compgen -W "build image" -- "${cur##*=}" ) ) return ;; status) COMPREPLY=( $( compgen -W "paused restarting running stopped" -- "${cur##*=}" ) ) return ;; esac case "$prev" in --filter) COMPREPLY=( $( compgen -W "source status" -S "=" -- "$cur" ) ) __docker_compose_nospace return; ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_pull() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --include-deps --no-parallel --quiet -q" -- "$cur" ) ) ;; *) __docker_compose_complete_services --filter source=image ;; esac } _docker_compose_push() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_restart() { case "$prev" in --timeout|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) ;; *) __docker_compose_complete_running_services ;; esac } _docker_compose_rm() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--force -f --help --stop -s -v" -- "$cur" ) ) ;; *) if __docker_compose_has_option "--stop|-s" ; then __docker_compose_complete_services else __docker_compose_complete_services --filter status=stopped fi ;; esac } _docker_compose_run() { case "$prev" in -e) COMPREPLY=( $( compgen -e -- "$cur" ) ) __docker_compose_nospace return ;; --entrypoint|--label|-l|--name|--user|-u|--volume|-v|--workdir|-w) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--detach -d --entrypoint -e --help --label -l --name --no-deps --publish -p --rm --service-ports -T --use-aliases --user -u --volume -v --workdir -w" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_scale() { case "$prev" in =) COMPREPLY=("$cur") return ;; --timeout|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) ;; *) COMPREPLY=( $(compgen -S "=" -W "$(__docker_compose_services)" -- "$cur") ) __docker_compose_nospace ;; esac } _docker_compose_start() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_compose_complete_services --filter status=stopped ;; esac } _docker_compose_stop() { case "$prev" in --timeout|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) ;; *) __docker_compose_complete_running_services ;; esac } _docker_compose_top() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_compose_complete_running_services ;; esac } _docker_compose_unpause() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) ;; *) __docker_compose_complete_services --filter status=paused ;; esac } _docker_compose_up() { case "$prev" in =) COMPREPLY=("$cur") return ;; --exit-code-from) __docker_compose_complete_services 
return ;; --scale) COMPREPLY=( $(compgen -S "=" -W "$(__docker_compose_services)" -- "$cur") ) __docker_compose_nospace return ;; --timeout|-t) return ;; esac case "$cur" in -*) COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --attach-dependencies --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-log-prefix --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) ) ;; *) __docker_compose_complete_services ;; esac } _docker_compose_version() { case "$cur" in -*) COMPREPLY=( $( compgen -W "--short" -- "$cur" ) ) ;; esac } _docker_compose() { local previous_extglob_setting=$(shopt -p extglob) shopt -s extglob local commands=( build config create down events exec help images kill logs pause port ps pull push restart rm run scale start stop top unpause up version ) # Options for the docker daemon that have to be passed to secondary calls to # docker-compose executed by this script. local daemon_boolean_options=" --skip-hostname-check --tls --tlsverify " local daemon_options_with_args=" --context -c --env-file --file -f --host -H --project-directory --project-name -p --tlscacert --tlscert --tlskey " # These options require special treatment when searching the command. local top_level_options_with_args=" --ansi --log-level --profile " COMPREPLY=() local cur prev words cword _get_comp_words_by_ref -n : cur prev words cword # search subcommand and invoke its handler. # special treatment of some top-level options local command='docker_compose' local top_level_options=() local counter=1 while [ $counter -lt $cword ]; do case "${words[$counter]}" in $(__docker_compose_to_extglob "$daemon_boolean_options") ) local opt=${words[counter]} top_level_options+=($opt) ;; $(__docker_compose_to_extglob "$daemon_options_with_args") ) local opt=${words[counter]} local arg=${words[++counter]} top_level_options+=($opt $arg) ;; $(__docker_compose_to_extglob "$top_level_options_with_args") ) (( counter++ )) ;; -*) ;; *) command="${words[$counter]}" break ;; esac (( counter++ )) done local completions_func=_docker_compose_${command//-/_} declare -F $completions_func >/dev/null && $completions_func eval "$previous_extglob_setting" return 0 } eval "$__docker_compose_previous_extglob_setting" unset __docker_compose_previous_extglob_setting complete -F _docker_compose docker-compose docker-compose.exe compose-1.29.2/contrib/completion/fish/000077500000000000000000000000001404620552300200155ustar00rootroot00000000000000compose-1.29.2/contrib/completion/fish/docker-compose.fish000066400000000000000000000035471404620552300236130ustar00rootroot00000000000000# Tab completion for docker-compose (https://github.com/docker/compose). 
# Version: 1.9.0 complete -e -c docker-compose for line in (docker-compose --help | \ string match -r '^\s+\w+\s+[^\n]+' | \ string trim) set -l doc (string split -m 1 ' ' -- $line) complete -c docker-compose -n '__fish_use_subcommand' -xa $doc[1] --description $doc[2] end complete -c docker-compose -s f -l file -r -d 'Specify an alternate compose file' complete -c docker-compose -s p -l project-name -x -d 'Specify an alternate project name' complete -c docker-compose -l env-file -r -d 'Specify an alternate environment file (default: .env)' complete -c docker-compose -l verbose -d 'Show more output' complete -c docker-compose -s H -l host -x -d 'Daemon socket to connect to' complete -c docker-compose -l tls -d 'Use TLS; implied by --tlsverify' complete -c docker-compose -l tlscacert -r -d 'Trust certs signed only by this CA' complete -c docker-compose -l tlscert -r -d 'Path to TLS certificate file' complete -c docker-compose -l tlskey -r -d 'Path to TLS key file' complete -c docker-compose -l tlsverify -d 'Use TLS and verify the remote' complete -c docker-compose -l skip-hostname-check -d "Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)" complete -c docker-compose -l no-ansi -d 'Do not print ANSI control characters' complete -c docker-compose -l ansi -a 'never always auto' -d 'Control when to print ANSI control characters' complete -c docker-compose -s h -l help -d 'Print usage' complete -c docker-compose -s v -l version -d 'Print version and exit' compose-1.29.2/contrib/completion/zsh/000077500000000000000000000000001404620552300176705ustar00rootroot00000000000000compose-1.29.2/contrib/completion/zsh/_docker-compose000077500000000000000000000433721404620552300227000ustar00rootroot00000000000000#compdef docker-compose # Description # ----------- # zsh completion for docker-compose # ------------------------------------------------------------------------- # Authors # ------- # * Steve Durrheimer # ------------------------------------------------------------------------- # Inspiration # ----------- # * @albers docker-compose bash completion script # * @felixr docker zsh completion script : https://github.com/felixr/docker-zsh-completion # ------------------------------------------------------------------------- __docker-compose_q() { docker-compose 2>/dev/null $compose_options "$@" } # All services defined in docker-compose.yml __docker-compose_all_services_in_compose_file() { local already_selected local -a services already_selected=$(echo $words | tr " " "|") __docker-compose_q ps --services "$@" \ | grep -Ev "^(${already_selected})$" } # All services, even those without an existing container __docker-compose_services_all() { [[ $PREFIX = -* ]] && return 1 integer ret=1 services=$(__docker-compose_all_services_in_compose_file "$@") _alternative "args:services:($services)" && ret=0 return ret } # All services that are defined by a Dockerfile reference __docker-compose_services_from_build() { [[ $PREFIX = -* ]] && return 1 __docker-compose_services_all --filter source=build } # All services that are defined by an image __docker-compose_services_from_image() { [[ $PREFIX = -* ]] && return 1 __docker-compose_services_all --filter source=image } __docker-compose_pausedservices() { [[ $PREFIX = -* ]] && return 1 __docker-compose_services_all --filter status=paused } __docker-compose_stoppedservices() { [[ $PREFIX = -* ]] && return 1 __docker-compose_services_all --filter status=stopped } 
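# Illustration (added comment, not part of the upstream script): each of the
# filter helpers above ultimately runs something like
#     docker-compose ps --services --filter status=stopped
# which is assumed to print one matching service name per line, e.g.
#     db
#     web
# and those names become the completion candidates offered to the user.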
__docker-compose_runningservices() { [[ $PREFIX = -* ]] && return 1 __docker-compose_services_all --filter status=running } __docker-compose_services() { [[ $PREFIX = -* ]] && return 1 __docker-compose_services_all } __docker-compose_caching_policy() { oldp=( "$1"(Nmh+1) ) # 1 hour (( $#oldp )) } __docker-compose_commands() { local cache_policy zstyle -s ":completion:${curcontext}:" cache-policy cache_policy if [[ -z "$cache_policy" ]]; then zstyle ":completion:${curcontext}:" cache-policy __docker-compose_caching_policy fi if ( [[ ${+_docker_compose_subcommands} -eq 0 ]] || _cache_invalid docker_compose_subcommands) \ && ! _retrieve_cache docker_compose_subcommands; then local -a lines lines=(${(f)"$(_call_program commands docker-compose 2>&1)"}) _docker_compose_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) (( $#_docker_compose_subcommands > 0 )) && _store_cache docker_compose_subcommands _docker_compose_subcommands fi _describe -t docker-compose-commands "docker-compose command" _docker_compose_subcommands } __docker-compose_subcommand() { local opts_help opts_force_recreate opts_no_recreate opts_no_build opts_remove_orphans opts_timeout opts_no_color opts_no_deps opts_help='(: -)--help[Print usage]' opts_force_recreate="(--no-recreate)--force-recreate[Recreate containers even if their configuration and image haven't changed. Incompatible with --no-recreate.]" opts_no_recreate="(--force-recreate)--no-recreate[If containers already exist, don't recreate them. Incompatible with --force-recreate.]" opts_no_build="(--build)--no-build[Don't build an image, even if it's missing.]" opts_remove_orphans="--remove-orphans[Remove containers for services not defined in the Compose file]" opts_timeout=('(-t --timeout)'{-t,--timeout}"[Specify a shutdown timeout in seconds. (default: 10)]:seconds: ") opts_no_color='--no-color[Produce monochrome output.]' opts_no_deps="--no-deps[Don't start linked services.]" integer ret=1 case "$words[1]" in (build) _arguments \ $opts_help \ "*--build-arg=[Set build-time variables for one service.]:=: " \ '--force-rm[Always remove intermediate containers.]' \ '(--quiet -q)'{--quiet,-q}'[Curb build output]' \ '(--memory -m)'{--memory,-m}'[Memory limit for the build container.]' \ '--no-cache[Do not use cache when building the image.]' \ '--pull[Always attempt to pull a newer version of the image.]' \ '--compress[Compress the build context using gzip.]' \ '--parallel[Build images in parallel.]' \ '*:services:__docker-compose_services_from_build' && ret=0 ;; (config) _arguments \ $opts_help \ '(--quiet -q)'{--quiet,-q}"[Only validate the configuration, don't print anything.]" \ '--resolve-image-digests[Pin image tags to digests.]' \ '--services[Print the service names, one per line.]' \ '--volumes[Print the volume names, one per line.]' \ '--hash[Print the service config hash, one per line. Set "service1,service2" for a list of specified services.]' \ && ret=0 ;; (create) _arguments \ $opts_help \ $opts_force_recreate \ $opts_no_recreate \ $opts_no_build \ "(--no-build)--build[Build images before creating containers.]" \ '*:services:__docker-compose_services' && ret=0 ;; (down) _arguments \ $opts_help \ $opts_timeout \ "--rmi[Remove images. Type must be one of: 'all': Remove all images used by any service. 
'local': Remove only images that don't have a custom tag set by the \`image\` field.]:type:(all local)" \ '(-v --volumes)'{-v,--volumes}"[Remove named volumes declared in the \`volumes\` section of the Compose file and anonymous volumes attached to containers.]" \ $opts_remove_orphans && ret=0 ;; (events) _arguments \ $opts_help \ '--json[Output events as a stream of json objects]' \ '*:services:__docker-compose_services' && ret=0 ;; (exec) _arguments \ $opts_help \ '-d[Detached mode: Run command in the background.]' \ '--privileged[Give extended privileges to the process.]' \ '(-u --user)'{-u,--user=}'[Run the command as this user.]:username:_users' \ '-T[Disable pseudo-tty allocation. By default `docker-compose exec` allocates a TTY.]' \ '--index=[Index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \ '*'{-e,--env}'[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \ '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \ '(-):running services:__docker-compose_runningservices' \ '(-):command: _command_names -e' \ '*::arguments: _normal' && ret=0 ;; (help) _arguments ':subcommand:__docker-compose_commands' && ret=0 ;; (images) _arguments \ $opts_help \ '-q[Only display IDs]' \ '*:services:__docker-compose_services' && ret=0 ;; (kill) _arguments \ $opts_help \ '-s[SIGNAL to send to the container. Default signal is SIGKILL.]:signal:_signals' \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (logs) _arguments \ $opts_help \ '(-f --follow)'{-f,--follow}'[Follow log output]' \ $opts_no_color \ '--tail=[Number of lines to show from the end of the logs for each container.]:number of lines: ' \ '(-t --timestamps)'{-t,--timestamps}'[Show timestamps]' \ '*:services:__docker-compose_services' && ret=0 ;; (pause) _arguments \ $opts_help \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (port) _arguments \ $opts_help \ '--protocol=[tcp or udp \[default: tcp\]]:protocol:(tcp udp)' \ '--index=[index of the container if there are multiple instances of a service \[default: 1\]]:index: ' \ '1:running services:__docker-compose_runningservices' \ '2:port:_ports' && ret=0 ;; (ps) _arguments \ $opts_help \ '-q[Only display IDs]' \ '--filter KEY=VAL[Filter services by a property]:=:' \ '*:services:__docker-compose_services' && ret=0 ;; (pull) _arguments \ $opts_help \ '--ignore-pull-failures[Pull what it can and ignores images with pull failures.]' \ '--no-parallel[Disable parallel pulling]' \ '(-q --quiet)'{-q,--quiet}'[Pull without printing progress information]' \ '--include-deps[Also pull services declared as dependencies]' \ '*:services:__docker-compose_services_from_image' && ret=0 ;; (push) _arguments \ $opts_help \ '--ignore-push-failures[Push what it can and ignores images with push failures.]' \ '*:services:__docker-compose_services' && ret=0 ;; (rm) _arguments \ $opts_help \ '(-f --force)'{-f,--force}"[Don't ask to confirm removal]" \ '-v[Remove any anonymous volumes attached to containers]' \ '*:stopped services:__docker-compose_stoppedservices' && ret=0 ;; (run) _arguments \ $opts_help \ $opts_no_deps \ '-d[Detached mode: Run container in the background, print new container name.]' \ '*-e[KEY=VAL Set an environment variable (can be used multiple times)]:environment variable KEY=VAL: ' \ '*'{-l,--label}'[KEY=VAL Add or override a label (can be used multiple times)]:label KEY=VAL: ' \ '--entrypoint[Overwrite the entrypoint of the image.]:entry 
point: ' \ '--name=[Assign a name to the container]:name: ' \ '(-p --publish)'{-p,--publish=}"[Publish a container's port(s) to the host]" \ '--rm[Remove container after run. Ignored in detached mode.]' \ "--service-ports[Run command with the service's ports enabled and mapped to the host.]" \ '-T[Disable pseudo-tty allocation. By default `docker-compose run` allocates a TTY.]' \ '(-u --user)'{-u,--user=}'[Run as specified username or uid]:username or uid:_users' \ '(-v --volume)*'{-v,--volume=}'[Bind mount a volume]:volume: ' \ '(-w --workdir)'{-w,--workdir=}'[Working directory inside the container]:workdir: ' \ "--use-aliases[Use the services network aliases in the network(s) the container connects to]" \ '(-):services:__docker-compose_services' \ '(-):command: _command_names -e' \ '*::arguments: _normal' && ret=0 ;; (scale) _arguments \ $opts_help \ $opts_timeout \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (start) _arguments \ $opts_help \ '*:stopped services:__docker-compose_stoppedservices' && ret=0 ;; (stop|restart) _arguments \ $opts_help \ $opts_timeout \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (top) _arguments \ $opts_help \ '*:running services:__docker-compose_runningservices' && ret=0 ;; (unpause) _arguments \ $opts_help \ '*:paused services:__docker-compose_pausedservices' && ret=0 ;; (up) _arguments \ $opts_help \ '(--abort-on-container-exit)-d[Detached mode: Run containers in the background, print new container names. Incompatible with --abort-on-container-exit and --attach-dependencies.]' \ $opts_no_color \ $opts_no_deps \ $opts_force_recreate \ $opts_no_recreate \ $opts_no_build \ "(--no-build)--build[Build images before starting containers.]" \ "(-d)--abort-on-container-exit[Stops all containers if any container was stopped. Incompatible with -d.]" \ "(-d)--attach-dependencies[Attach to dependent containers. Incompatible with -d.]" \ '(-t --timeout)'{-t,--timeout}"[Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)]:seconds: " \ '--scale[SERVICE=NUM Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.]:service scale SERVICE=NUM: ' \ '--exit-code-from=[Return the exit code of the selected service container. Implies --abort-on-container-exit]:service:__docker-compose_services' \ $opts_remove_orphans \ '*:services:__docker-compose_services' && ret=0 ;; (version) _arguments \ $opts_help \ "--short[Shows only Compose's version number.]" && ret=0 ;; (*) _message 'Unknown sub command' && ret=1 ;; esac return ret } _docker-compose() { # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`. 
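# Illustrative (hypothetical) example: after
#     compdef _docker-compose dco=docker-compose
# a wrapper command 'dco' completes exactly like docker-compose, since
# $service is then 'docker-compose' and the guard below falls through; any
# other service name would be dispatched to its matching _<service> function
# by _call_function.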
if [[ $service != docker-compose ]]; then _call_function - _$service return fi local curcontext="$curcontext" state line integer ret=1 typeset -A opt_args local file_description if [[ -n ${words[(r)-f]} || -n ${words[(r)--file]} ]] ; then file_description="Specify an override docker-compose file (default: docker-compose.override.yml)" else file_description="Specify an alternate docker-compose file (default: docker-compose.yml)" fi _arguments -C \ '(- :)'{-h,--help}'[Get help]' \ '*'{-f,--file}"[${file_description}]:file:_files -g '*.yml'" \ '(-p --project-name)'{-p,--project-name}'[Specify an alternate project name (default: directory name)]:project name:' \ '--env-file[Specify an alternate environment file (default: .env)]:env-file:_files' \ "--compatibility[If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalent]" \ '(- :)'{-v,--version}'[Print version and exit]' \ '--verbose[Show more output]' \ '--log-level=[Set log level]:level:(DEBUG INFO WARNING ERROR CRITICAL)' \ '--no-ansi[Do not print ANSI control characters]' \ '--ansi=[Control when to print ANSI control characters]:when:(never always auto)' \ '(-H --host)'{-H,--host}'[Daemon socket to connect to]:host:' \ '--tls[Use TLS; implied by --tlsverify]' \ '--tlscacert=[Trust certs signed only by this CA]:ca path:' \ '--tlscert=[Path to TLS certificate file]:client cert path:' \ '--tlskey=[Path to TLS key file]:tls key path:' \ '--tlsverify[Use TLS and verify the remote]' \ "--skip-hostname-check[Don't check the daemon's hostname against the name specified in the client certificate (for example if your docker host is an IP address)]" \ '(-): :->command' \ '(-)*:: :->option-or-argument' && ret=0 local -a relevant_compose_flags relevant_compose_repeatable_flags relevant_docker_flags compose_options docker_options relevant_compose_flags=( "--env-file" "--file" "-f" "--host" "-H" "--project-name" "-p" "--tls" "--tlscacert" "--tlscert" "--tlskey" "--tlsverify" "--skip-hostname-check" ) relevant_compose_repeatable_flags=( "--file" "-f" ) relevant_docker_flags=( "--host" "-H" "--tls" "--tlscacert" "--tlscert" "--tlskey" "--tlsverify" ) for k in "${(@k)opt_args}"; do if [[ -n "${relevant_docker_flags[(r)$k]}" ]]; then docker_options+=$k if [[ -n "$opt_args[$k]" ]]; then docker_options+=$opt_args[$k] fi fi if [[ -n "${relevant_compose_flags[(r)$k]}" ]]; then if [[ -n "${relevant_compose_repeatable_flags[(r)$k]}" ]]; then values=("${(@s/:/)opt_args[$k]}") for value in $values do compose_options+=$k compose_options+=$value done else compose_options+=$k if [[ -n "$opt_args[$k]" ]]; then compose_options+=$opt_args[$k] fi fi fi done case $state in (command) __docker-compose_commands && ret=0 ;; (option-or-argument) curcontext=${curcontext%:*:*}:docker-compose-$words[1]: __docker-compose_subcommand && ret=0 ;; esac return ret } _docker-compose "$@" compose-1.29.2/contrib/migration/000077500000000000000000000000001404620552300167045ustar00rootroot00000000000000compose-1.29.2/contrib/migration/migrate-compose-file-v1-to-v2.py000077500000000000000000000124321404620552300245640ustar00rootroot00000000000000#!/usr/bin/env python """ Migrate a Compose file from the V1 format in Compose 1.5 to the V2 format supported by Compose 1.6+ """ import argparse import logging import sys import ruamel.yaml from compose.config.types import VolumeSpec log = logging.getLogger('migrate') def migrate(content): data = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader) service_names = data.keys() for name, service in data.items(): 
warn_for_links(name, service) warn_for_external_links(name, service) rewrite_net(service, service_names) rewrite_build(service) rewrite_logging(service) rewrite_volumes_from(service, service_names) services = {name: data.pop(name) for name in list(data.keys())} data['version'] = "2" data['services'] = services create_volumes_section(data) return data def warn_for_links(name, service): links = service.get('links') if links: example_service = links[0].partition(':')[0] log.warning( "Service {name} has links, which no longer create environment " "variables such as {example_service_upper}_PORT. " "If you are using those in your application code, you should " "instead connect directly to the hostname, e.g. " "'{example_service}'." .format(name=name, example_service=example_service, example_service_upper=example_service.upper())) def warn_for_external_links(name, service): external_links = service.get('external_links') if external_links: log.warning( "Service {name} has external_links: {ext}, which now work " "slightly differently. In particular, two containers must be " "connected to at least one network in common in order to " "communicate, even if explicitly linked together.\n\n" "Either connect the external container to your app's default " "network, or connect both the external container and your " "service's containers to a pre-existing network. See " "https://docs.docker.com/compose/networking/ " "for more on how to do this." .format(name=name, ext=external_links)) def rewrite_net(service, service_names): if 'net' in service: network_mode = service.pop('net') # "container:<name>" is now "service:<name>" if network_mode.startswith('container:'): name = network_mode.partition(':')[2] if name in service_names: network_mode = 'service:{}'.format(name) service['network_mode'] = network_mode def rewrite_build(service): if 'dockerfile' in service: service['build'] = { 'context': service.pop('build'), 'dockerfile': service.pop('dockerfile'), } def rewrite_logging(service): if 'log_driver' in service: service['logging'] = {'driver': service.pop('log_driver')} if 'log_opt' in service: service['logging']['options'] = service.pop('log_opt') def rewrite_volumes_from(service, service_names): for idx, volume_from in enumerate(service.get('volumes_from', [])): if volume_from.split(':', 1)[0] not in service_names: service['volumes_from'][idx] = 'container:%s' % volume_from def create_volumes_section(data): named_volumes = get_named_volumes(data['services']) if named_volumes: log.warning( "Named volumes ({names}) must be explicitly declared. Creating a " "'volumes' section with declarations.\n\n" "For backwards-compatibility, they've been declared as external. " "If you don't mind the volume names being prefixed with the " "project name, you can remove the 'external' option from each one."
.format(names=', '.join(list(named_volumes)))) data['volumes'] = named_volumes def get_named_volumes(services): volume_specs = [ VolumeSpec.parse(volume) for service in services.values() for volume in service.get('volumes', []) ] names = { spec.external for spec in volume_specs if spec.is_named_volume } return {name: {'external': True} for name in names} def write(stream, new_format, indent, width): ruamel.yaml.dump( new_format, stream, Dumper=ruamel.yaml.RoundTripDumper, indent=indent, width=width) def parse_opts(args): parser = argparse.ArgumentParser() parser.add_argument("filename", help="Compose file filename.") parser.add_argument("-i", "--in-place", action='store_true') parser.add_argument( "--indent", type=int, default=2, help="Number of spaces used to indent the output yaml.") parser.add_argument( "--width", type=int, default=80, help="Number of spaces used as the output width.") return parser.parse_args() def main(args): logging.basicConfig(format='\033[33m%(levelname)s:\033[37m %(message)s\033[0m\n') opts = parse_opts(args) with open(opts.filename) as fh: new_format = migrate(fh.read()) if opts.in_place: output = open(opts.filename, 'w') else: output = sys.stdout write(output, new_format, opts.indent, opts.width) if __name__ == "__main__": main(sys.argv) compose-1.29.2/contrib/update/000077500000000000000000000000001404620552300161755ustar00rootroot00000000000000compose-1.29.2/contrib/update/update-docker-compose.ps1000066400000000000000000000106241404620552300230170ustar00rootroot00000000000000# Self-elevate the script if required # http://www.expta.com/2017/03/how-to-self-elevate-powershell-script.html If (-Not ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] 'Administrator')) { If ([int](Get-CimInstance -Class Win32_OperatingSystem | Select-Object -ExpandProperty BuildNumber) -ge 6000) { $CommandLine = "-File `"" + $MyInvocation.MyCommand.Path + "`" " + $MyInvocation.UnboundArguments Start-Process -FilePath PowerShell.exe -Verb Runas -ArgumentList $CommandLine Exit } } $SectionSeparator = "--------------------------------------------------" # Update docker-compose if required Function UpdateDockerCompose() { Write-Host "Updating docker-compose if required..." Write-Host $SectionSeparator # Find the installed docker-compose.exe location Try { $DockerComposePath = Get-Command docker-compose.exe -ErrorAction Stop | ` Select-Object -First 1 -ExpandProperty Definition } Catch { Write-Host "Error: Could not find path to docker-compose.exe" ` -ForegroundColor Red Return $false } # Prefer/enable TLS 1.2 # https://stackoverflow.com/a/48030563/153079 [Net.ServicePointManager]::SecurityProtocol = "tls12, tls11, tls" # Query for the latest release version Try { $URI = "https://api.github.com/repos/docker/compose/releases/latest" $LatestComposeVersion = [System.Version](Invoke-RestMethod -Method Get -Uri $URI).tag_name } Catch { Write-Host "Error: Query for the latest docker-compose release version failed" ` -ForegroundColor Red Return $false } # Check the installed version and compare with latest release $UpdateDockerCompose = $false Try { $InstalledComposeVersion = ` [System.Version]((docker-compose.exe version --short) | Out-String) If ($InstalledComposeVersion -eq $LatestComposeVersion) { Write-Host ("Installed docker-compose version ({0}) same as latest ({1})." 
` -f $InstalledComposeVersion.ToString(), $LatestComposeVersion.ToString()) } ElseIf ($InstalledComposeVersion -lt $LatestComposeVersion) { Write-Host ("Installed docker-compose version ({0}) older than latest ({1})." ` -f $InstalledComposeVersion.ToString(), $LatestComposeVersion.ToString()) $UpdateDockerCompose = $true } Else { Write-Host ("Installed docker-compose version ({0}) newer than latest ({1})." ` -f $InstalledComposeVersion.ToString(), $LatestComposeVersion.ToString()) ` -ForegroundColor Yellow } } Catch { Write-Host ` "Warning: Couldn't get docker-compose version, assuming an update is required..." ` -ForegroundColor Yellow $UpdateDockerCompose = $true } If (-Not $UpdateDockerCompose) { # Nothing to do! Return $false } # Download the latest version of docker-compose.exe Try { $RemoteFileName = "docker-compose-Windows-x86_64.exe" $URI = ("https://github.com/docker/compose/releases/download/{0}/{1}" ` -f $LatestComposeVersion.ToString(), $RemoteFileName) Invoke-WebRequest -UseBasicParsing -Uri $URI ` -OutFile $DockerComposePath Return $true } Catch { Write-Host ("Error: Failed to download the latest version of docker-compose`n{0}" ` -f $_.Exception.Message) -ForegroundColor Red Return $false } Return $false } If (UpdateDockerCompose) { Write-Host "Updated to latest-version of docker-compose, running update again to verify.`n" If (UpdateDockerCompose) { Write-Host "Error: Should not have updated twice." -ForegroundColor Red } } # Assuming elevation popped up a new powershell window, pause so the user can see what happened # https://stackoverflow.com/a/22362868/153079 Function Pause ($Message = "Press any key to continue . . . ") { If ((Test-Path variable:psISE) -and $psISE) { $Shell = New-Object -ComObject "WScript.Shell" $Shell.Popup("Click OK to continue.", 0, "Script Paused", 0) } Else { Write-Host "`n$SectionSeparator" Write-Host -NoNewline $Message [void][System.Console]::ReadKey($true) Write-Host } } Pause compose-1.29.2/docker-compose-entrypoint.sh000077500000000000000000000010421404620552300207320ustar00rootroot00000000000000#!/bin/sh set -e # first arg is `-f` or `--some-option` if [ "${1#-}" != "$1" ]; then set -- docker-compose "$@" fi # if our command is a valid Docker subcommand, let's invoke it through Docker instead # (this allows for "docker run docker ps", etc) if docker-compose help "$1" > /dev/null 2>&1; then set -- docker-compose "$@" fi # if we have "--link some-docker:docker" and not DOCKER_HOST, let's set DOCKER_HOST automatically if [ -z "$DOCKER_HOST" -a "$DOCKER_PORT_2375_TCP" ]; then export DOCKER_HOST='tcp://docker:2375' fi exec "$@" compose-1.29.2/docker-compose.spec000066400000000000000000000017551404620552300170510ustar00rootroot00000000000000# -*- mode: python -*- block_cipher = None a = Analysis(['bin/docker-compose'], pathex=['.'], hiddenimports=[], hookspath=None, runtime_hooks=None, cipher=block_cipher) pyz = PYZ(a.pure, cipher=block_cipher) exe = EXE(pyz, a.scripts, a.binaries, a.zipfiles, a.datas, [ ( 'compose/config/config_schema_v1.json', 'compose/config/config_schema_v1.json', 'DATA' ), ( 'compose/config/compose_spec.json', 'compose/config/compose_spec.json', 'DATA' ), ( 'compose/GITSHA', 'compose/GITSHA', 'DATA' ) ], name='docker-compose', debug=False, strip=None, upx=True, console=True, bootloader_ignore_signals=True) compose-1.29.2/docker-compose_darwin.spec000066400000000000000000000022561404620552300204120ustar00rootroot00000000000000# -*- mode: python -*- block_cipher = None a = Analysis(['bin/docker-compose'], pathex=['.'], 
hiddenimports=[], hookspath=[], runtime_hooks=[], cipher=block_cipher) pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) exe = EXE(pyz, a.scripts, exclude_binaries=True, name='docker-compose', debug=False, strip=False, upx=True, console=True, bootloader_ignore_signals=True) coll = COLLECT(exe, a.binaries, a.zipfiles, a.datas, [ ( 'compose/config/config_schema_v1.json', 'compose/config/config_schema_v1.json', 'DATA' ), ( 'compose/config/compose_spec.json', 'compose/config/compose_spec.json', 'DATA' ), ( 'compose/GITSHA', 'compose/GITSHA', 'DATA' ) ], strip=False, upx=True, upx_exclude=[], name='docker-compose-Darwin-x86_64') compose-1.29.2/docs/000077500000000000000000000000001404620552300142035ustar00rootroot00000000000000compose-1.29.2/docs/README.md000066400000000000000000000013261404620552300154640ustar00rootroot00000000000000# The docs have been moved! The documentation for Compose has been merged into [the general documentation repo](https://github.com/docker/docker.github.io). The docs for Compose are now here: https://github.com/docker/docker.github.io/tree/master/compose Please submit pull requests for unreleased features/changes on the `master` branch (https://github.com/docker/docker.github.io/tree/master), and please prefix the PR title with `[WIP]` to indicate that it relates to an unreleased change. If you submit a PR to this codebase that has a docs impact, create a second docs PR on `docker.github.io`. Use the docs PR template provided. As always, the docs remain open-source and we appreciate your feedback and pull requests! compose-1.29.2/docs/issue_template.md000066400000000000000000000022671404620552300175570ustar00rootroot00000000000000 ## Description of the issue ## Context information (for bug reports) ``` Output of "docker-compose version" ``` ``` Output of "docker version" ``` ``` Output of "docker-compose config" ``` ## Steps to reproduce the issue 1. 2. 3. ### Observed result ### Expected result ### Stacktrace / full error message ``` (if applicable) ``` ## Additional information OS version / distribution, `docker-compose` install method, etc. compose-1.29.2/docs/pull_request_template.md000066400000000000000000000010701404620552300211420ustar00rootroot00000000000000 Resolves # compose-1.29.2/experimental/000077500000000000000000000000001404620552300157505ustar00rootroot00000000000000compose-1.29.2/experimental/compose_swarm_networking.md000066400000000000000000000002561404620552300234220ustar00rootroot00000000000000# Experimental: Compose, Swarm and Multi-Host Networking Compose now supports multi-host networking as standard. Read more here: https://docs.docker.com/compose/networking compose-1.29.2/logo.png000066400000000000000000001143371404620552300147300ustar00rootroot00000000000000[logo.png: binary PNG image data omitted]
compose-1.29.2/project/000077500000000000000000000000001404620552300147215ustar00rootroot00000000000000compose-1.29.2/project/ISSUE-TRIAGE.md000066400000000000000000000017371404620552300171140ustar00rootroot00000000000000Triaging of issues ------------------ The docker-compose issue triage process follows https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md with the following additions or exceptions. ### Classify the Issue The following labels are provided in addition to the standard labels: | Kind | Description | |--------------|-------------------------------------------------------------------| | kind/cleanup | A refactor or improvement that is related to quality, not function | | kind/parity | A request for feature parity with docker cli | ### Functional areas Most issues should fit into one of the following functional areas: | Area | |-----------------| | area/build | | area/cli | | area/config | | area/logs | | area/networking | | area/packaging | | area/run | | area/scale | | area/tests | | area/up | | area/volumes | compose-1.29.2/project/RELEASE-PROCESS.md000077700000000000000000000000001404620552300240502../script/release/README.mdustar00rootroot00000000000000compose-1.29.2/pyinstaller/000077500000000000000000000000001404620552300156215ustar00rootroot00000000000000compose-1.29.2/pyinstaller/ldd000077500000000000000000000010201404620552300163060ustar00rootroot00000000000000#!/bin/sh # From http://wiki.musl-libc.org/wiki/FAQ#Q:_where_is_ldd_.3F # # Musl's dynlinker comes with ldd functionality built in. Just create a # symlink from ld-musl-$ARCH.so to /bin/ldd. If the dynlinker was started # as "ldd", it will detect that and print the appropriate DSO information. # # Instead, this wrapper rewrites "ldd" in the output to the real library # path so that pyinstaller can find the actual lib.
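# Illustration (added comment, assuming musl-style ldd output): a line such as
#     libfoo.so.1 => ldd (0x7f0000000000)
# is rewritten by the first sed expression below to
#     libfoo.so.1 => /lib/libfoo.so.1 (0x7f0000000000)
# while a bare "ldd (0x...)" reference to the dynlinker itself is removed by
# the second expression. The exact output format shown here is an assumption,
# not taken from the original file.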
exec /usr/bin/ldd "$@" | \ sed -r 's/([^[:space:]]+) => ldd/\1 => \/lib\/\1/g' | \ sed -r 's/ldd \(.*\)//g' compose-1.29.2/requirements-build.txt000066400000000000000000000000211404620552300176250ustar00rootroot00000000000000pyinstaller==4.1 compose-1.29.2/requirements-dev.txt000066400000000000000000000002621404620552300173130ustar00rootroot00000000000000Click==7.1.2 coverage==5.5 ddt==1.4.1 flake8==3.8.3 gitpython==3.1.11 mock==3.0.5 pytest==6.0.1; python_version >= '3.5' pytest==4.6.5; python_version < '3.5' pytest-cov==2.10.1 compose-1.29.2/requirements-indirect.txt000066400000000000000000000007521404620552300203420ustar00rootroot00000000000000altgraph==0.17 appdirs==1.4.4 attrs==20.3.0 bcrypt==3.2.0 cffi==1.14.4 cryptography==3.3.2 distlib==0.3.1 entrypoints==0.3 filelock==3.0.12 gitdb2==4.0.2 mccabe==0.6.1 more-itertools==8.6.0; python_version >= '3.5' more-itertools==5.0.0; python_version < '3.5' packaging==20.9 pluggy==0.13.1 py==1.10.0 pycodestyle==2.6.0 pycparser==2.20 pyflakes==2.2.0 PyNaCl==1.4.0 pyparsing==2.4.7 pyrsistent==0.16.0 smmap==3.0.4 smmap2==3.0.1 toml==0.10.1 tox==3.21.2 virtualenv==20.4.0 wcwidth==0.2.5 compose-1.29.2/requirements.txt000066400000000000000000000007621404620552300165440ustar00rootroot00000000000000backports.shutil_get_terminal_size==1.0.0 cached-property==1.5.1; python_version < '3.8' certifi==2020.6.20 chardet==3.0.4 colorama==0.4.3; sys_platform == 'win32' distro==1.5.0 docker==5.0.0 docker-pycreds==0.4.0 dockerpty==0.4.1 docopt==0.6.2 idna==2.10 ipaddress==1.0.23 jsonschema==3.2.0 paramiko==2.7.1 PySocks==1.7.1 python-dotenv==0.17.0 pywin32==227; sys_platform == 'win32' PyYAML==5.4.1 requests==2.24.0 texttable==1.6.2 urllib3==1.25.10; python_version == '3.3' websocket-client==0.57.0 compose-1.29.2/script/000077500000000000000000000000001404620552300145575ustar00rootroot00000000000000compose-1.29.2/script/build/000077500000000000000000000000001404620552300156565ustar00rootroot00000000000000compose-1.29.2/script/build/image000077500000000000000000000005771404620552300166770ustar00rootroot00000000000000#!/bin/bash set -e if [ -z "$1" ]; then >&2 echo "First argument must be image tag." exit 1 fi TAG="$1" VERSION="$(python setup.py --version)" DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)" echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA python setup.py sdist bdist_wheel docker build \ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" \ -t "${TAG}" . compose-1.29.2/script/build/linux000077500000000000000000000006271404620552300167500ustar00rootroot00000000000000#!/bin/bash set -ex ./script/clean DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)" docker build . \ --target bin \ --build-arg DISTRO=debian \ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" \ --output dist/ ARCH=$(uname -m) # Ensure that we output the binary with the same name as we did before mv dist/docker-compose-linux-amd64 "dist/docker-compose-Linux-${ARCH}" compose-1.29.2/script/build/linux-entrypoint000077500000000000000000000021741404620552300211600ustar00rootroot00000000000000#!/bin/bash set -ex CODE_PATH=/code VENV="${CODE_PATH}"/.tox/py37 cd "${CODE_PATH}" mkdir -p dist chmod 777 dist "${VENV}"/bin/pip3 install -q -r requirements-build.txt # TODO(ulyssessouza) To check if really needed if [ -z "${DOCKER_COMPOSE_GITSHA}" ]; then DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)" fi echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA export PATH="${CODE_PATH}/pyinstaller:${PATH}" if [ ! 
-z "${BUILD_BOOTLOADER}" ]; then # Build bootloader for alpine; develop is the main branch git clone --single-branch --branch develop https://github.com/pyinstaller/pyinstaller.git /tmp/pyinstaller cd /tmp/pyinstaller/bootloader # Checkout commit corresponding to version in requirements-build git checkout v4.1 "${VENV}"/bin/python3 ./waf configure --no-lsb all "${VENV}"/bin/pip3 install .. cd "${CODE_PATH}" rm -Rf /tmp/pyinstaller else echo "NOT compiling bootloader!!!" fi "${VENV}"/bin/pyinstaller --exclude-module pycrypto --exclude-module PyInstaller docker-compose.spec ls -la dist/ ldd dist/docker-compose mv dist/docker-compose /usr/local/bin docker-compose version compose-1.29.2/script/build/osx000077500000000000000000000015551404620552300164230ustar00rootroot00000000000000#!/bin/bash set -ex TOOLCHAIN_PATH="$(realpath $(dirname $0)/../../build/toolchain)" rm -rf venv virtualenv -p "${TOOLCHAIN_PATH}"/bin/python3 venv venv/bin/pip install -r requirements-indirect.txt venv/bin/pip install -r requirements.txt venv/bin/pip install -r requirements-build.txt venv/bin/pip install --no-deps . DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)" echo "${DOCKER_COMPOSE_GITSHA}" > compose/GITSHA # Build as a folder for macOS Catalina. venv/bin/pyinstaller docker-compose_darwin.spec dist/docker-compose-Darwin-x86_64/docker-compose version (cd dist/docker-compose-Darwin-x86_64/ && tar zcvf ../docker-compose-Darwin-x86_64.tgz .) rm -rf dist/docker-compose-Darwin-x86_64 # Build static binary for legacy. venv/bin/pyinstaller docker-compose.spec mv dist/docker-compose dist/docker-compose-Darwin-x86_64 dist/docker-compose-Darwin-x86_64 version compose-1.29.2/script/build/test-image000077500000000000000000000006241404620552300176450ustar00rootroot00000000000000#!/bin/bash set -e if [ -z "$1" ]; then >&2 echo "First argument must be image tag." exit 1 fi TAG="$1" IMAGE="docker/compose-tests" DOCKER_COMPOSE_GITSHA="$(script/build/write-git-sha)" docker build -t "${IMAGE}:${TAG}" . \ --target build \ --build-arg DISTRO="debian" \ --build-arg GIT_COMMIT="${DOCKER_COMPOSE_GITSHA}" docker tag "${IMAGE}":"${TAG}" "${IMAGE}":latest compose-1.29.2/script/build/windows.ps1000066400000000000000000000033221404620552300177750ustar00rootroot00000000000000# Builds the Windows binary. # # From a fresh 64-bit Windows 10 install, prepare the system as follows: # # 1. Install Git: # # http://git-scm.com/download/win # # 2. Install Python 3.9.x: # # https://www.python.org/downloads/ # # 3. Append ";C:\Python39;C:\Python39\Scripts" to the "Path" environment variable: # # https://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/sysdm_advancd_environmnt_addchange_variable.mspx?mfr=true # # 4. In Powershell, run the following commands: # # $ pip install 'virtualenv==20.2.2' # $ Set-ExecutionPolicy -Scope CurrentUser RemoteSigned # # 5. Clone the repository: # # $ git clone https://github.com/docker/compose.git # $ cd compose # # 6. 
Build the binary: # # .\script\build\windows.ps1 $ErrorActionPreference = "Stop" # Remove virtualenv if (Test-Path venv) { Remove-Item -Recurse -Force .\venv } # Remove .pyc files Get-ChildItem -Recurse -Include *.pyc | foreach ($_) { Remove-Item $_.FullName } # Create virtualenv virtualenv -p C:\Python39\python.exe .\venv # pip and pyinstaller generate lots of warnings, so we need to ignore them $ErrorActionPreference = "Continue" .\venv\Scripts\pip install pypiwin32==223 .\venv\Scripts\pip install -r requirements-indirect.txt .\venv\Scripts\pip install -r requirements.txt .\venv\Scripts\pip install --no-deps . .\venv\Scripts\pip install -r requirements-build.txt git rev-parse --short HEAD | out-file -encoding ASCII compose\GITSHA # Build binary .\venv\Scripts\pyinstaller .\docker-compose.spec $ErrorActionPreference = "Stop" Move-Item -Force .\dist\docker-compose.exe .\dist\docker-compose-Windows-x86_64.exe .\dist\docker-compose-Windows-x86_64.exe --version compose-1.29.2/script/build/write-git-sha000077500000000000000000000007321404620552300202720ustar00rootroot00000000000000#!/bin/bash # # Write the current commit sha to the file GITSHA. This file is included in # packaging so that `docker-compose version` can include the git sha. # sets to 'unknown' and echoes a message if the command is not successful DOCKER_COMPOSE_GITSHA="$(git rev-parse --short HEAD)" if [[ "${?}" != "0" ]]; then echo "Couldn't get revision of the git repository. Setting to 'unknown' instead" DOCKER_COMPOSE_GITSHA="unknown" fi echo "${DOCKER_COMPOSE_GITSHA}" compose-1.29.2/script/ci000077500000000000000000000002771404620552300151060ustar00rootroot00000000000000#!/bin/bash # # Backwards compatibility for jenkins # # TODO: remove this script after all current PRs and jenkins are updated with # the new script/test/ci change set -e exec script/test/ci compose-1.29.2/script/circle/000077500000000000000000000000001404620552300160205ustar00rootroot00000000000000compose-1.29.2/script/circle/bintray-deploy.sh000077500000000000000000000032031404620552300213170ustar00rootroot00000000000000#!/bin/bash curl -f -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X GET \ https://api.bintray.com/repos/docker-compose/${CIRCLE_BRANCH} if test $? -ne 0; then echo "Bintray repository ${CIRCLE_BRANCH} does not exist ; abandoning upload attempt" exit 0 fi curl -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X POST \ -d "{\ \"name\": \"${PKG_NAME}\", \"desc\": \"auto\", \"licenses\": [\"Apache-2.0\"], \ \"vcs_url\": \"${CIRCLE_REPOSITORY_URL}\" \ }" -H "Content-Type: application/json" \ https://api.bintray.com/packages/docker-compose/${CIRCLE_BRANCH} curl -u$BINTRAY_USERNAME:$BINTRAY_API_KEY -X POST -d "{\ \"name\": \"$CIRCLE_BRANCH\", \ \"desc\": \"Automated build of the ${CIRCLE_BRANCH} branch.\", \ }" -H "Content-Type: application/json" \ https://api.bintray.com/packages/docker-compose/${CIRCLE_BRANCH}/${PKG_NAME}/versions curl -f -T dist/docker-compose-${OS_NAME}-x86_64 -u$BINTRAY_USERNAME:$BINTRAY_API_KEY \ -H "X-Bintray-Package: ${PKG_NAME}" -H "X-Bintray-Version: $CIRCLE_BRANCH" \ -H "X-Bintray-Override: 1" -H "X-Bintray-Publish: 1" -X PUT \ https://api.bintray.com/content/docker-compose/${CIRCLE_BRANCH}/docker-compose-${OS_NAME}-x86_64 || exit 1 # Upload folder format of docker-compose for macOS in addition to binary. 
if [ "${OS_NAME}" == "Darwin" ]; then curl -f -T dist/docker-compose-${OS_NAME}-x86_64.tgz -u$BINTRAY_USERNAME:$BINTRAY_API_KEY \ -H "X-Bintray-Package: ${PKG_NAME}" -H "X-Bintray-Version: $CIRCLE_BRANCH" \ -H "X-Bintray-Override: 1" -H "X-Bintray-Publish: 1" -X PUT \ https://api.bintray.com/content/docker-compose/${CIRCLE_BRANCH}/docker-compose-${OS_NAME}-x86_64.tgz || exit 1 fi compose-1.29.2/script/clean000077500000000000000000000003101404620552300155610ustar00rootroot00000000000000#!/bin/sh set -e find . -type f -name '*.pyc' -delete rm -rf .coverage-binfiles find . -name .coverage.* -delete find . -name __pycache__ -delete rm -rf docs/_site build dist docker-compose.egg-info compose-1.29.2/script/docs/000077500000000000000000000000001404620552300155075ustar00rootroot00000000000000compose-1.29.2/script/docs/check_help.py000077500000000000000000000016101404620552300201470ustar00rootroot00000000000000#!/usr/bin/env python3 import glob import os.path import re import subprocess USAGE_RE = re.compile(r"```.*?\nUsage:.*?```", re.MULTILINE | re.DOTALL) USAGE_IN_CMD_RE = re.compile(r"^Usage:.*", re.MULTILINE | re.DOTALL) HELP_CMD = "docker run --rm docker/compose:latest %s --help" for file in glob.glob("compose/reference/*.md"): with open(file) as f: data = f.read() if not USAGE_RE.search(data): print("Not a command:", file) continue subcmd = os.path.basename(file).replace(".md", "") if subcmd == "overview": continue print(f"Found {subcmd}: {file}") help_cmd = HELP_CMD % subcmd help = subprocess.check_output(help_cmd.split()) help = help.decode("utf-8") help = USAGE_IN_CMD_RE.findall(help)[0] help = help.strip() data = USAGE_RE.sub(f"```none\n{help}\n```", data) with open(file, "w") as f: f.write(data) compose-1.29.2/script/release/000077500000000000000000000000001404620552300161775ustar00rootroot00000000000000compose-1.29.2/script/release/README.md000066400000000000000000000022761404620552300174650ustar00rootroot00000000000000# Release HOWTO The release process is fully automated by `Release.Jenkinsfile`. ## Usage 1. In the appropriate branch, run `./script/release/release.py tag ` By appropriate, we mean for a version `1.26.0` or `1.26.0-rc1` you should run the script in the `1.26.x` branch. The script should check the above then ask for changelog modifications. After the executions, you should have a commit with the proper bumps for `docker-compose version` and `run.sh` 2. Run `git push --tags upstream ` This should trigger a new CI build on the new tag. When the CI finishes with the tests and builds a new draft release would be available on github's releases page. 3. Check and confirm the release on github's release page. 4. In case of a GA version, please update `docker-compose`s release notes and version on [github documentation repository](https://github.com/docker/docker.github.io): - [Release Notes](https://github.com/docker/docker.github.io/blob/master/compose/release-notes.md) - [Config version](https://github.com/docker/docker.github.io/blob/master/_config.yml) - [Config authoring version](https://github.com/docker/docker.github.io/blob/master/_config_authoring.yml) compose-1.29.2/script/release/cherry-pick-pr000077500000000000000000000010321404620552300207600ustar00rootroot00000000000000#!/bin/bash # # Cherry-pick a PR into the release branch # set -e set -o pipefail function usage() { >&2 cat << EOM Cherry-pick commits from a github pull request. 
compose-1.29.2/script/release/cherry-pick-pr000077500000000000000000000010321404620552300207600ustar00rootroot00000000000000#!/bin/bash
#
# Cherry-pick a PR into the release branch
#

set -e
set -o pipefail

function usage() {
    >&2 cat << EOM
Cherry-pick commits from a github pull request.

Usage: $0 <github PR number>
EOM
    exit 1
}

[ -n "$1" ] || usage

if [ -z "$(command -v hub 2> /dev/null)" ]; then
    >&2 echo "$0 requires https://hub.github.com/."
    >&2 echo "Please install it and make sure it is available on your \$PATH."
    exit 2
fi

REPO=docker/compose
GITHUB=https://github.com/$REPO/pull

PR=$1
url="$GITHUB/$PR"

hub am -3 $url
compose-1.29.2/script/release/const.py000066400000000000000000000001141404620552300176730ustar00rootroot00000000000000import os

REPO_ROOT = os.path.join(os.path.dirname(__file__), '..', '..')
compose-1.29.2/script/release/generate_changelog.sh000077500000000000000000000024021404620552300223350ustar00rootroot00000000000000#!/bin/bash
set -e
set -x

## Usage :
## changelog PREVIOUS_TAG..HEAD

# configure refs so we get pull-requests metadata
git config --add remote.origin.fetch +refs/pull/*/head:refs/remotes/origin/pull/*
git fetch origin

RANGE=${1:-"$(git describe --tags --abbrev=0 HEAD^)..HEAD"}
echo "Generate changelog for range ${RANGE}"
echo

pullrequests() {
    for commit in $(git log ${RANGE} --format='format:%H'); do
        # Get the oldest remotes/origin/pull/* branch to include this commit, i.e. the one to introduce it
        git branch -a --sort=committerdate --contains $commit --list 'origin/pull/*' | head -1 | cut -d'/' -f4
    done
}

changes=$(pullrequests | uniq)

echo "pull requests merged within range:"
echo $changes

echo '#Features' > FEATURES.md
echo '#Bugs' > BUGS.md
for pr in $changes; do
    curl -fs -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/repos/docker/compose/pulls/${pr} -o PR.json
    cat PR.json | jq -r ' select( .labels[].name | contains("kind/feature") ) | "- "+.title' >> FEATURES.md
    cat PR.json | jq -r ' select( .labels[].name | contains("kind/bug") ) | "- "+.title' >> BUGS.md
done

echo ${TAG_NAME} > CHANGELOG.md
echo >> CHANGELOG.md
cat FEATURES.md >> CHANGELOG.md
echo >> CHANGELOG.md
cat BUGS.md >> CHANGELOG.md
compose-1.29.2/script/release/release.md.tmpl000066400000000000000000000023151404620552300211150ustar00rootroot00000000000000If you're a Mac or Windows user, the best way to install Compose and keep it up-to-date is **[Docker Desktop for Mac and Windows](https://www.docker.com/products/docker-desktop)**.

Docker Desktop will automatically install the latest version of Docker Engine for you.

Alternatively, you can use the usual commands to install or upgrade Compose:

```
curl -L https://github.com/docker/compose/releases/download/{{version}}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
```

See the [install docs](https://docs.docker.com/compose/install/) for more install options and instructions.

## Compose file format compatibility matrix

| Compose file format | Docker Engine |
| --- | --- |
{% for engine, formats in compat_matrix.items() -%}
| {% for format in formats %}{{format}}{% if not loop.last %}, {% endif %}{% endfor %} | {{engine}}+ |
{% endfor -%}

## Changes

{{changelog}}

Thanks to {% for name in contributors %}@{{name}}{% if not loop.last %}, {% endif %}{% endfor %} for contributing to this release!
## Integrity check

Binary name | SHA-256 sum |
| --- | --- |
{% for filename, sha in integrity.items() -%}
| `{{filename}}` | `{{sha[1]}}` |
{% endfor -%}
compose-1.29.2/script/release/release.py000077500000000000000000000057041404620552300202020ustar00rootroot00000000000000#!/usr/bin/env python3
import re

import click
from git import Repo
from utils import update_init_py_version
from utils import update_run_sh_version
from utils import yesno

VALID_VERSION_PATTERN = re.compile(r"^\d+\.\d+\.\d+(-rc\d+)?$")


class Version(str):
    def matching_groups(self):
        match = VALID_VERSION_PATTERN.match(self)
        if not match:
            # return an empty tuple so len() in validate() works
            return ()
        return match.groups()

    def is_ga_version(self):
        groups = self.matching_groups()
        if not groups:
            return False
        # the pattern has a single capture group: the optional -rcN suffix
        rc_suffix = groups[0]
        return not rc_suffix

    def validate(self):
        return len(self.matching_groups()) > 0

    def branch_name(self):
        if not self.validate():
            return None
        rc_part = self.matching_groups()[0]
        ver = self
        if rc_part:
            ver = ver[:-len(rc_part)]
        tokens = ver.split(".")
        tokens[-1] = 'x'
        return ".".join(tokens)
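# Expected behaviour of Version for some example inputs:
#   Version('1.26.0').is_ga_version()      -> True
#   Version('1.26.0-rc1').is_ga_version()  -> False
#   Version('1.26.0-rc1').branch_name()    -> '1.26.x'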
def create_bump_commit(repository, version):
    print('Creating bump commit...')
    repository.commit('-a', '-s', '-m "Bump {}"'.format(version), '--no-verify')


def validate_environment(version, repository):
    if not version.validate():
        print('Version "{}" has an invalid format. This should follow D+.D+.D+(-rcD+). '
              'Like: 1.26.0 or 1.26.0-rc1'.format(version))
        return False
    expected_branch = version.branch_name()
    if str(repository.active_branch) != expected_branch:
        print('Cannot tag in this branch with version "{}". '
              'Please checkout "{}" to tag'.format(version, version.branch_name()))
        return False
    return True


@click.group()
def cli():
    pass


@cli.command()
@click.argument('version')
def tag(version):
    """ Updates the version related files and tag """
    repo = Repo(".")
    version = Version(version)
    if not validate_environment(version, repo):
        return

    update_init_py_version(version)
    update_run_sh_version(version)

    input('Please add the release notes to the CHANGELOG.md file, then press Enter to continue.')
    proceed = False
    while not proceed:
        print(repo.git.diff())
        proceed = yesno('Are these changes ok? y/N ', default=False)

    if repo.git.diff():
        create_bump_commit(repo.git, version)
    else:
        print('No changes to commit. Exiting...')
        return

    repo.create_tag(version)

    print('Please, check the changes. If everything is OK, you just need to push with:\n'
          '$ git push --tags upstream {}'.format(version.branch_name()))


@cli.command()
@click.argument('version')
def push_latest(version):
    """ TODO Pushes the latest tag pointing to a certain GA version """
    raise NotImplementedError


@cli.command()
@click.argument('version')
def ghtemplate(version):
    """ TODO Generates the github release page content """
    version = Version(version)
    raise NotImplementedError


if __name__ == '__main__':
    cli()
compose-1.29.2/script/release/utils.py000066400000000000000000000022501404620552300177100ustar00rootroot00000000000000import os
import re

from const import REPO_ROOT


def update_init_py_version(version):
    path = os.path.join(REPO_ROOT, 'compose', '__init__.py')
    with open(path) as f:
        contents = f.read()
    contents = re.sub(r"__version__ = '[0-9a-z.-]+'", "__version__ = '{}'".format(version), contents)
    with open(path, 'w') as f:
        f.write(contents)


def update_run_sh_version(version):
    path = os.path.join(REPO_ROOT, 'script', 'run', 'run.sh')
    with open(path) as f:
        contents = f.read()
    contents = re.sub(r'VERSION="[0-9a-z.-]+"', 'VERSION="{}"'.format(version), contents)
    with open(path, 'w') as f:
        f.write(contents)


def yesno(prompt, default=None):
    """
    Prompt the user for a yes or no.

    Can optionally specify a default value, which will only be
    used if they enter a blank line.

    Unrecognised input (anything other than "y", "n", "yes",
    "no" or "") will return None.
    """
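    # Illustrative use (as in release.py above):
    #   yesno('Are these changes ok? y/N ', default=False)
    # -> True for "y"/"yes", False for "n"/"no", the default for a blank line, else None.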
    answer = input(prompt).strip().lower()

    if answer == "y" or answer == "yes":
        return True
    elif answer == "n" or answer == "no":
        return False
    elif answer == "":
        return default
    else:
        return None
compose-1.29.2/script/release/utils.sh000066400000000000000000000006241404620552300176750ustar00rootroot00000000000000#!/bin/bash
#
# Util functions for release scripts
#

set -e
set -o pipefail

function browser() {
    local url=$1
    xdg-open $url || open $url
}

function find_remote() {
    local url=$1
    for remote in $(git remote); do
        git config --get remote.${remote}.url | grep $url > /dev/null && echo -n $remote
    done
    # Always return true, extra remotes cause it to return false
    true
}
compose-1.29.2/script/run/000077500000000000000000000000001404620552300153635ustar00rootroot00000000000000compose-1.29.2/script/run/run.ps1000066400000000000000000000015751404620552300166240ustar00rootroot00000000000000# Run docker-compose in a container via boot2docker.
#
# The current directory will be mirrored as a volume and additional
# volumes (or any other options) can be mounted by using
# $Env:DOCKER_COMPOSE_OPTIONS.

if ($Env:DOCKER_COMPOSE_VERSION -eq $null -or $Env:DOCKER_COMPOSE_VERSION.Length -eq 0) {
    $Env:DOCKER_COMPOSE_VERSION = "latest"
}

if ($Env:DOCKER_COMPOSE_OPTIONS -eq $null) {
    $Env:DOCKER_COMPOSE_OPTIONS = ""
}

if (-not $Env:DOCKER_HOST) {
    docker-machine env --shell=powershell default | Invoke-Expression
    if (-not $?) { exit $LastExitCode }
}

$local="/$($PWD -replace '^(.):(.*)$', '"$1".ToLower()+"$2".Replace("\","/")' | Invoke-Expression)"
docker run --rm -ti -v /var/run/docker.sock:/var/run/docker.sock -v "${local}:$local" -w "$local" $Env:DOCKER_COMPOSE_OPTIONS "docker/compose:$Env:DOCKER_COMPOSE_VERSION" $args
exit $LastExitCode
compose-1.29.2/script/run/run.sh000077500000000000000000000050311404620552300165250ustar00rootroot00000000000000#!/bin/sh
#
# Run docker-compose in a container
#
# This script will attempt to mirror the host paths by using volumes for the
# following paths:
#   * $(pwd)
#   * $(dirname $COMPOSE_FILE) if it's set
#   * $HOME if it's set
#
# You can add additional volumes (or any docker run options) using
# the $COMPOSE_OPTIONS environment variable.
#
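# An illustrative invocation (the paths and options below are examples only):
#   COMPOSE_OPTIONS="-v /tmp/cache:/tmp/cache" COMPOSE_FILE=./docker-compose.yml \
#       ./script/run/run.sh up -d
#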
set -e

VERSION="1.29.2"
IMAGE="docker/compose:$VERSION"

# Setup options for connecting to docker host
if [ -z "$DOCKER_HOST" ]; then
    DOCKER_HOST='unix:///var/run/docker.sock'
fi
if [ -S "${DOCKER_HOST#unix://}" ]; then
    DOCKER_ADDR="-v ${DOCKER_HOST#unix://}:${DOCKER_HOST#unix://} -e DOCKER_HOST"
else
    DOCKER_ADDR="-e DOCKER_HOST -e DOCKER_TLS_VERIFY -e DOCKER_CERT_PATH"
fi

# Setup volume mounts for compose config and context
if [ "$(pwd)" != '/' ]; then
    VOLUMES="-v $(pwd):$(pwd)"
fi
if [ -n "$COMPOSE_FILE" ]; then
    COMPOSE_OPTIONS="$COMPOSE_OPTIONS -e COMPOSE_FILE=$COMPOSE_FILE"
    compose_dir="$(dirname "$COMPOSE_FILE")"
    # canonicalize dir, do not use realpath or readlink -f
    # since they are not available in some systems (e.g. macOS).
    compose_dir="$(cd "$compose_dir" && pwd)"
fi
if [ -n "$COMPOSE_PROJECT_NAME" ]; then
    COMPOSE_OPTIONS="-e COMPOSE_PROJECT_NAME $COMPOSE_OPTIONS"
fi
if [ -n "$compose_dir" ]; then
    VOLUMES="$VOLUMES -v $compose_dir:$compose_dir"
fi
if [ -n "$HOME" ]; then
    # Pass in HOME to share docker.config and allow ~/-relative paths to work.
    VOLUMES="$VOLUMES -v $HOME:$HOME -e HOME"
fi

i=$#
while [ $i -gt 0 ]; do
    arg=$1
    i=$((i - 1))
    shift

    case "$arg" in
        -f|--file)
            value=$1
            i=$((i - 1))
            shift
            set -- "$@" "$arg" "$value"

            file_dir=$(realpath "$(dirname "$value")")
            VOLUMES="$VOLUMES -v $file_dir:$file_dir"
            ;;
        *)
            set -- "$@" "$arg"
            ;;
    esac
done

# Setup environment variables for compose config and context
ENV_OPTIONS=$(printenv | sed -E "/^PATH=.*/d; s/^/-e /g; s/=.*//g; s/\n/ /g")

# Only allocate tty if we detect one
if [ -t 0 ] && [ -t 1 ]; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -t"
fi

# Always set -i to support piped and terminal input in run/exec
DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS -i"

# Handle userns security
if docker info --format '{{json .SecurityOptions}}' 2>/dev/null | grep -q 'name=userns'; then
    DOCKER_RUN_OPTIONS="$DOCKER_RUN_OPTIONS --userns=host"
fi

# shellcheck disable=SC2086
exec docker run --rm $DOCKER_RUN_OPTIONS $DOCKER_ADDR $COMPOSE_OPTIONS $ENV_OPTIONS $VOLUMES -w "$(pwd)" $IMAGE "$@"
compose-1.29.2/script/setup/000077500000000000000000000000001404620552300157175ustar00rootroot00000000000000compose-1.29.2/script/setup/osx000077500000000000000000000066001404620552300164600ustar00rootroot00000000000000#!/usr/bin/env bash
set -ex

. $(dirname $0)/osx_helpers.sh

DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET:-"$(macos_version)"}
SDK_FETCH=
if ! [ ${DEPLOYMENT_TARGET} == "$(macos_version)" ]; then
    SDK_FETCH=1
    # SDK URL from https://github.com/docker/golang-cross/blob/master/osx-cross.sh
    SDK_URL=https://s3.dockerproject.org/darwin/v2/MacOSX${DEPLOYMENT_TARGET}.sdk.tar.xz
    SDK_SHA1=dd228a335194e3392f1904ce49aff1b1da26ca62
fi

OPENSSL_VERSION=1.1.1h
OPENSSL_URL=https://www.openssl.org/source/openssl-${OPENSSL_VERSION}.tar.gz
OPENSSL_SHA1=8d0d099e8973ec851368c8c775e05e1eadca1794

PYTHON_VERSION=3.9.0
PYTHON_URL=https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz
PYTHON_SHA1=5744a10ba989d2badacbab3c00cdcb83c83106c7

#
# Install prerequisites.
#
if ! [ -x "$(command -v brew)" ]; then
    ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
if ! [ -x "$(command -v grealpath)" ]; then
    brew update > /dev/null
    brew install coreutils
fi
if ! [ -x "$(command -v python3)" ]; then
    brew update > /dev/null
    brew install python3
fi
if ! [ -x "$(command -v virtualenv)" ]; then
    pip3 install virtualenv==20.2.2
fi

#
# Create toolchain directory.
#
BUILD_PATH="$(grealpath $(dirname $0)/../../build)"
mkdir -p ${BUILD_PATH}
TOOLCHAIN_PATH="${BUILD_PATH}/toolchain"
mkdir -p ${TOOLCHAIN_PATH}

#
# Set macOS SDK.
#
if [[ ${SDK_FETCH} && ! -f ${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk/SDKSettings.plist ]]; then
    SDK_PATH=${TOOLCHAIN_PATH}/MacOSX${DEPLOYMENT_TARGET}.sdk
    fetch_tarball ${SDK_URL} ${SDK_PATH} ${SDK_SHA1}
else
    SDK_PATH="$(xcode-select --print-path)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX${DEPLOYMENT_TARGET}.sdk"
fi

#
# Build OpenSSL.
#
OPENSSL_SRC_PATH=${TOOLCHAIN_PATH}/openssl-${OPENSSL_VERSION}
if ! [[ $(${TOOLCHAIN_PATH}/bin/openssl version) == *"${OPENSSL_VERSION}"* ]]; then
    rm -rf ${OPENSSL_SRC_PATH}
    fetch_tarball ${OPENSSL_URL} ${OPENSSL_SRC_PATH} ${OPENSSL_SHA1}
    (
        cd ${OPENSSL_SRC_PATH}
        export MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET}
        export SDKROOT=${SDK_PATH}
        ./Configure darwin64-x86_64-cc --prefix=${TOOLCHAIN_PATH}
        make install_sw install_dev
    )
fi

#
# Build Python.
#
PYTHON_SRC_PATH=${TOOLCHAIN_PATH}/Python-${PYTHON_VERSION}
if ! [[ $(${TOOLCHAIN_PATH}/bin/python3 --version) == *"${PYTHON_VERSION}"* ]]; then
    rm -rf ${PYTHON_SRC_PATH}
    fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}
    (
        cd ${PYTHON_SRC_PATH}
        ./configure --prefix=${TOOLCHAIN_PATH} \
            --enable-ipv6 --without-ensurepip --with-dtrace --without-gcc \
            --datarootdir=${TOOLCHAIN_PATH}/share \
            --datadir=${TOOLCHAIN_PATH}/share \
            --enable-framework=${TOOLCHAIN_PATH}/Frameworks \
            --with-openssl=${TOOLCHAIN_PATH} \
            MACOSX_DEPLOYMENT_TARGET=${DEPLOYMENT_TARGET} \
            CFLAGS="-isysroot ${SDK_PATH} -I${TOOLCHAIN_PATH}/include" \
            CPPFLAGS="-I${SDK_PATH}/usr/include -I${TOOLCHAIN_PATH}/include" \
            LDFLAGS="-isysroot ${SDK_PATH} -L ${TOOLCHAIN_PATH}/lib"
        make -j 4
        make install PYTHONAPPSDIR=${TOOLCHAIN_PATH}
        make frameworkinstallextras PYTHONAPPSDIR=${TOOLCHAIN_PATH}/share
    )
fi

#
# Smoke test built Python.
#
openssl_version ${TOOLCHAIN_PATH}

echo ""
echo "*** Targeting macOS: ${DEPLOYMENT_TARGET}"
echo "*** Using SDK ${SDK_PATH}"
echo "*** Using $(python3_version ${TOOLCHAIN_PATH})"
echo "*** Using $(openssl_version ${TOOLCHAIN_PATH})"
compose-1.29.2/script/setup/osx_helpers.sh000066400000000000000000000014111404620552300206030ustar00rootroot00000000000000#!/usr/bin/env bash

# Check file's ($1) SHA1 ($2).
check_sha1() {
    echo -n "$2 *$1" | shasum -c -
}

# Download URL ($1) to path ($2).
download() {
    curl -L $1 -o $2
}

# Extract tarball ($1) in folder ($2).
extract() {
    tar xf $1 -C $2
}

# Download URL ($1), check SHA1 ($3), and extract utility ($2).
fetch_tarball() {
    url=$1
    tarball=$2.tarball
    sha1=$3
    download $url $tarball
    check_sha1 $tarball $sha1
    extract $tarball $(dirname $tarball)
}
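# Illustrative use (this exact call appears in script/setup/osx):
#   fetch_tarball ${PYTHON_URL} ${PYTHON_SRC_PATH} ${PYTHON_SHA1}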
set -ex TAG="docker-compose:alpine-$(git rev-parse --short HEAD)" # By default use the Dockerfile, but can be overridden to use an alternative file # e.g DOCKERFILE=Dockerfile.s390x script/test/default DOCKERFILE="${DOCKERFILE:-Dockerfile}" DOCKER_BUILD_TARGET="${DOCKER_BUILD_TARGET:-build}" rm -rf coverage-html # Create the host directory so it's owned by $USER mkdir -p coverage-html docker build -f "${DOCKERFILE}" -t "${TAG}" --target "${DOCKER_BUILD_TARGET}" . GIT_VOLUME="--volume=$(pwd)/.git:/code/.git" . script/test/all compose-1.29.2/script/test/versions.py000077500000000000000000000114361404620552300177700ustar00rootroot00000000000000#!/usr/bin/env python """ Query the github API for the git tags of a project, and return a list of version tags for recent releases, or the default release. The default release is the most recent non-RC version. Recent is a list of unique major.minor versions, where each is the most recent version in the series. For example, if the list of versions is: 1.8.0-rc2 1.8.0-rc1 1.7.1 1.7.0 1.7.0-rc1 1.6.2 1.6.1 `default` would return `1.7.1` and `recent -n 3` would return `1.8.0-rc2 1.7.1 1.6.2` """ import argparse import itertools import operator import sys from collections import namedtuple import requests GITHUB_API = 'https://api.github.com/repos' STAGES = ['tp', 'beta', 'rc'] class Version(namedtuple('_Version', 'major minor patch stage edition')): @classmethod def parse(cls, version): edition = None version = version.lstrip('v') version, _, stage = version.partition('-') if stage: if not any(marker in stage for marker in STAGES): edition = stage stage = None elif '-' in stage: edition, stage = stage.split('-') major, minor, patch = version.split('.', 3) return cls(major, minor, patch, stage, edition) @property def major_minor(self): return self.major, self.minor @property def order(self): """Return a representation that allows this object to be sorted correctly with the default comparator. """ # non-GA releases should appear before GA releases # Order: tp -> beta -> rc -> GA if self.stage: for st in STAGES: if st in self.stage: stage = (STAGES.index(st), self.stage) break else: stage = (len(STAGES),) return (int(self.major), int(self.minor), int(self.patch)) + stage def __str__(self): stage = '-{}'.format(self.stage) if self.stage else '' edition = '-{}'.format(self.edition) if self.edition else '' return '.'.join(map(str, self[:3])) + edition + stage BLACKLIST = [ # List of versions known to be broken and should not be used Version.parse('18.03.0-ce-rc2'), ] def group_versions(versions): """Group versions by `major.minor` releases. Example: >>> group_versions([ Version(1, 0, 0), Version(2, 0, 0, 'rc1'), Version(2, 0, 0), Version(2, 1, 0), ]) [ [Version(1, 0, 0)], [Version(2, 0, 0), Version(2, 0, 0, 'rc1')], [Version(2, 1, 0)], ] """ return list( list(releases) for _, releases in itertools.groupby(versions, operator.attrgetter('major_minor')) ) def get_latest_versions(versions, num=1): """Return a list of the most recent versions for each major.minor version group. 
""" versions = group_versions(versions) num = min(len(versions), num) return [versions[index][0] for index in range(num)] def get_default(versions): """Return a :class:`Version` for the latest GA version.""" for version in versions: if not version.stage: return version def get_versions(tags): for tag in tags: try: v = Version.parse(tag['name']) if v in BLACKLIST: continue yield v except ValueError: print("Skipping invalid tag: {name}".format(**tag), file=sys.stderr) def get_github_releases(projects): """Query the Github API for a list of version tags and return them in sorted order. See https://developer.github.com/v3/repos/#list-tags """ versions = [] for project in projects: url = '{}/{}/tags'.format(GITHUB_API, project) response = requests.get(url) response.raise_for_status() versions.extend(get_versions(response.json())) return sorted(versions, reverse=True, key=operator.attrgetter('order')) def parse_args(argv): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('project', help="Github project name (ex: docker/docker)") parser.add_argument('command', choices=['recent', 'default']) parser.add_argument('-n', '--num', type=int, default=2, help="Number of versions to return from `recent`") return parser.parse_args(argv) def main(argv=None): args = parse_args(argv) versions = get_github_releases(args.project.split(',')) if args.command == 'recent': print(' '.join(map(str, get_latest_versions(versions, args.num)))) elif args.command == 'default': print(get_default(versions)) else: raise ValueError("Unknown command {}".format(args.command)) if __name__ == "__main__": main() compose-1.29.2/setup.cfg000066400000000000000000000000321404620552300150670ustar00rootroot00000000000000[bdist_wheel] universal=1 compose-1.29.2/setup.py000066400000000000000000000064641404620552300147770ustar00rootroot00000000000000#!/usr/bin/env python import codecs import os import re import sys import pkg_resources from setuptools import find_packages from setuptools import setup def read(*parts): path = os.path.join(os.path.dirname(__file__), *parts) with codecs.open(path, encoding='utf-8') as fobj: return fobj.read() def find_version(*file_paths): version_file = read(*file_paths) version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") install_requires = [ 'docopt >= 0.6.1, < 1', 'PyYAML >= 3.10, < 6', 'requests >= 2.20.0, < 3', 'texttable >= 0.9.0, < 2', 'websocket-client >= 0.32.0, < 1', 'distro >= 1.5.0, < 2', 'docker[ssh] >= 5', 'dockerpty >= 0.4.1, < 1', 'jsonschema >= 2.5.1, < 4', 'python-dotenv >= 0.13.0, < 1', ] tests_require = [ 'ddt >= 1.2.2, < 2', 'pytest < 6', ] if sys.version_info[:2] < (3, 4): tests_require.append('mock >= 1.0.1, < 4') extras_require = { ':python_version < "3.5"': ['backports.ssl_match_hostname >= 3.5, < 4'], ':python_version < "3.8"': ['cached-property >= 1.2.0, < 2'], ':sys_platform == "win32"': ['colorama >= 0.4, < 1'], 'socks': ['PySocks >= 1.5.6, != 1.5.7, < 2'], 'tests': tests_require, } try: if 'bdist_wheel' not in sys.argv: for key, value in extras_require.items(): if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]): install_requires.extend(value) except Exception as e: print("Failed to compute platform dependencies: {}. 
".format(e) + "All dependencies will be installed as a result.", file=sys.stderr) for key, value in extras_require.items(): if key.startswith(':'): install_requires.extend(value) setup( name='docker-compose', version=find_version("compose", "__init__.py"), description='Multi-container orchestration for Docker', long_description=read('README.md'), long_description_content_type='text/markdown', url='https://www.docker.com/', project_urls={ 'Documentation': 'https://docs.docker.com/compose/overview', 'Changelog': 'https://github.com/docker/compose/blob/release/CHANGELOG.md', 'Source': 'https://github.com/docker/compose', 'Tracker': 'https://github.com/docker/compose/issues', }, author='Docker, Inc.', license='Apache License 2.0', packages=find_packages(exclude=['tests.*', 'tests']), include_package_data=True, install_requires=install_requires, extras_require=extras_require, tests_require=tests_require, python_requires='>=3.4', entry_points={ 'console_scripts': ['docker-compose=compose.cli.main:main'], }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ], ) compose-1.29.2/tests/000077500000000000000000000000001404620552300144155ustar00rootroot00000000000000compose-1.29.2/tests/__init__.py000066400000000000000000000000721404620552300165250ustar00rootroot00000000000000import unittest # NOQA from unittest import mock # NOQA compose-1.29.2/tests/acceptance/000077500000000000000000000000001404620552300165035ustar00rootroot00000000000000compose-1.29.2/tests/acceptance/__init__.py000066400000000000000000000000001404620552300206020ustar00rootroot00000000000000compose-1.29.2/tests/acceptance/cli_test.py000066400000000000000000003644301404620552300206750ustar00rootroot00000000000000import datetime import json import os.path import re import signal import subprocess import time from collections import Counter from collections import namedtuple from functools import reduce from operator import attrgetter import pytest import yaml from docker import errors from .. 
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from ..helpers import create_host_file
from compose.cli.command import get_project
from compose.config.errors import DuplicateOverrideFileFound
from compose.const import COMPOSE_SPEC as VERSION
from compose.const import COMPOSEFILE_V1 as V1
from compose.container import Container
from compose.project import OneOffFilter
from compose.utils import nanoseconds_from_time_seconds
from tests.integration.testcases import DockerClientTestCase
from tests.integration.testcases import get_links
from tests.integration.testcases import is_cluster
from tests.integration.testcases import no_cluster
from tests.integration.testcases import pull_busybox
from tests.integration.testcases import SWARM_SKIP_RM_VOLUMES

DOCKER_COMPOSE_EXECUTABLE = 'docker-compose'

ProcessResult = namedtuple('ProcessResult', 'stdout stderr')

BUILD_CACHE_TEXT = 'Using cache'
BUILD_PULL_TEXT = 'Status: Image is up to date for busybox:1.27.2'

COMPOSE_COMPATIBILITY_DICT = {
    'version': str(VERSION),
    'volumes': {'foo': {'driver': 'default'}},
    'networks': {'bar': {}},
    'services': {
        'foo': {
            'command': '/bin/true',
            'image': 'alpine:3.10.1',
            'scale': 3,
            'restart': 'always:7',
            'mem_limit': '300M',
            'mem_reservation': '100M',
            'cpus': 0.7,
            'volumes': ['foo:/bar:rw'],
            'networks': {'bar': None},
        }
    },
}


def start_process(base_dir, options, executable=None, env=None):
    executable = executable or DOCKER_COMPOSE_EXECUTABLE
    proc = subprocess.Popen(
        [executable] + options,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=base_dir,
        env=env,
    )
    print("Running process: %s" % proc.pid)
    return proc


def wait_on_process(proc, returncode=0, stdin=None):
    stdout, stderr = proc.communicate(input=stdin)
    if proc.returncode != returncode:
        print("Stderr: {}".format(stderr))
        print("Stdout: {}".format(stdout))
        assert proc.returncode == returncode
    return ProcessResult(stdout.decode('utf-8'), stderr.decode('utf-8'))


def dispatch(base_dir, options, project_options=None, returncode=0, stdin=None, executable=None, env=None):
    project_options = project_options or []
    proc = start_process(base_dir, project_options + options, executable=executable, env=env)
    return wait_on_process(proc, returncode=returncode, stdin=stdin)


def wait_on_condition(condition, delay=0.1, timeout=40):
    start_time = time.time()
    while not condition():
        if time.time() - start_time > timeout:
            raise AssertionError("Timeout: %s" % condition)
        time.sleep(delay)


def kill_service(service):
    for container in service.containers():
        if container.is_running:
            container.kill()


class ContainerCountCondition:

    def __init__(self, project, expected):
        self.project = project
        self.expected = expected

    def __call__(self):
        return len([c for c in self.project.containers() if c.is_running]) == self.expected

    def __str__(self):
        return "waiting for counter count == %s" % self.expected


class ContainerStateCondition:

    def __init__(self, client, name, status):
        self.client = client
        self.name = name
        self.status = status

    def __call__(self):
        try:
            if self.name.endswith('*'):
                ctnrs = self.client.containers(all=True, filters={'name': self.name[:-1]})
                if len(ctnrs) > 0:
                    container = self.client.inspect_container(ctnrs[0]['Id'])
                else:
                    return False
            else:
                container = self.client.inspect_container(self.name)
            return container['State']['Status'] == self.status
        except errors.APIError:
            return False

    def __str__(self):
        return "waiting for container to be %s" % self.status
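# Illustrative use of the condition helpers above ('composetest_simple_1' is a
# hypothetical container name; compare test_down further down in this file):
#   wait_on_condition(ContainerCountCondition(project, 2))
#   wait_on_condition(ContainerStateCondition(client, 'composetest_simple_1', 'running'))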
class CLITestCase(DockerClientTestCase):

    def setUp(self):
        super().setUp()
        self.base_dir = 'tests/fixtures/simple-composefile'
        self.override_dir = None

    def tearDown(self):
        if self.base_dir:
            self.project.kill()
            self.project.down(None, True)

            for container in self.project.containers(stopped=True, one_off=OneOffFilter.only):
                container.remove(force=True)
            networks = self.client.networks()
            for n in networks:
                if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
                    self.client.remove_network(n['Name'])
            volumes = self.client.volumes().get('Volumes') or []
            for v in volumes:
                if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name)):
                    self.client.remove_volume(v['Name'])
        if hasattr(self, '_project'):
            del self._project

        super().tearDown()

    @property
    def project(self):
        # Hack: allow project to be overridden
        if not hasattr(self, '_project'):
            self._project = get_project(self.base_dir, override_dir=self.override_dir)
        return self._project

    def dispatch(self, options, project_options=None, returncode=0, stdin=None):
        return dispatch(self.base_dir, options, project_options, returncode, stdin)

    def execute(self, container, cmd):
        # Remove once Hijack and CloseNotifier sign a peace treaty
        self.client.close()
        exc = self.client.exec_create(container.id, cmd)
        self.client.exec_start(exc)
        return self.client.exec_inspect(exc)['ExitCode']

    def lookup(self, container, hostname):
        return self.execute(container, ["nslookup", hostname]) == 0

    def test_help(self):
        self.base_dir = 'tests/fixtures/no-composefile'
        result = self.dispatch(['help', 'up'], returncode=0)
        assert 'Usage: up [options] [--scale SERVICE=NUM...] [--] [SERVICE...]' in result.stdout
        # Prevent tearDown from trying to create a project
        self.base_dir = None

    def test_quiet_build(self):
        self.base_dir = 'tests/fixtures/build-args'
        result = self.dispatch(['build'], None)
        quietResult = self.dispatch(['build', '-q'], None)
        assert result.stdout != ""
        assert quietResult.stdout == ""

    def test_help_nonexistent(self):
        self.base_dir = 'tests/fixtures/no-composefile'
        result = self.dispatch(['help', 'foobar'], returncode=1)
        assert 'No such command' in result.stderr
        self.base_dir = None

    def test_shorthand_host_opt(self):
        self.dispatch(
            ['-H={}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
             'up', '-d'],
            returncode=0
        )

    def test_shorthand_host_opt_interactive(self):
        self.dispatch(
            ['-H={}'.format(os.environ.get('DOCKER_HOST', 'unix://')),
             'run', 'another', 'ls'],
            returncode=0
        )

    def test_host_not_reachable(self):
        result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
        assert "Couldn't connect to Docker daemon" in result.stderr

    def test_host_not_reachable_volumes_from_container(self):
        self.base_dir = 'tests/fixtures/volumes-from-container'

        container = self.client.create_container(
            'busybox', 'true', name='composetest_data_container',
            host_config={}
        )
        self.addCleanup(self.client.remove_container, container)

        result = self.dispatch(['-H=tcp://doesnotexist:8000', 'ps'], returncode=1)
        assert "Couldn't connect to Docker daemon" in result.stderr

    def test_config_list_profiles(self):
        self.base_dir = 'tests/fixtures/config-profiles'
        result = self.dispatch(['config', '--profiles'])
        assert set(result.stdout.rstrip().split('\n')) == {'debug', 'frontend', 'gui'}

    def test_config_list_services(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config', '--services'])
        assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}

    def test_config_list_volumes(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config', '--volumes'])
        assert set(result.stdout.rstrip().split('\n')) == {'data'}
    def test_config_quiet_with_error(self):
        self.base_dir = None
        result = self.dispatch([
            '-f', 'tests/fixtures/invalid-composefile/invalid.yml',
            'config', '--quiet'
        ], returncode=1)
        assert "'notaservice' must be a mapping" in result.stderr

    def test_config_quiet(self):
        self.base_dir = 'tests/fixtures/v2-full'
        assert self.dispatch(['config', '--quiet']).stdout == ''

    def test_config_stdin(self):
        config = b"""version: "3.7"
services:
  web:
    image: nginx
  other:
    image: alpine
"""
        result = self.dispatch(['-f', '-', 'config', '--services'], stdin=config)
        assert set(result.stdout.rstrip().split('\n')) == {'web', 'other'}

    def test_config_with_hash_option(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config', '--hash=*'])
        for service in self.project.get_services():
            assert '{} {}\n'.format(service.name, service.config_hash) in result.stdout

        svc = self.project.get_service('other')
        result = self.dispatch(['config', '--hash=other'])
        assert result.stdout == '{} {}\n'.format(svc.name, svc.config_hash)

    def test_config_default(self):
        self.base_dir = 'tests/fixtures/v2-full'
        result = self.dispatch(['config'])
        # assert there are no python objects encoded in the output
        assert '!!' not in result.stdout

        output = yaml.safe_load(result.stdout)
        expected = {
            'version': '2',
            'volumes': {'data': {'driver': 'local'}},
            'networks': {'front': {}},
            'services': {
                'web': {
                    'build': {
                        'context': os.path.abspath(self.base_dir),
                    },
                    'networks': {'front': None, 'default': None},
                    'volumes_from': ['service:other:rw'],
                },
                'other': {
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                    'command': 'top',
                    'volumes': ['/data'],
                },
            },
        }
        assert output == expected

    def test_config_restart(self):
        self.base_dir = 'tests/fixtures/restart'
        result = self.dispatch(['config'])
        assert yaml.safe_load(result.stdout) == {
            'version': '2',
            'services': {
                'never': {
                    'image': 'busybox',
                    'restart': 'no',
                },
                'always': {
                    'image': 'busybox',
                    'restart': 'always',
                },
                'on-failure': {
                    'image': 'busybox',
                    'restart': 'on-failure',
                },
                'on-failure-5': {
                    'image': 'busybox',
                    'restart': 'on-failure:5',
                },
                'restart-null': {
                    'image': 'busybox',
                    'restart': ''
                },
            },
        }

    def test_config_external_network(self):
        self.base_dir = 'tests/fixtures/networks'
        result = self.dispatch(['-f', 'external-networks.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert 'networks' in json_result
        assert json_result['networks'] == {
            'networks_foo': {
                'external': True,
                'name': 'networks_foo'
            },
            'bar': {
                'external': True,
                'name': 'networks_bar'
            }
        }

    def test_config_with_dot_env(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['config'])
        json_result = yaml.safe_load(result.stdout)
        assert json_result == {
            'version': '2.4',
            'services': {
                'web': {
                    'command': 'true',
                    'image': 'alpine:latest',
                    'ports': [{'target': 5643}, {'target': 9999}]
                }
            }
        }

    def test_config_with_env_file(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['--env-file', '.env2', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert json_result == {
            'version': '2.4',
            'services': {
                'web': {
                    'command': 'false',
                    'image': 'alpine:latest',
                    'ports': [{'target': 5644}, {'target': 9998}]
                }
            }
        }

    def test_config_with_dot_env_and_override_dir(self):
        self.base_dir = 'tests/fixtures/default-env-file'
        result = self.dispatch(['--project-directory', 'alt/', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert json_result == {
            'version': '2.4',
            'services': {
                'web': {
                    'command': 'echo uwu',
                    'image': 'alpine:3.10.1',
                    'ports': [{'target': 3341}, {'target': 4449}]
                }
            }
        }
    def test_config_external_volume_v2(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v2.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
                'external': True,
                'name': 'foo',
            },
            'bar': {
                'external': True,
                'name': 'some_bar',
            }
        }

    def test_config_external_volume_v2_x(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v2-x.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
                'external': True,
                'name': 'some_foo',
            },
            'bar': {
                'external': True,
                'name': 'some_bar',
            }
        }

    def test_config_external_volume_v3_x(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v3-x.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
                'external': True,
                'name': 'foo',
            },
            'bar': {
                'external': True,
                'name': 'some_bar',
            }
        }

    def test_config_external_volume_v3_4(self):
        self.base_dir = 'tests/fixtures/volumes'
        result = self.dispatch(['-f', 'external-volumes-v3-4.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert 'volumes' in json_result
        assert json_result['volumes'] == {
            'foo': {
                'external': True,
                'name': 'some_foo',
            },
            'bar': {
                'external': True,
                'name': 'some_bar',
            }
        }

    def test_config_external_network_v3_5(self):
        self.base_dir = 'tests/fixtures/networks'
        result = self.dispatch(['-f', 'external-networks-v3-5.yml', 'config'])
        json_result = yaml.safe_load(result.stdout)
        assert 'networks' in json_result
        assert json_result['networks'] == {
            'foo': {
                'external': True,
                'name': 'some_foo',
            },
            'bar': {
                'external': True,
                'name': 'some_bar',
            },
        }

    def test_config_v1(self):
        self.base_dir = 'tests/fixtures/v1-config'
        result = self.dispatch(['config'])
        assert yaml.safe_load(result.stdout) == {
            'version': str(V1),
            'services': {
                'net': {
                    'image': 'busybox',
                    'network_mode': 'bridge',
                },
                'volume': {
                    'image': 'busybox',
                    'volumes': ['/data'],
                    'network_mode': 'bridge',
                },
                'app': {
                    'image': 'busybox',
                    'volumes_from': ['service:volume:rw'],
                    'network_mode': 'service:net',
                },
            },
        }

    def test_config_v3(self):
        self.base_dir = 'tests/fixtures/v3-full'
        result = self.dispatch(['config'])

        assert yaml.safe_load(result.stdout) == {
            'version': '3.5',
            'volumes': {
                'foobar': {
                    'labels': {
                        'com.docker.compose.test': 'true',
                    },
                },
            },
            'services': {
                'web': {
                    'image': 'busybox',
                    'deploy': {
                        'mode': 'replicated',
                        'replicas': 6,
                        'labels': ['FOO=BAR'],
                        'update_config': {
                            'parallelism': 3,
                            'delay': '10s',
                            'failure_action': 'continue',
                            'monitor': '60s',
                            'max_failure_ratio': 0.3,
                        },
                        'resources': {
                            'limits': {
                                'cpus': 0.05,
                                'memory': '50M',
                            },
                            'reservations': {
                                'cpus': 0.01,
                                'memory': '20M',
                            },
                        },
                        'restart_policy': {
                            'condition': 'on-failure',
                            'delay': '5s',
                            'max_attempts': 3,
                            'window': '120s',
                        },
                        'placement': {
                            'constraints': [
                                'node.hostname==foo', 'node.role != manager'
                            ],
                            'preferences': [{'spread': 'node.labels.datacenter'}]
                        },
                    },
                    'healthcheck': {
                        'test': 'cat /etc/passwd',
                        'interval': '10s',
                        'timeout': '1s',
                        'retries': 5,
                    },
                    'volumes': [{
                        'read_only': True,
                        'source': '/host/path',
                        'target': '/container/path',
                        'type': 'bind'
                    }, {
                        'source': 'foobar',
                        'target': '/container/volumepath',
                        'type': 'volume'
                    }, {
                        'target': '/anonymous',
                        'type': 'volume'
                    }, {
                        'source': 'foobar',
                        'target': '/container/volumepath2',
                        'type': 'volume',
                        'volume': {'nocopy': True}
                    }],
                    'stop_grace_period': '20s',
                },
            },
        }

    @pytest.mark.skip(reason='deprecated option')
    def test_config_compatibility_mode(self):
        self.base_dir = 'tests/fixtures/compatibility-mode'
        result = self.dispatch(['--compatibility', 'config'])

        assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT

    @pytest.mark.skip(reason='deprecated option')
    @mock.patch.dict(os.environ)
    def test_config_compatibility_mode_from_env(self):
        self.base_dir = 'tests/fixtures/compatibility-mode'
        os.environ['COMPOSE_COMPATIBILITY'] = 'true'
        result = self.dispatch(['config'])

        assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT

    @pytest.mark.skip(reason='deprecated option')
    @mock.patch.dict(os.environ)
    def test_config_compatibility_mode_from_env_and_option_precedence(self):
        self.base_dir = 'tests/fixtures/compatibility-mode'
        os.environ['COMPOSE_COMPATIBILITY'] = 'false'
        result = self.dispatch(['--compatibility', 'config'])

        assert yaml.load(result.stdout) == COMPOSE_COMPATIBILITY_DICT

    def test_ps(self):
        self.project.get_service('simple').create_container()
        result = self.dispatch(['ps'])
        assert 'simple-composefile_simple_1' in result.stdout

    def test_ps_default_composefile(self):
        self.base_dir = 'tests/fixtures/multiple-composefiles'
        self.dispatch(['up', '-d'])
        result = self.dispatch(['ps'])

        assert 'multiple-composefiles_simple_1' in result.stdout
        assert 'multiple-composefiles_another_1' in result.stdout
        assert 'multiple-composefiles_yetanother_1' not in result.stdout

    def test_ps_alternate_composefile(self):
        config_path = os.path.abspath(
            'tests/fixtures/multiple-composefiles/compose2.yml')
        self._project = get_project(self.base_dir, [config_path])

        self.base_dir = 'tests/fixtures/multiple-composefiles'
        self.dispatch(['-f', 'compose2.yml', 'up', '-d'])
        result = self.dispatch(['-f', 'compose2.yml', 'ps'])

        assert 'multiple-composefiles_simple_1' not in result.stdout
        assert 'multiple-composefiles_another_1' not in result.stdout
        assert 'multiple-composefiles_yetanother_1' in result.stdout

    def test_ps_services_filter_option(self):
        self.base_dir = 'tests/fixtures/ps-services-filter'
        image = self.dispatch(['ps', '--services', '--filter', 'source=image'])
        build = self.dispatch(['ps', '--services', '--filter', 'source=build'])
        all_services = self.dispatch(['ps', '--services'])

        assert 'with_build' in all_services.stdout
        assert 'with_image' in all_services.stdout
        assert 'with_build' in build.stdout
        assert 'with_build' not in image.stdout
        assert 'with_image' in image.stdout
        assert 'with_image' not in build.stdout

    def test_ps_services_filter_status(self):
        self.base_dir = 'tests/fixtures/ps-services-filter'
        self.dispatch(['up', '-d'])
        self.dispatch(['pause', 'with_image'])
        paused = self.dispatch(['ps', '--services', '--filter', 'status=paused'])
        stopped = self.dispatch(['ps', '--services', '--filter', 'status=stopped'])
        running = self.dispatch(['ps', '--services', '--filter', 'status=running'])

        assert 'with_build' not in stopped.stdout
        assert 'with_image' not in stopped.stdout
        assert 'with_build' not in paused.stdout
        assert 'with_image' in paused.stdout
        assert 'with_build' in running.stdout
        assert 'with_image' in running.stdout

    def test_ps_all(self):
        self.project.get_service('simple').create_container(one_off='blahblah')
        result = self.dispatch(['ps'])
        assert 'simple-composefile_simple_run_' not in result.stdout

        result2 = self.dispatch(['ps', '--all'])
        assert 'simple-composefile_simple_run_' in result2.stdout

    def test_pull(self):
        result = self.dispatch(['pull'])
        assert 'Pulling simple' in result.stderr
        assert 'Pulling another' in result.stderr
        assert 'done' in result.stderr
        assert 'failed' not in result.stderr

    def test_pull_with_digest(self):
        result = self.dispatch(['-f', 'digest.yml', 'pull', '--no-parallel'])

        assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
        assert ('Pulling digest (busybox@'
                'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b520'
                '04ee8502d)...') in result.stderr

    def test_pull_with_ignore_pull_failures(self):
        result = self.dispatch([
            '-f', 'ignore-pull-failures.yml',
            'pull', '--ignore-pull-failures', '--no-parallel']
        )

        assert 'Pulling simple ({})...'.format(BUSYBOX_IMAGE_WITH_TAG) in result.stderr
        assert 'Pulling another (nonexisting-image:latest)...' in result.stderr
        assert ('repository nonexisting-image not found' in result.stderr or
                'image library/nonexisting-image:latest not found' in result.stderr or
                'pull access denied for nonexisting-image' in result.stderr)

    def test_pull_with_quiet(self):
        assert self.dispatch(['pull', '--quiet']).stderr == ''
        assert self.dispatch(['pull', '--quiet']).stdout == ''

    def test_pull_with_parallel_failure(self):
        result = self.dispatch([
            '-f', 'ignore-pull-failures.yml', 'pull'],
            returncode=1
        )

        assert re.search(re.compile('^Pulling simple', re.MULTILINE), result.stderr)
        assert re.search(re.compile('^Pulling another', re.MULTILINE), result.stderr)
        assert re.search(
            re.compile('^ERROR: for another .*does not exist.*', re.MULTILINE),
            result.stderr
        )
        assert re.search(
            re.compile('''^(ERROR: )?(b')?.* nonexisting-image''', re.MULTILINE),
            result.stderr
        )

    def test_pull_can_build(self):
        result = self.dispatch([
            '-f', 'can-build-pull-failures.yml', 'pull'],
            returncode=0
        )
        assert 'Some service image(s) must be built from source' in result.stderr
        assert 'docker-compose build can_build' in result.stderr

    def test_pull_with_no_deps(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', 'web'])
        assert sorted(result.stderr.split('\n'))[1:] == [
            'Pulling web (busybox:1.27.2)...',
        ]

    def test_pull_with_include_deps(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        result = self.dispatch(['pull', '--no-parallel', '--include-deps', 'web'])
        assert sorted(result.stderr.split('\n'))[1:] == [
            'Pulling db (busybox:1.27.2)...',
            'Pulling web (busybox:1.27.2)...',
        ]

    def test_build_plain(self):
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'])

        result = self.dispatch(['build', 'simple'])
        assert BUILD_PULL_TEXT not in result.stdout

    def test_build_no_cache(self):
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'])

        result = self.dispatch(['build', '--no-cache', 'simple'])
        assert BUILD_CACHE_TEXT not in result.stdout
        assert BUILD_PULL_TEXT not in result.stdout

    def test_up_ignore_missing_build_directory(self):
        self.base_dir = 'tests/fixtures/no-build'
        result = self.dispatch(['up', '--no-build'])

        assert 'alpine exited with code 0' in result.stdout
        self.base_dir = None

    def test_pull_ignore_missing_build_directory(self):
        self.base_dir = 'tests/fixtures/no-build'
        result = self.dispatch(['pull'])

        assert 'Pulling my-alpine' in result.stderr
        self.base_dir = None

    def test_build_pull(self):
        # Make sure we have the latest busybox already
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'], None)

        result = self.dispatch(['build', '--pull', 'simple'])
        if not is_cluster(self.client):
            # If previous build happened on another node, cache won't be available
            assert BUILD_CACHE_TEXT in result.stdout
        assert BUILD_PULL_TEXT in result.stdout

    def test_build_no_cache_pull(self):
        # Make sure we have the latest busybox already
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', 'simple'])

        result = self.dispatch(['build', '--no-cache', '--pull', 'simple'])
        assert BUILD_CACHE_TEXT not in result.stdout
        assert BUILD_PULL_TEXT in result.stdout

    @mock.patch.dict(os.environ)
    def test_build_log_level(self):
        os.environ['COMPOSE_DOCKER_CLI_BUILD'] = '0'
        os.environ['DOCKER_BUILDKIT'] = '0'
        self.test_env_file_relative_to_compose_file()
        self.base_dir = 'tests/fixtures/simple-dockerfile'
        result = self.dispatch(['--log-level', 'warning', 'build', 'simple'])
        assert result.stderr == ''
        result = self.dispatch(['--log-level', 'debug', 'build', 'simple'])
        assert 'Building simple' in result.stderr
        assert 'Using configuration file' in result.stderr

        self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
        result = self.dispatch(['--log-level', 'critical', 'build', 'simple'], returncode=1)
        assert result.stderr == ''
        result = self.dispatch(['--log-level', 'debug', 'build', 'simple'], returncode=1)
        assert 'Building simple' in result.stderr
        assert 'non-zero code' in result.stderr

    def test_build_failed(self):
        self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
        self.dispatch(['build', 'simple'], returncode=1)

        labels = ["com.docker.compose.test_failing_image=true"]
        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(
                all=True,
                filters={"label": labels})
        ]
        assert len(containers) == 1

    def test_build_failed_forcerm(self):
        self.base_dir = 'tests/fixtures/simple-failing-dockerfile'
        self.dispatch(['build', '--force-rm', 'simple'], returncode=1)

        labels = ["com.docker.compose.test_failing_image=true"]
        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(
                all=True,
                filters={"label": labels})
        ]
        assert not containers

    @pytest.mark.xfail(True, reason='Flaky on local')
    def test_build_rm(self):
        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(all=True)
        ]
        assert not containers

        self.base_dir = 'tests/fixtures/simple-dockerfile'
        self.dispatch(['build', '--no-rm', 'simple'], returncode=0)

        containers = [
            Container.from_ps(self.project.client, c)
            for c in self.project.client.containers(all=True)
        ]
        assert containers

        for c in self.project.client.containers(all=True):
            self.addCleanup(self.project.client.remove_container, c, force=True)

    @mock.patch.dict(os.environ)
    def test_build_shm_size_build_option(self):
        os.environ['COMPOSE_DOCKER_CLI_BUILD'] = '0'
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-shm-size'
        result = self.dispatch(['build', '--no-cache'], None)
        assert 'shm_size: 96' in result.stdout

    @mock.patch.dict(os.environ)
    def test_build_memory_build_option(self):
        os.environ['COMPOSE_DOCKER_CLI_BUILD'] = '0'
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-memory'
        result = self.dispatch(['build', '--no-cache', '--memory', '96m', 'service'], None)
        assert 'memory: 100663296' in result.stdout  # 96 * 1024 * 1024

    def test_build_with_buildarg_from_compose_file(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-args'
        result = self.dispatch(['build'], None)
        assert 'Favorite Touhou Character: mariya.kirisame' in result.stdout

    def test_build_with_buildarg_cli_override(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-args'
        result = self.dispatch(['build', '--build-arg', 'favorite_th_character=sakuya.izayoi'], None)
        assert 'Favorite Touhou Character: sakuya.izayoi' in result.stdout

    @mock.patch.dict(os.environ)
    def test_build_with_buildarg_old_api_version(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/build-args'
        os.environ['COMPOSE_API_VERSION'] = '1.24'
        result = self.dispatch(
            ['build', '--build-arg', 'favorite_th_character=reimu.hakurei'],
            None,
            returncode=1
        )
        assert '--build-arg is only supported when services are specified' in result.stderr

        result = self.dispatch(
            ['build', '--build-arg', 'favorite_th_character=hong.meiling', 'web'],
            None
        )
        assert 'Favorite Touhou Character: hong.meiling' in result.stdout

    def test_build_override_dir(self):
        self.base_dir = 'tests/fixtures/build-path-override-dir'
        self.override_dir = os.path.abspath('tests/fixtures')
        result = self.dispatch([
            '--project-directory', self.override_dir,
            'build'])

        assert 'Successfully built' in result.stdout

    def test_build_override_dir_invalid_path(self):
        config_path = os.path.abspath('tests/fixtures/build-path-override-dir/docker-compose.yml')
        result = self.dispatch([
            '-f', config_path,
            'build'], returncode=1)

        assert 'does not exist, is not accessible, or is not a valid URL' in result.stderr

    def test_build_parallel(self):
        self.base_dir = 'tests/fixtures/build-multiple-composefile'
        result = self.dispatch(['build', '--parallel'])
        assert 'Successfully tagged build-multiple-composefile_a:latest' in result.stdout
        assert 'Successfully tagged build-multiple-composefile_b:latest' in result.stdout
        assert 'Successfully built' in result.stdout

    def test_create(self):
        self.dispatch(['create'])
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        service_containers = service.containers(stopped=True)
        another_containers = another.containers(stopped=True)
        assert len(service_containers) == 1
        assert len(another_containers) == 1
        assert not service_containers[0].is_running
        assert not another_containers[0].is_running

    def test_create_with_force_recreate(self):
        self.dispatch(['create'], None)
        service = self.project.get_service('simple')
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

        old_ids = [c.id for c in service.containers(stopped=True)]

        self.dispatch(['create', '--force-recreate'], None)
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

        new_ids = [c.id for c in service_containers]

        assert old_ids != new_ids

    def test_create_with_no_recreate(self):
        self.dispatch(['create'], None)
        service = self.project.get_service('simple')
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

        old_ids = [c.id for c in service.containers(stopped=True)]

        self.dispatch(['create', '--no-recreate'], None)
        service_containers = service.containers(stopped=True)
        assert len(service_containers) == 1
        assert not service_containers[0].is_running

        new_ids = [c.id for c in service_containers]

        assert old_ids == new_ids

    def test_run_one_off_with_volume(self):
        self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
        volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
        node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))
    def test_run_one_off_with_volume(self):
        self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
        volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
        node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))

        self.dispatch([
            'run',
            '-v', '{}:/data'.format(volume_path),
            '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
            'simple',
            'test', '-f', '/data/example.txt'
        ], returncode=0)

        service = self.project.get_service('simple')
        container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
        mount = container_data.get('Mounts')[0]
        assert mount['Source'] == volume_path
        assert mount['Destination'] == '/data'
        assert mount['Type'] == 'bind'

    def test_run_one_off_with_multiple_volumes(self):
        self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
        volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
        node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))

        self.dispatch([
            'run',
            '-v', '{}:/data'.format(volume_path),
            '-v', '{}:/data1'.format(volume_path),
            '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
            'simple',
            'test', '-f', '/data/example.txt'
        ], returncode=0)

        self.dispatch([
            'run',
            '-v', '{}:/data'.format(volume_path),
            '-v', '{}:/data1'.format(volume_path),
            '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
            'simple',
            'test', '-f', '/data1/example.txt'
        ], returncode=0)

    def test_run_one_off_with_volume_merge(self):
        self.base_dir = 'tests/fixtures/simple-composefile-volume-ready'
        volume_path = os.path.abspath(os.path.join(os.getcwd(), self.base_dir, 'files'))
        node = create_host_file(self.client, os.path.join(volume_path, 'example.txt'))

        self.dispatch([
            '-f', 'docker-compose.merge.yml',
            'run',
            '-v', '{}:/data'.format(volume_path),
            '-e', 'constraint:node=={}'.format(node if node is not None else '*'),
            'simple',
            'test', '-f', '/data/example.txt'
        ], returncode=0)

        service = self.project.get_service('simple')
        container_data = service.containers(one_off=OneOffFilter.only, stopped=True)[0]
        mounts = container_data.get('Mounts')
        assert len(mounts) == 2
        config_mount = [m for m in mounts if m['Destination'] == '/data1'][0]
        override_mount = [m for m in mounts if m['Destination'] == '/data'][0]

        assert config_mount['Type'] == 'volume'
        assert override_mount['Source'] == volume_path
        assert override_mount['Type'] == 'bind'

    def test_create_with_force_recreate_and_no_recreate(self):
        self.dispatch(
            ['create', '--force-recreate', '--no-recreate'],
            returncode=1)

    def test_down_invalid_rmi_flag(self):
        result = self.dispatch(['down', '--rmi', 'bogus'], returncode=1)
        assert '--rmi flag must be' in result.stderr

    def test_down(self):
        self.base_dir = 'tests/fixtures/v2-full'
        self.dispatch(['up', '-d'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        self.dispatch(['run', 'web', 'true'])
        self.dispatch(['run', '-d', 'web', 'tail', '-f', '/dev/null'])
        assert len(self.project.containers(one_off=OneOffFilter.only, stopped=True)) == 2

        result = self.dispatch(['down', '--rmi=local', '--volumes'])
        assert 'Stopping v2-full_web_1' in result.stderr
        assert 'Stopping v2-full_other_1' in result.stderr
        assert 'Stopping v2-full_web_run_' in result.stderr
        assert 'Removing v2-full_web_1' in result.stderr
        assert 'Removing v2-full_other_1' in result.stderr
        assert 'Removing v2-full_web_run_' in result.stderr
        assert 'Removing v2-full_web_run_' in result.stderr
        assert 'Removing volume v2-full_data' in result.stderr
        assert 'Removing image v2-full_web' in result.stderr
        assert 'Removing image busybox' not in result.stderr
        assert 'Removing network v2-full_default' in result.stderr
        assert 'Removing network v2-full_front' in result.stderr

    def test_down_timeout(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['down', '-t', '1'], None)
        assert len(service.containers(stopped=True)) == 0

    def test_down_signal(self):
        self.base_dir = 'tests/fixtures/stop-signal-composefile'
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['down', '-t', '1'], None)
        assert len(service.containers(stopped=True)) == 0

    def test_up_detached(self):
        self.dispatch(['up', '-d'])
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        assert len(service.containers()) == 1
        assert len(another.containers()) == 1

        # Ensure containers don't have stdin and stdout connected in -d mode
        container, = service.containers()
        assert not container.get('Config.AttachStderr')
        assert not container.get('Config.AttachStdout')
        assert not container.get('Config.AttachStdin')

    def test_up_detached_long_form(self):
        self.dispatch(['up', '--detach'])
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        assert len(service.containers()) == 1
        assert len(another.containers()) == 1

        # Ensure containers don't have stdin and stdout connected in -d mode
        container, = service.containers()
        assert not container.get('Config.AttachStderr')
        assert not container.get('Config.AttachStdout')
        assert not container.get('Config.AttachStdin')

    def test_up_attached(self):
        self.base_dir = 'tests/fixtures/echo-services'
        result = self.dispatch(['up', '--no-color'])
        simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project
        another_name = self.project.get_service('another').containers(
            stopped=True
        )[0].name_without_project

        assert '{} | simple'.format(simple_name) in result.stdout
        assert '{} | another'.format(another_name) in result.stdout
        assert '{} exited with code 0'.format(simple_name) in result.stdout
        assert '{} exited with code 0'.format(another_name) in result.stdout
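
    # `up` creates a project-scoped default network named <project>_default.
    # The driver is 'bridge' against a single engine and 'overlay' against a
    # (classic) Swarm cluster, which the is_cluster() branch below reflects.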
    def test_up(self):
        self.base_dir = 'tests/fixtures/v2-simple'
        self.dispatch(['up', '-d'], None)

        services = self.project.get_services()

        network_name = self.project.networks.networks['default'].full_name
        networks = self.client.networks(names=[network_name])
        assert len(networks) == 1
        assert networks[0]['Driver'] == ('bridge' if not is_cluster(self.client) else 'overlay')
        assert 'com.docker.network.bridge.enable_icc' not in networks[0]['Options']

        network = self.client.inspect_network(networks[0]['Id'])

        for service in services:
            containers = service.containers()
            assert len(containers) == 1

            container = containers[0]
            assert container.id in network['Containers']

            networks = container.get('NetworkSettings.Networks')
            assert list(networks) == [network['Name']]

            assert sorted(networks[network['Name']]['Aliases']) == sorted(
                [service.name, container.short_id]
            )

            for service in services:
                assert self.lookup(container, service.name)

    def test_up_no_start(self):
        self.base_dir = 'tests/fixtures/v2-full'
        self.dispatch(['up', '--no-start'], None)

        services = self.project.get_services()

        default_network = self.project.networks.networks['default'].full_name
        front_network = self.project.networks.networks['front'].full_name
        networks = self.client.networks(names=[default_network, front_network])
        assert len(networks) == 2

        for service in services:
            containers = service.containers(stopped=True)
            assert len(containers) == 1

            container = containers[0]
            assert not container.is_running
            assert container.get('State.Status') == 'created'

        volumes = self.project.volumes.volumes
        assert 'data' in volumes
        volume = volumes['data']

        # The code below is a Swarm-compatible equivalent to volume.exists()
        remote_volumes = [
            v for v in self.client.volumes().get('Volumes', [])
            if v['Name'].split('/')[-1] == volume.full_name
        ]
        assert len(remote_volumes) > 0

    def test_up_no_start_remove_orphans(self):
        self.base_dir = 'tests/fixtures/v2-simple'
        self.dispatch(['up', '--no-start'], None)

        services = self.project.get_services()
        stopped = reduce((lambda prev, next: prev.containers(
            stopped=True) + next.containers(stopped=True)), services)
        assert len(stopped) == 2

        self.dispatch(['-f', 'one-container.yml', 'up', '--no-start', '--remove-orphans'], None)
        stopped2 = reduce((lambda prev, next: prev.containers(
            stopped=True) + next.containers(stopped=True)), services)
        assert len(stopped2) == 1

    def test_up_no_ansi(self):
        self.base_dir = 'tests/fixtures/v2-simple'
        result = self.dispatch(['--no-ansi', 'up', '-d'], None)
        assert "%c[2K\r" % 27 not in result.stderr
        assert "%c[1A" % 27 not in result.stderr
        assert "%c[1B" % 27 not in result.stderr

    def test_up_with_default_network_config(self):
        filename = 'default-network-config.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], None)

        network_name = self.project.networks.networks['default'].full_name
        networks = self.client.networks(names=[network_name])
        assert networks[0]['Options']['com.docker.network.bridge.enable_icc'] == 'false'

    def test_up_with_network_aliases(self):
        filename = 'network-aliases.yml'
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['-f', filename, 'up', '-d'], None)
        back_name = '{}_back'.format(self.project.name)
        front_name = '{}_front'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        # Two networks were created: back and front
        assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]
        web_container = self.project.get_service('web').containers()[0]

        back_aliases = web_container.get(
            'NetworkSettings.Networks.{}.Aliases'.format(back_name)
        )
        assert 'web' in back_aliases
        front_aliases = web_container.get(
            'NetworkSettings.Networks.{}.Aliases'.format(front_name)
        )
        assert 'web' in front_aliases
        assert 'forward_facing' in front_aliases
        assert 'ahead' in front_aliases

    def test_up_with_network_internal(self):
        self.require_api_version('1.23')
        filename = 'network-internal.yml'
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['-f', filename, 'up', '-d'], None)
        internal_net = '{}_internal'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        # One network was created: internal
        assert sorted(n['Name'].split('/')[-1] for n in networks) == [internal_net]

        assert networks[0]['Internal'] is True

    def test_up_with_network_static_addresses(self):
        filename = 'network-static-addresses.yml'
        ipv4_address = '172.16.100.100'
        ipv6_address = 'fe80::1001:100'
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['-f', filename, 'up', '-d'], None)
        static_net = '{}_static_test'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        # One network was created: static_test
        assert sorted(n['Name'].split('/')[-1] for n in networks) == [static_net]
        web_container = self.project.get_service('web').containers()[0]

        ipam_config = web_container.get(
            'NetworkSettings.Networks.{}.IPAMConfig'.format(static_net)
        )
        assert ipv4_address in ipam_config.values()
        assert ipv6_address in ipam_config.values()

    def test_up_with_networks(self):
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['up', '-d'], None)

        back_name = '{}_back'.format(self.project.name)
        front_name = '{}_front'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        # Two networks were created: back and front
        assert sorted(n['Name'].split('/')[-1] for n in networks) == [back_name, front_name]

        # lookup by ID instead of name in case of duplicates
        back_network = self.client.inspect_network(
            [n for n in networks if n['Name'] == back_name][0]['Id']
        )
        front_network = self.client.inspect_network(
            [n for n in networks if n['Name'] == front_name][0]['Id']
        )

        web_container = self.project.get_service('web').containers()[0]
        app_container = self.project.get_service('app').containers()[0]
        db_container = self.project.get_service('db').containers()[0]

        for net_name in [front_name, back_name]:
            links = app_container.get('NetworkSettings.Networks.{}.Links'.format(net_name))
            assert '{}:database'.format(db_container.name) in links

        # db and app joined the back network
        assert sorted(back_network['Containers']) == sorted([db_container.id, app_container.id])

        # web and app joined the front network
        assert sorted(front_network['Containers']) == sorted([web_container.id, app_container.id])

        # web can see app but not db
        assert self.lookup(web_container, "app")
        assert not self.lookup(web_container, "db")

        # app can see db
        assert self.lookup(app_container, "db")

        # app has aliased db to "database"
        assert self.lookup(app_container, "database")

    def test_up_missing_network(self):
        self.base_dir = 'tests/fixtures/networks'

        result = self.dispatch(
            ['-f', 'missing-network.yml', 'up', '-d'],
            returncode=1)

        assert 'Service "web" uses an undefined network "foo"' in result.stderr

    @no_cluster('container networks not supported in Swarm')
    def test_up_with_network_mode(self):
        c = self.client.create_container(
            'busybox', 'top', name='composetest_network_mode_container',
            host_config={}
        )
        self.addCleanup(self.client.remove_container, c, force=True)
        self.client.start(c)
        container_mode_source = 'container:{}'.format(c['Id'])

        filename = 'network-mode.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], None)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]
        assert not networks

        for name in ['bridge', 'host', 'none']:
            container = self.project.get_service(name).containers()[0]
            assert list(container.get('NetworkSettings.Networks')) == [name]
            assert container.get('HostConfig.NetworkMode') == name

        service_mode_source = 'container:{}'.format(
            self.project.get_service('bridge').containers()[0].id)
        service_mode_container = self.project.get_service('service').containers()[0]
        assert not service_mode_container.get('NetworkSettings.Networks')
        assert service_mode_container.get('HostConfig.NetworkMode') == service_mode_source

        container_mode_container = self.project.get_service('container').containers()[0]
        assert not container_mode_container.get('NetworkSettings.Networks')
        assert container_mode_container.get('HostConfig.NetworkMode') == container_mode_source
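
    # Networks declared `external: true` are never created by compose, so
    # `up` must fail until they exist; the next two tests create them
    # out-of-band and retry.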
    def test_up_external_networks(self):
        filename = 'external-networks.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
        assert 'declared as external, but could not be found' in result.stderr

        networks = [
            n['Name'] for n in self.client.networks()
            if n['Name'].startswith('{}_'.format(self.project.name))
        ]
        assert not networks

        network_names = ['{}_{}'.format(self.project.name, n) for n in ['foo', 'bar']]
        for name in network_names:
            self.client.create_network(name, attachable=True)

        self.dispatch(['-f', filename, 'up', '-d'])
        container = self.project.containers()[0]
        assert sorted(list(container.get('NetworkSettings.Networks'))) == sorted(network_names)

    def test_up_with_external_default_network(self):
        filename = 'external-default.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        result = self.dispatch(['-f', filename, 'up', '-d'], returncode=1)
        assert 'declared as external, but could not be found' in result.stderr

        networks = [
            n['Name'] for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]
        assert not networks

        network_name = 'composetest_external_network'
        self.client.create_network(network_name, attachable=True)

        self.dispatch(['-f', filename, 'up', '-d'])
        container = self.project.containers()[0]
        assert list(container.get('NetworkSettings.Networks')) == [network_name]

    def test_up_with_network_labels(self):
        filename = 'network-label.yml'

        self.base_dir = 'tests/fixtures/networks'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], returncode=0)

        network_with_label = '{}_network_with_label'.format(self.project.name)

        networks = [
            n for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        assert [n['Name'].split('/')[-1] for n in networks] == [network_with_label]
        assert 'label_key' in networks[0]['Labels']
        assert networks[0]['Labels']['label_key'] == 'label_val'

    def test_up_with_volume_labels(self):
        filename = 'volume-label.yml'

        self.base_dir = 'tests/fixtures/volumes'
        self._project = get_project(self.base_dir, [filename])

        self.dispatch(['-f', filename, 'up', '-d'], returncode=0)

        volume_with_label = '{}_volume_with_label'.format(self.project.name)

        volumes = [
            v for v in self.client.volumes().get('Volumes', [])
            if v['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]

        assert {v['Name'].split('/')[-1] for v in volumes} == {volume_with_label}
        assert 'label_key' in volumes[0]['Labels']
        assert volumes[0]['Labels']['label_key'] == 'label_val'

    def test_up_no_services(self):
        self.base_dir = 'tests/fixtures/no-services'
        self.dispatch(['up', '-d'], None)

        network_names = [
            n['Name'] for n in self.client.networks()
            if n['Name'].split('/')[-1].startswith('{}_'.format(self.project.name))
        ]
        assert network_names == []

    def test_up_with_links_v1(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['up', '-d', 'web'], None)

        # No network was created
        network_name = self.project.networks.networks['default'].full_name
        networks = self.client.networks(names=[network_name])
        assert networks == []

        web = self.project.get_service('web')
        db = self.project.get_service('db')
        console = self.project.get_service('console')

        # console was not started
        assert len(web.containers()) == 1
        assert len(db.containers()) == 1
        assert len(console.containers()) == 0

        # web has links
        web_container = web.containers()[0]
        assert web_container.get('HostConfig.Links')

    def test_up_with_net_is_invalid(self):
        self.base_dir = 'tests/fixtures/net-container'

        result = self.dispatch(
            ['-f', 'v2-invalid.yml', 'up', '-d'],
            returncode=1)

        assert "Unsupported config option for services.bar: 'net'" in result.stderr

    @no_cluster("Legacy networking not supported on Swarm")
    def test_up_with_net_v1(self):
        self.base_dir = 'tests/fixtures/net-container'
        self.dispatch(['up', '-d'], None)

        bar = self.project.get_service('bar')
        bar_container = bar.containers()[0]

        foo = self.project.get_service('foo')
        foo_container = foo.containers()[0]

        assert foo_container.get('HostConfig.NetworkMode') == 'container:{}'.format(
            bar_container.id
        )
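
    # The engine API reports healthcheck durations in nanoseconds, hence the
    # nanoseconds_from_time_seconds() conversions in the expected payloads.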
"Unsupported config option for services.bar: 'net'" in result.stderr @no_cluster("Legacy networking not supported on Swarm") def test_up_with_net_v1(self): self.base_dir = 'tests/fixtures/net-container' self.dispatch(['up', '-d'], None) bar = self.project.get_service('bar') bar_container = bar.containers()[0] foo = self.project.get_service('foo') foo_container = foo.containers()[0] assert foo_container.get('HostConfig.NetworkMode') == 'container:{}'.format( bar_container.id ) def test_up_with_healthcheck(self): def wait_on_health_status(container, status): def condition(): container.inspect() return container.get('State.Health.Status') == status return wait_on_condition(condition, delay=0.5) self.base_dir = 'tests/fixtures/healthcheck' self.dispatch(['up', '-d'], None) passes = self.project.get_service('passes') passes_container = passes.containers()[0] assert passes_container.get('Config.Healthcheck') == { "Test": ["CMD-SHELL", "/bin/true"], "Interval": nanoseconds_from_time_seconds(1), "Timeout": nanoseconds_from_time_seconds(30 * 60), "Retries": 1, } wait_on_health_status(passes_container, 'healthy') fails = self.project.get_service('fails') fails_container = fails.containers()[0] assert fails_container.get('Config.Healthcheck') == { "Test": ["CMD", "/bin/false"], "Interval": nanoseconds_from_time_seconds(2.5), "Retries": 2, } wait_on_health_status(fails_container, 'unhealthy') disabled = self.project.get_service('disabled') disabled_container = disabled.containers()[0] assert disabled_container.get('Config.Healthcheck') == { "Test": ["NONE"], } assert 'Health' not in disabled_container.get('State') def test_up_with_no_deps(self): self.base_dir = 'tests/fixtures/links-composefile' self.dispatch(['up', '-d', '--no-deps', 'web'], None) web = self.project.get_service('web') db = self.project.get_service('db') console = self.project.get_service('console') assert len(web.containers()) == 1 assert len(db.containers()) == 0 assert len(console.containers()) == 0 def test_up_with_attach_dependencies(self): self.base_dir = 'tests/fixtures/echo-services-dependencies' result = self.dispatch(['up', '--attach-dependencies', '--no-color', 'simple'], None) simple_name = self.project.get_service('simple').containers(stopped=True)[0].name_without_project another_name = self.project.get_service('another').containers( stopped=True )[0].name_without_project assert '{} | simple'.format(simple_name) in result.stdout assert '{} | another'.format(another_name) in result.stdout def test_up_handles_aborted_dependencies(self): self.base_dir = 'tests/fixtures/abort-on-container-exit-dependencies' proc = start_process( self.base_dir, ['up', 'simple', '--attach-dependencies', '--abort-on-container-exit']) wait_on_condition(ContainerCountCondition(self.project, 0)) proc.wait() assert proc.returncode == 1 def test_up_with_force_recreate(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') assert len(service.containers()) == 1 old_ids = [c.id for c in service.containers()] self.dispatch(['up', '-d', '--force-recreate'], None) assert len(service.containers()) == 1 new_ids = [c.id for c in service.containers()] assert old_ids != new_ids def test_up_with_no_recreate(self): self.dispatch(['up', '-d'], None) service = self.project.get_service('simple') assert len(service.containers()) == 1 old_ids = [c.id for c in service.containers()] self.dispatch(['up', '-d', '--no-recreate'], None) assert len(service.containers()) == 1 new_ids = [c.id for c in service.containers()] assert old_ids == 
    def test_up_with_force_recreate(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1

        old_ids = [c.id for c in service.containers()]

        self.dispatch(['up', '-d', '--force-recreate'], None)
        assert len(service.containers()) == 1

        new_ids = [c.id for c in service.containers()]

        assert old_ids != new_ids

    def test_up_with_no_recreate(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1

        old_ids = [c.id for c in service.containers()]

        self.dispatch(['up', '-d', '--no-recreate'], None)
        assert len(service.containers()) == 1

        new_ids = [c.id for c in service.containers()]

        assert old_ids == new_ids

    def test_up_with_force_recreate_and_no_recreate(self):
        self.dispatch(
            ['up', '-d', '--force-recreate', '--no-recreate'],
            returncode=1)

    def test_up_with_timeout(self):
        self.dispatch(['up', '-d', '-t', '1'])
        service = self.project.get_service('simple')
        another = self.project.get_service('another')
        assert len(service.containers()) == 1
        assert len(another.containers()) == 1

    @mock.patch.dict(os.environ)
    def test_up_with_ignore_remove_orphans(self):
        os.environ["COMPOSE_IGNORE_ORPHANS"] = "True"
        result = self.dispatch(['up', '-d', '--remove-orphans'], returncode=1)
        assert "COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined." in result.stderr

    def test_up_handles_sigint(self):
        proc = start_process(self.base_dir, ['up', '-t', '2'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        os.kill(proc.pid, signal.SIGINT)
        wait_on_condition(ContainerCountCondition(self.project, 0))

    def test_up_handles_sigterm(self):
        proc = start_process(self.base_dir, ['up', '-t', '2'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        os.kill(proc.pid, signal.SIGTERM)
        wait_on_condition(ContainerCountCondition(self.project, 0))

    def test_up_handles_force_shutdown(self):
        self.base_dir = 'tests/fixtures/sleeps-composefile'
        proc = start_process(self.base_dir, ['up', '-t', '200'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        # A second signal while shutdown is in progress forces an immediate
        # shutdown, so the 200s stop timeout is not honoured
        os.kill(proc.pid, signal.SIGTERM)
        time.sleep(0.1)
        os.kill(proc.pid, signal.SIGTERM)
        wait_on_condition(ContainerCountCondition(self.project, 0))

    def test_up_handles_abort_on_container_exit(self):
        self.base_dir = 'tests/fixtures/abort-on-container-exit-0'
        proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
        wait_on_condition(ContainerCountCondition(self.project, 0))
        proc.wait()
        assert proc.returncode == 0

    def test_up_handles_abort_on_container_exit_code(self):
        self.base_dir = 'tests/fixtures/abort-on-container-exit-1'
        proc = start_process(self.base_dir, ['up', '--abort-on-container-exit'])
        wait_on_condition(ContainerCountCondition(self.project, 0))
        proc.wait()
        assert proc.returncode == 1
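
    # The pid/ipc tests below exercise `container:<name-or-id>` mode, which
    # shares the corresponding kernel namespace of an already-running
    # container; 'host' and 'shareable' pass straight through to the engine's
    # HostConfig.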
    @no_cluster('Container PID mode does not work across clusters')
    def test_up_with_pid_mode(self):
        c = self.client.create_container(
            'busybox', 'top', name='composetest_pid_mode_container',
            host_config={}
        )
        self.addCleanup(self.client.remove_container, c, force=True)
        self.client.start(c)
        container_mode_source = 'container:{}'.format(c['Id'])

        self.base_dir = 'tests/fixtures/pid-mode'
        self.dispatch(['up', '-d'], None)

        service_mode_source = 'container:{}'.format(
            self.project.get_service('container').containers()[0].id)
        service_mode_container = self.project.get_service('service').containers()[0]
        assert service_mode_container.get('HostConfig.PidMode') == service_mode_source

        container_mode_container = self.project.get_service('container').containers()[0]
        assert container_mode_container.get('HostConfig.PidMode') == container_mode_source

        host_mode_container = self.project.get_service('host').containers()[0]
        assert host_mode_container.get('HostConfig.PidMode') == 'host'

    @no_cluster('Container IPC mode does not work across clusters')
    def test_up_with_ipc_mode(self):
        c = self.client.create_container(
            'busybox', 'top', name='composetest_ipc_mode_container',
            host_config={}
        )
        self.addCleanup(self.client.remove_container, c, force=True)
        self.client.start(c)
        container_mode_source = 'container:{}'.format(c['Id'])

        self.base_dir = 'tests/fixtures/ipc-mode'
        self.dispatch(['up', '-d'], None)

        service_mode_source = 'container:{}'.format(
            self.project.get_service('shareable').containers()[0].id)
        service_mode_container = self.project.get_service('service').containers()[0]
        assert service_mode_container.get('HostConfig.IpcMode') == service_mode_source

        container_mode_container = self.project.get_service('container').containers()[0]
        assert container_mode_container.get('HostConfig.IpcMode') == container_mode_source

        shareable_mode_container = self.project.get_service('shareable').containers()[0]
        assert shareable_mode_container.get('HostConfig.IpcMode') == 'shareable'

    # Profiles: services without a `profiles` key always run; profiled
    # services run only when their profile is activated via --profile or
    # when they are named explicitly on the command line.
    def test_profiles_up_with_no_profile(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['up'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'foo' in service_names
        assert len(containers) == 1

    def test_profiles_up_with_profile(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['--profile', 'test', 'up'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'foo' in service_names
        assert 'bar' in service_names
        assert 'baz' in service_names
        assert len(containers) == 3

    def test_profiles_up_invalid_dependency(self):
        self.base_dir = 'tests/fixtures/profiles'
        result = self.dispatch(['--profile', 'debug', 'up'], returncode=1)

        assert ('Service "bar" was pulled in as a dependency of service "zot" '
                'but is not enabled by the active profiles.') in result.stderr

    def test_profiles_up_with_multiple_profiles(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['--profile', 'debug', '--profile', 'test', 'up'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'foo' in service_names
        assert 'bar' in service_names
        assert 'baz' in service_names
        assert 'zot' in service_names
        assert len(containers) == 4

    def test_profiles_up_with_profile_enabled_by_service(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['up', 'bar'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'bar' in service_names
        assert len(containers) == 1

    def test_profiles_up_with_dependency_and_profile_enabled_by_service(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['up', 'baz'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'bar' in service_names
        assert 'baz' in service_names
        assert len(containers) == 2

    def test_profiles_up_with_invalid_dependency_for_target_service(self):
        self.base_dir = 'tests/fixtures/profiles'
        result = self.dispatch(['up', 'zot'], returncode=1)

        assert ('Service "bar" was pulled in as a dependency of service "zot" '
                'but is not enabled by the active profiles.') in result.stderr

    def test_profiles_up_with_profile_for_dependency(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['--profile', 'test', 'up', 'zot'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'bar' in service_names
        assert 'zot' in service_names
        assert len(containers) == 2

    def test_profiles_up_with_merged_profiles(self):
        self.base_dir = 'tests/fixtures/profiles'
        self.dispatch(['-f', 'docker-compose.yml', '-f', 'merge-profiles.yml', 'up', 'zot'])
        containers = self.project.containers(stopped=True)
        service_names = [c.service for c in containers]

        assert 'bar' in service_names
        assert 'zot' in service_names
        assert len(containers) == 2
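
    # `exec -T` disables pseudo-TTY allocation, keeping stdout and stderr
    # separate and byte-exact for the assertions below.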
    def test_exec_without_tty(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['up', '-d', 'console'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
        assert stderr == ""
        assert stdout == "/\n"

    @mock.patch.dict(os.environ)
    def test_exec_novalue_var_dotenv_file(self):
        os.environ['MYVAR'] = 'SUCCESS'
        self.base_dir = 'tests/fixtures/exec-novalue-var'
        self.dispatch(['up', '-d'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', 'nginx', 'env'])
        assert 'CHECK_VAR=SUCCESS' in stdout
        assert not stderr

    def test_exec_detach_long_form(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['up', '--detach', 'console'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', 'console', 'ls', '-1d', '/'])
        assert stderr == ""
        assert stdout == "/\n"

    def test_exec_custom_user(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['up', '-d', 'console'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', '--user=operator', 'console', 'whoami'])
        assert stdout == "operator\n"
        assert stderr == ""

    def test_exec_workdir(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        # exec --workdir requires engine API version 1.35 or later
        os.environ['COMPOSE_API_VERSION'] = '1.35'
        self.dispatch(['up', '-d', 'console'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', '--workdir', '/etc', 'console', 'ls'])
        assert 'passwd' in stdout

    def test_exec_service_with_environment_overridden(self):
        name = 'service'
        self.base_dir = 'tests/fixtures/environment-exec'
        self.dispatch(['up', '-d'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch([
            'exec',
            '-T',
            '-e', 'foo=notbar',
            '--env', 'alpha=beta',
            name,
            'env',
        ])

        # env overridden
        assert 'foo=notbar' in stdout
        # keep environment from yaml
        assert 'hello=world' in stdout
        # added option from command line
        assert 'alpha=beta' in stdout
        assert stderr == ''

    def test_run_service_without_links(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['run', 'console', '/bin/true'])
        assert len(self.project.containers()) == 0

        # Ensure stdin/out was open
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        config = container.inspect()['Config']
        assert config['AttachStderr']
        assert config['AttachStdout']
        assert config['AttachStdin']

    def test_run_service_with_links(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['run', 'web', '/bin/true'], None)
        db = self.project.get_service('db')
        console = self.project.get_service('console')
        assert len(db.containers()) == 1
        assert len(console.containers()) == 0

    def test_run_service_with_dependencies(self):
        self.base_dir = 'tests/fixtures/v2-dependencies'
        self.dispatch(['run', 'web', '/bin/true'], None)
        db = self.project.get_service('db')
        console = self.project.get_service('console')
        assert len(db.containers()) == 1
        assert len(console.containers()) == 0

    def test_run_service_with_unhealthy_dependencies(self):
        self.base_dir = 'tests/fixtures/v2-unhealthy-dependencies'
        result = self.dispatch(['run', 'web', '/bin/true'], returncode=1)
        assert re.search(
            re.compile('for web .*is unhealthy.*', re.MULTILINE),
            result.stderr
        )

    def test_run_service_with_scaled_dependencies(self):
        self.base_dir = 'tests/fixtures/v2-dependencies'
        self.dispatch(['up', '-d', '--scale', 'db=2', '--scale', 'console=0'])
        db = self.project.get_service('db')
        console = self.project.get_service('console')
        assert len(db.containers()) == 2
        assert len(console.containers()) == 0

        self.dispatch(['run', 'web', '/bin/true'], None)
        assert len(db.containers()) == 2
        assert len(console.containers()) == 0

    def test_run_with_no_deps(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['run', '--no-deps', 'web', '/bin/true'])
        db = self.project.get_service('db')
        assert len(db.containers()) == 0

    def test_run_does_not_recreate_linked_containers(self):
        self.base_dir = 'tests/fixtures/links-composefile'
        self.dispatch(['up', '-d', 'db'])
        db = self.project.get_service('db')
        assert len(db.containers()) == 1

        old_ids = [c.id for c in db.containers()]

        self.dispatch(['run', 'web', '/bin/true'], None)
        assert len(db.containers()) == 1

        new_ids = [c.id for c in db.containers()]

        assert old_ids == new_ids

    def test_run_without_command(self):
        self.base_dir = 'tests/fixtures/commands-composefile'
        self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')

        self.dispatch(['run', 'implicit'])
        service = self.project.get_service('implicit')
        containers = service.containers(stopped=True, one_off=OneOffFilter.only)
        assert [c.human_readable_command for c in containers] == ['/bin/sh -c echo "success"']

        self.dispatch(['run', 'explicit'])
        service = self.project.get_service('explicit')
        containers = service.containers(stopped=True, one_off=OneOffFilter.only)
        assert [c.human_readable_command for c in containers] == ['/bin/true']

    @pytest.mark.skipif(SWARM_SKIP_RM_VOLUMES, reason='Swarm DELETE /containers/ bug')
    def test_run_rm(self):
        self.base_dir = 'tests/fixtures/volume'
        proc = start_process(self.base_dir, ['run', '--rm', 'test'])
        service = self.project.get_service('test')
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'volume_test_run_*',
            'running')
        )
        containers = service.containers(one_off=OneOffFilter.only)
        assert len(containers) == 1
        mounts = containers[0].get('Mounts')
        for mount in mounts:
            if mount['Destination'] == '/container-path':
                anonymous_name = mount['Name']
                break
        os.kill(proc.pid, signal.SIGINT)
        wait_on_process(proc, 1)

        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0

        volumes = self.client.volumes()['Volumes']
        assert volumes is not None
        for volume in service.options.get('volumes'):
            if volume.internal == '/container-named-path':
                name = volume.external
                break
        volume_names = [v['Name'].split('/')[-1] for v in volumes]
        assert name in volume_names
        assert anonymous_name not in volume_names
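
    # Entrypoint/command interaction covered below: `run --entrypoint`
    # replaces the image ENTRYPOINT and drops the image CMD unless a command
    # is given on the command line, and an empty-string entrypoint resets it
    # entirely.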
    def test_run_service_with_dockerfile_entrypoint(self):
        self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
        self.dispatch(['run', 'test'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['printf']
        assert container.get('Config.Cmd') == ['default', 'args']

    def test_run_service_with_unset_entrypoint(self):
        self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
        self.dispatch(['run', '--entrypoint=""', 'test', 'true'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') is None
        assert container.get('Config.Cmd') == ['true']

        self.dispatch(['run', '--entrypoint', '""', 'test', 'true'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') is None
        assert container.get('Config.Cmd') == ['true']

    def test_run_service_with_dockerfile_entrypoint_overridden(self):
        self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
        self.dispatch(['run', '--entrypoint', 'echo', 'test'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['echo']
        assert not container.get('Config.Cmd')

    def test_run_service_with_dockerfile_entrypoint_and_command_overridden(self):
        self.base_dir = 'tests/fixtures/entrypoint-dockerfile'
        self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['echo']
        assert container.get('Config.Cmd') == ['foo']

    def test_run_service_with_compose_file_entrypoint(self):
        self.base_dir = 'tests/fixtures/entrypoint-composefile'
        self.dispatch(['run', 'test'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['printf']
        assert container.get('Config.Cmd') == ['default', 'args']

    def test_run_service_with_compose_file_entrypoint_overridden(self):
        self.base_dir = 'tests/fixtures/entrypoint-composefile'
        self.dispatch(['run', '--entrypoint', 'echo', 'test'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['echo']
        assert not container.get('Config.Cmd')

    def test_run_service_with_compose_file_entrypoint_and_command_overridden(self):
        self.base_dir = 'tests/fixtures/entrypoint-composefile'
        self.dispatch(['run', '--entrypoint', 'echo', 'test', 'foo'])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['echo']
        assert container.get('Config.Cmd') == ['foo']

    def test_run_service_with_compose_file_entrypoint_and_empty_string_command(self):
        self.base_dir = 'tests/fixtures/entrypoint-composefile'
        self.dispatch(['run', '--entrypoint', 'echo', 'test', ''])
        container = self.project.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert container.get('Config.Entrypoint') == ['echo']
        assert container.get('Config.Cmd') == ['']

    def test_run_service_with_user_overridden(self):
        self.base_dir = 'tests/fixtures/user-composefile'
        name = 'service'
        user = 'sshd'
        self.dispatch(['run', '--user={user}'.format(user=user), name], returncode=1)
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert user == container.get('Config.User')

    def test_run_service_with_user_overridden_short_form(self):
        self.base_dir = 'tests/fixtures/user-composefile'
        name = 'service'
        user = 'sshd'
        self.dispatch(['run', '-u', user, name], returncode=1)
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
        assert user == container.get('Config.User')

    def test_run_service_with_environment_overridden(self):
        name = 'service'
        self.base_dir = 'tests/fixtures/environment-composefile'
        self.dispatch([
            'run', '-e', 'foo=notbar',
            '-e', 'allo=moto=bobo',
            '-e', 'alpha=beta',
            name,
            '/bin/true',
        ])
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=OneOffFilter.only)[0]
        # env overridden
        assert 'notbar' == container.environment['foo']
        # keep environment from yaml
        assert 'world' == container.environment['hello']
        # added option from command line
        assert 'beta' == container.environment['alpha']
        # make sure a value containing a '=' doesn't crash out
        assert 'moto=bobo' == container.environment['allo']
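
    # A one-off `run` container does not publish the service's ports unless
    # --service-ports (or an explicit -p/--publish) is passed; the fixture
    # pins host ports from 49152 up so the assertions are deterministic.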
    def test_run_service_without_map_ports(self):
        # create one off container
        self.base_dir = 'tests/fixtures/ports-composefile'
        self.dispatch(['run', '-d', 'simple'])
        container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]

        # get port information
        port_random = container.get_local_port(3000)
        port_assigned = container.get_local_port(3001)

        # close all one off containers we just created
        container.stop()

        # check the ports
        assert port_random is None
        assert port_assigned is None

    def test_run_service_with_map_ports(self):
        # create one off container
        self.base_dir = 'tests/fixtures/ports-composefile'
        self.dispatch(['run', '-d', '--service-ports', 'simple'])
        container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]

        # get port information
        port_random = container.get_local_port(3000)
        port_assigned = container.get_local_port(3001)
        port_range = container.get_local_port(3002), container.get_local_port(3003)

        # close all one off containers we just created
        container.stop()

        # check the ports
        assert port_random is not None
        assert port_assigned.endswith(':49152')
        assert port_range[0].endswith(':49153')
        assert port_range[1].endswith(':49154')

    def test_run_service_with_explicitly_mapped_ports(self):
        # create one off container
        self.base_dir = 'tests/fixtures/ports-composefile'
        self.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'])
        container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]

        # get port information
        port_short = container.get_local_port(3000)
        port_full = container.get_local_port(3001)

        # close all one off containers we just created
        container.stop()

        # check the ports
        assert port_short.endswith(':30000')
        assert port_full.endswith(':30001')

    def test_run_service_with_explicitly_mapped_ip_ports(self):
        # create one off container
        self.base_dir = 'tests/fixtures/ports-composefile'
        self.dispatch([
            'run', '-d',
            '-p', '127.0.0.1:30000:3000',
            '--publish', '127.0.0.1:30001:3001',
            'simple'
        ])
        container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]

        # get port information
        port_short = container.get_local_port(3000)
        port_full = container.get_local_port(3001)

        # close all one off containers we just created
        container.stop()

        # check the ports
        assert port_short == "127.0.0.1:30000"
        assert port_full == "127.0.0.1:30001"

    def test_run_with_expose_ports(self):
        # create one off container
        self.base_dir = 'tests/fixtures/expose-composefile'
        self.dispatch(['run', '-d', '--service-ports', 'simple'])
        container = self.project.get_service('simple').containers(one_off=OneOffFilter.only)[0]

        ports = container.ports
        assert len(ports) == 9
        # exposed ports are not mapped to host ports
        assert ports['3000/tcp'] is None
        assert ports['3001/tcp'] is None
        assert ports['3001/udp'] is None
        assert ports['3002/tcp'] is None
        assert ports['3003/tcp'] is None
        assert ports['3004/tcp'] is None
        assert ports['3005/tcp'] is None
        assert ports['3006/udp'] is None
        assert ports['3007/udp'] is None

        # close all one off containers we just created
        container.stop()

    def test_run_with_custom_name(self):
        self.base_dir = 'tests/fixtures/environment-composefile'
        name = 'the-container-name'
        self.dispatch(['run', '--name', name, 'service', '/bin/true'])

        service = self.project.get_service('service')
        container, = service.containers(stopped=True, one_off=OneOffFilter.only)
        assert container.name == name

    def test_run_service_with_workdir_overridden(self):
        self.base_dir = 'tests/fixtures/run-workdir'
        name = 'service'
        workdir = '/var'
        self.dispatch(['run', '--workdir={workdir}'.format(workdir=workdir), name])
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=True)[0]
        assert workdir == container.get('Config.WorkingDir')

    def test_run_service_with_workdir_overridden_short_form(self):
        self.base_dir = 'tests/fixtures/run-workdir'
        name = 'service'
        workdir = '/var'
        self.dispatch(['run', '-w', workdir, name])
        service = self.project.get_service(name)
        container = service.containers(stopped=True, one_off=True)[0]
        assert workdir == container.get('Config.WorkingDir')

    def test_run_service_with_use_aliases(self):
        filename = 'network-aliases.yml'
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['-f', filename, 'run', '-d', '--use-aliases', 'web', 'top'])

        back_name = '{}_back'.format(self.project.name)
        front_name = '{}_front'.format(self.project.name)

        web_container = self.project.get_service('web').containers(one_off=OneOffFilter.only)[0]

        back_aliases = web_container.get(
            'NetworkSettings.Networks.{}.Aliases'.format(back_name)
        )
        assert 'web' in back_aliases
        front_aliases = web_container.get(
            'NetworkSettings.Networks.{}.Aliases'.format(front_name)
        )
        assert 'web' in front_aliases
        assert 'forward_facing' in front_aliases
        assert 'ahead' in front_aliases

    def test_run_interactive_connects_to_network(self):
        self.base_dir = 'tests/fixtures/networks'

        self.dispatch(['up', '-d'])
        self.dispatch(['run', 'app', 'nslookup', 'app'])
        self.dispatch(['run', 'app', 'nslookup', 'db'])

        containers = self.project.get_service('app').containers(
            stopped=True, one_off=OneOffFilter.only)
        assert len(containers) == 2

        for container in containers:
            networks = container.get('NetworkSettings.Networks')

            assert sorted(list(networks)) == [
                '{}_{}'.format(self.project.name, name)
                for name in ['back', 'front']
            ]

            for _, config in networks.items():
                # TODO: once we drop support for API <1.24, this can be changed to:
                # assert config['Aliases'] == [container.short_id]
                aliases = set(config['Aliases'] or []) - {container.short_id}
                assert not aliases

    def test_run_detached_connects_to_network(self):
        self.base_dir = 'tests/fixtures/networks'
        self.dispatch(['up', '-d'])
        self.dispatch(['run', '-d', 'app', 'top'])

        container = self.project.get_service('app').containers(one_off=OneOffFilter.only)[0]
        networks = container.get('NetworkSettings.Networks')

        assert sorted(list(networks)) == [
            '{}_{}'.format(self.project.name, name)
            for name in ['back', 'front']
        ]

        for _, config in networks.items():
            # TODO: once we drop support for API <1.24, this can be changed to:
            # assert config['Aliases'] == [container.short_id]
            aliases = set(config['Aliases'] or []) - {container.short_id}
            assert not aliases

        assert self.lookup(container, 'app')
        assert self.lookup(container, 'db')

    def test_run_handles_sigint(self):
        proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_*',
            'running'))

        os.kill(proc.pid, signal.SIGINT)
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_*',
            'exited'))

    def test_run_handles_sigterm(self):
        proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_*',
            'running'))

        os.kill(proc.pid, signal.SIGTERM)
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_*',
            'exited'))

    def test_run_handles_sighup(self):
        proc = start_process(self.base_dir, ['run', '-T', 'simple', 'top'])
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_*',
            'running'))

        os.kill(proc.pid, signal.SIGHUP)
        wait_on_condition(ContainerStateCondition(
            self.project.client,
            'simple-composefile_simple_run_*',
            'exited'))

    @mock.patch.dict(os.environ)
    def test_run_unicode_env_values_from_system(self):
        value = 'ą, ć, ę, ł, ń, ó, ś, ź, ż'
        os.environ['BAR'] = value
        self.base_dir = 'tests/fixtures/unicode-environment'
        self.dispatch(['run', 'simple'])

        container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
        environment = container.get('Config.Env')
        assert 'FOO={}'.format(value) in environment

    @mock.patch.dict(os.environ)
    def test_run_env_values_from_system(self):
        os.environ['FOO'] = 'bar'
        os.environ['BAR'] = 'baz'

        self.dispatch(['run', '-e', 'FOO', 'simple', 'true'], None)

        container = self.project.containers(one_off=OneOffFilter.only, stopped=True)[0]
        environment = container.get('Config.Env')
        assert 'FOO=bar' in environment
        assert 'BAR=baz' not in environment

    def test_run_label_flag(self):
        self.base_dir = 'tests/fixtures/run-labels'
        name = 'service'
        self.dispatch(['run', '-l', 'default', '--label', 'foo=baz', name, '/bin/true'])
        service = self.project.get_service(name)
        container, = service.containers(stopped=True, one_off=OneOffFilter.only)
        labels = container.labels
        assert labels['default'] == ''
        assert labels['foo'] == 'baz'
        assert labels['hello'] == 'world'

    def test_rm(self):
        service = self.project.get_service('simple')
        service.create_container()
        kill_service(service)
        assert len(service.containers(stopped=True)) == 1
        self.dispatch(['rm', '--force'], None)
        assert len(service.containers(stopped=True)) == 0
        service = self.project.get_service('simple')
        service.create_container()
        kill_service(service)
        assert len(service.containers(stopped=True)) == 1
        self.dispatch(['rm', '-f'], None)
        assert len(service.containers(stopped=True)) == 0
        service = self.project.get_service('simple')
        service.create_container()
        self.dispatch(['rm', '-fs'], None)
        assert len(service.containers(stopped=True)) == 0

    def test_rm_stop(self):
        self.dispatch(['up', '-d'], None)
        simple = self.project.get_service('simple')
        another = self.project.get_service('another')
        assert len(simple.containers()) == 1
        assert len(another.containers()) == 1
        self.dispatch(['rm', '-fs'], None)
        assert len(simple.containers(stopped=True)) == 0
        assert len(another.containers(stopped=True)) == 0

        self.dispatch(['up', '-d'], None)
        assert len(simple.containers()) == 1
        assert len(another.containers()) == 1
        self.dispatch(['rm', '-fs', 'another'], None)
        assert len(simple.containers()) == 1
        assert len(another.containers(stopped=True)) == 0

    def test_rm_all(self):
        service = self.project.get_service('simple')
        service.create_container(one_off=False)
        service.create_container(one_off=True)
        kill_service(service)
        assert len(service.containers(stopped=True)) == 1
        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 1
        self.dispatch(['rm', '-f'], None)
        assert len(service.containers(stopped=True)) == 0
        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0

        service.create_container(one_off=False)
        service.create_container(one_off=True)
        kill_service(service)
        assert len(service.containers(stopped=True)) == 1
        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 1
        self.dispatch(['rm', '-f', '--all'], None)
        assert len(service.containers(stopped=True)) == 0
        assert len(service.containers(stopped=True, one_off=OneOffFilter.only)) == 0

    def test_stop(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['stop', '-t', '1'], None)

        assert len(service.containers(stopped=True)) == 1
        assert not service.containers(stopped=True)[0].is_running
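
    # With a custom `stop_signal` the process can trap the signal and exit
    # on its own terms, so `stop` yields exit code 0 instead of the engine
    # escalating to SIGKILL.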
    def test_stop_signal(self):
        self.base_dir = 'tests/fixtures/stop-signal-composefile'
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['stop', '-t', '1'], None)
        assert len(service.containers(stopped=True)) == 1
        assert not service.containers(stopped=True)[0].is_running
        assert service.containers(stopped=True)[0].exit_code == 0

    def test_start_no_containers(self):
        result = self.dispatch(['start'], returncode=1)
        assert 'failed' in result.stderr
        assert 'No containers to start' in result.stderr

    def test_up_logging(self):
        self.base_dir = 'tests/fixtures/logging-composefile'
        self.dispatch(['up', '-d'])
        simple = self.project.get_service('simple').containers()[0]
        log_config = simple.get('HostConfig.LogConfig')
        assert log_config
        assert log_config.get('Type') == 'none'

        another = self.project.get_service('another').containers()[0]
        log_config = another.get('HostConfig.LogConfig')
        assert log_config
        assert log_config.get('Type') == 'json-file'
        assert log_config.get('Config')['max-size'] == '10m'

    def test_up_logging_legacy(self):
        self.base_dir = 'tests/fixtures/logging-composefile-legacy'
        self.dispatch(['up', '-d'])

        simple = self.project.get_service('simple').containers()[0]
        log_config = simple.get('HostConfig.LogConfig')
        assert log_config
        assert log_config.get('Type') == 'none'

        another = self.project.get_service('another').containers()[0]
        log_config = another.get('HostConfig.LogConfig')
        assert log_config
        assert log_config.get('Type') == 'json-file'
        assert log_config.get('Config')['max-size'] == '10m'

    def test_pause_unpause(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert not service.containers()[0].is_paused

        self.dispatch(['pause'], None)
        assert service.containers()[0].is_paused

        self.dispatch(['unpause'], None)
        assert not service.containers()[0].is_paused

    def test_pause_no_containers(self):
        result = self.dispatch(['pause'], returncode=1)
        assert 'No containers to pause' in result.stderr

    def test_unpause_no_containers(self):
        result = self.dispatch(['unpause'], returncode=1)
        assert 'No containers to unpause' in result.stderr

    def test_logs_invalid_service_name(self):
        self.dispatch(['logs', 'madeupname'], returncode=1)

    def test_logs_follow(self):
        self.base_dir = 'tests/fixtures/echo-services'
        self.dispatch(['up', '-d'])

        result = self.dispatch(['logs', '-f'])

        if not is_cluster(self.client):
            assert result.stdout.count('\n') == 5
        else:
            # Sometimes logs are picked up from old containers that haven't yet
            # been removed (removal in Swarm is async)
            assert result.stdout.count('\n') >= 5

        assert 'simple' in result.stdout
        assert 'another' in result.stdout
        assert 'exited with code 0' in result.stdout

    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_follow_logs_from_new_containers(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
        self.dispatch(['up', '-d', 'simple'])

        proc = start_process(self.base_dir, ['logs', '-f'])

        self.dispatch(['up', '-d', 'another'])
        another_name = self.project.get_service('another').get_container().name_without_project
        wait_on_condition(
            ContainerStateCondition(
                self.project.client,
                'logs-composefile_another_*',
                'exited'
            )
        )

        simple_name = self.project.get_service('simple').get_container().name_without_project
        self.dispatch(['kill', 'simple'])

        result = wait_on_process(proc)

        assert 'hello' in result.stdout
        assert 'test' in result.stdout
        assert '{} exited with code 0'.format(another_name) in result.stdout
        assert '{} exited with code 137'.format(simple_name) in result.stdout

    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_follow_logs_from_restarted_containers(self):
        self.base_dir = 'tests/fixtures/logs-restart-composefile'
        proc = start_process(self.base_dir, ['up'])

        wait_on_condition(
            ContainerStateCondition(
                self.project.client,
                'logs-restart-composefile_another_*',
                'exited'
            )
        )
        self.dispatch(['kill', 'simple'])

        result = wait_on_process(proc)

        assert result.stdout.count(
            r'logs-restart-composefile_another_1 exited with code 1'
        ) == 3
        assert result.stdout.count('world') == 3

    @pytest.mark.skip(reason="race condition between up and logs")
    def test_logs_default(self):
        self.base_dir = 'tests/fixtures/logs-composefile'
        self.dispatch(['up', '-d'])

        result = self.dispatch(['logs'])
        assert 'hello' in result.stdout
        assert 'test' in result.stdout
        assert 'exited with' not in result.stdout

    def test_logs_on_stopped_containers_exits(self):
        self.base_dir = 'tests/fixtures/echo-services'
        self.dispatch(['up'])

        result = self.dispatch(['logs'])
        assert 'simple' in result.stdout
        assert 'another' in result.stdout
        assert 'exited with' not in result.stdout

    def test_logs_timestamps(self):
        self.base_dir = 'tests/fixtures/echo-services'
        self.dispatch(['up', '-d'])

        result = self.dispatch(['logs', '-f', '-t'])
        assert re.search(r'(\d{4})-(\d{2})-(\d{2})T(\d{2})\:(\d{2})\:(\d{2})', result.stdout)

    def test_logs_tail(self):
        self.base_dir = 'tests/fixtures/logs-tail-composefile'
        self.dispatch(['up'])

        result = self.dispatch(['logs', '--tail', '2'])
        assert 'y\n' in result.stdout
        assert 'z\n' in result.stdout
        assert 'w\n' not in result.stdout
        assert 'x\n' not in result.stdout

    def test_kill(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['kill'], None)

        assert len(service.containers(stopped=True)) == 1
        assert not service.containers(stopped=True)[0].is_running

    def test_kill_signal_sigstop(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['kill', '-s', 'SIGSTOP'], None)

        assert len(service.containers()) == 1
        # The container is still running. It has only been paused
        assert service.containers()[0].is_running

    def test_kill_stopped_service(self):
        self.dispatch(['up', '-d'], None)
        service = self.project.get_service('simple')
        self.dispatch(['kill', '-s', 'SIGSTOP'], None)
        assert service.containers()[0].is_running

        self.dispatch(['kill', '-s', 'SIGKILL'], None)

        assert len(service.containers(stopped=True)) == 1
        assert not service.containers(stopped=True)[0].is_running

    def test_restart(self):
        service = self.project.get_service('simple')
        container = service.create_container()
        service.start_container(container)
        started_at = container.dictionary['State']['StartedAt']

        self.dispatch(['restart', '-t', '1'], None)

        container.inspect()
        # '0001-01-01T00:00:00Z' is the zero value for FinishedAt, so this
        # checks that the container was stopped at least once during restart
        assert container.dictionary['State']['FinishedAt'] != '0001-01-01T00:00:00Z'
        assert container.dictionary['State']['StartedAt'] != started_at

    def test_restart_stopped_container(self):
        service = self.project.get_service('simple')
        container = service.create_container()
        container.start()
        container.kill()
        assert len(service.containers(stopped=True)) == 1

        self.dispatch(['restart', '-t', '1'], None)
        assert len(service.containers(stopped=False)) == 1

    def test_restart_no_containers(self):
        result = self.dispatch(['restart'], returncode=1)
        assert 'No containers to restart' in result.stderr
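
    # `scale` is the legacy standalone command; the test_up_scale_* cases
    # below cover its replacement, `up --scale SERVICE=NUM`.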
    def test_up_scale_to_zero(self):
        self.base_dir = 'tests/fixtures/scale'
        project = self.project

        self.dispatch(['up', '-d'])
        assert len(project.get_service('web').containers()) == 2
        assert len(project.get_service('db').containers()) == 1
        assert len(project.get_service('worker').containers()) == 0

        self.dispatch(['up', '-d', '--scale', 'web=0', '--scale', 'db=0', '--scale', 'worker=0'])
        assert len(project.get_service('web').containers()) == 0
        assert len(project.get_service('db').containers()) == 0
        assert len(project.get_service('worker').containers()) == 0

    def test_port(self):
        self.base_dir = 'tests/fixtures/ports-composefile'
        self.dispatch(['up', '-d'], None)
        container = self.project.get_service('simple').get_container()

        def get_port(number):
            result = self.dispatch(['port', 'simple', str(number)])
            return result.stdout.rstrip()

        assert get_port(3000) == container.get_local_port(3000)
        assert ':49152' in get_port(3001)
        assert ':49153' in get_port(3002)

    def test_expanded_port(self):
        self.base_dir = 'tests/fixtures/ports-composefile'
        self.dispatch(['-f', 'expanded-notation.yml', 'up', '-d'])
        container = self.project.get_service('simple').get_container()

        def get_port(number):
            result = self.dispatch(['port', 'simple', str(number)])
            return result.stdout.rstrip()

        assert get_port(3000) == container.get_local_port(3000)
        assert ':53222' in get_port(3001)
        assert ':53223' in get_port(3002)

    def test_port_with_scale(self):
        self.base_dir = 'tests/fixtures/ports-composefile-scale'
        self.dispatch(['scale', 'simple=2'], None)
        containers = sorted(
            self.project.containers(service_names=['simple']),
            key=attrgetter('name'))

        def get_port(number, index=None):
            if index is None:
                result = self.dispatch(['port', 'simple', str(number)])
            else:
                result = self.dispatch(['port', '--index=' + str(index), 'simple', str(number)])
            return result.stdout.rstrip()

        assert get_port(3000) in (containers[0].get_local_port(3000), containers[1].get_local_port(3000))
        assert get_port(3000, index=containers[0].number) == containers[0].get_local_port(3000)
        assert get_port(3000, index=containers[1].number) == containers[1].get_local_port(3000)
        assert get_port(3002) == ""

    def test_events_json(self):
        events_proc = start_process(self.base_dir, ['events', '--json'])
        self.dispatch(['up', '-d'])
        wait_on_condition(ContainerCountCondition(self.project, 2))

        os.kill(events_proc.pid, signal.SIGINT)
        result = wait_on_process(events_proc, returncode=1)
        lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')]
        assert Counter(e['action'] for e in lines) == {'create': 2, 'start': 2}

    def test_events_human_readable(self):

        def has_timestamp(string):
            str_iso_date, str_iso_time, container_info = string.split(' ', 2)
            try:
                return isinstance(datetime.datetime.strptime(
                    '{} {}'.format(str_iso_date, str_iso_time),
                    '%Y-%m-%d %H:%M:%S.%f'),
                    datetime.datetime)
            except ValueError:
                return False

        events_proc = start_process(self.base_dir, ['events'])
        self.dispatch(['up', '-d', 'simple'])
        wait_on_condition(ContainerCountCondition(self.project, 1))

        os.kill(events_proc.pid, signal.SIGINT)
        result = wait_on_process(events_proc, returncode=1)

        lines = result.stdout.rstrip().split('\n')
        assert len(lines) == 2

        container, = self.project.containers()
        expected_template = ' container {} {}'
        expected_meta_info = ['image=busybox:1.27.2', 'name=simple-composefile_simple_']

        assert expected_template.format('create', container.id) in lines[0]
        assert expected_template.format('start', container.id) in lines[1]

        for line in lines:
            for info in expected_meta_info:
                assert info in line

        assert has_timestamp(lines[0])

    def test_env_file_relative_to_compose_file(self):
        config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
        self.dispatch(['-f', config_path, 'up', '-d'], None)
        self._project = get_project(self.base_dir, [config_path])
        containers = self.project.containers(stopped=True)
        assert len(containers) == 1
        assert "FOO=1" in containers[0].get('Config.Env')

    @mock.patch.dict(os.environ)
    def test_home_and_env_var_in_volume_path(self):
        os.environ['VOLUME_NAME'] = 'my-volume'
        os.environ['HOME'] = '/tmp/home-dir'
        self.base_dir = 'tests/fixtures/volume-path-interpolation'
        self.dispatch(['up', '-d'], None)

        container = self.project.containers(stopped=True)[0]
        actual_host_path = container.get_mount('/container-path')['Source']
        components = actual_host_path.split('/')
        assert components[-2:] == ['home-dir', 'my-volume']

    def test_up_with_default_override_file(self):
        self.base_dir = 'tests/fixtures/override-files'
        self.dispatch(['up', '-d'], None)

        containers = self.project.containers()
        assert len(containers) == 2

        web, db = containers
        assert web.human_readable_command == 'top'
        assert db.human_readable_command == 'top'

    def test_up_with_multiple_files(self):
        self.base_dir = 'tests/fixtures/override-files'
        config_paths = [
            'docker-compose.yml',
            'docker-compose.override.yml',
            'extra.yml',
        ]
        self._project = get_project(self.base_dir, config_paths)
        self.dispatch(
            [
                '-f', config_paths[0],
                '-f', config_paths[1],
                '-f', config_paths[2],
                'up', '-d',
            ],
            None)

        containers = self.project.containers()
        assert len(containers) == 3

        web, other, db = containers
        assert web.human_readable_command == 'top'
        assert db.human_readable_command == 'top'
        assert other.human_readable_command == 'top'

    def test_up_with_extends(self):
        self.base_dir = 'tests/fixtures/extends'
        self.dispatch(['up', '-d'], None)

        assert {s.name for s in self.project.services} == {'mydb', 'myweb'}

        # Sort by name so we get [db, web]
        containers = sorted(
            self.project.containers(stopped=True),
            key=lambda c: c.name,
        )
        assert len(containers) == 2
        web = containers[1]
        db_name = containers[0].name_without_project

        assert set(get_links(web)) == {'db', db_name, 'extends_{}'.format(db_name)}

        expected_env = {"FOO=1", "BAR=2", "BAZ=2"}
        assert expected_env <= set(web.get('Config.Env'))

    def test_top_services_not_running(self):
        self.base_dir = 'tests/fixtures/top'
        result = self.dispatch(['top'])
        assert len(result.stdout) == 0

    def test_top_services_running(self):
        self.base_dir = 'tests/fixtures/top'
        self.dispatch(['up', '-d'])
        result = self.dispatch(['top'])

        assert 'top_service_a' in result.stdout
        assert 'top_service_b' in result.stdout
        assert 'top_not_a_service' not in result.stdout

    def test_top_processes_running(self):
        self.base_dir = 'tests/fixtures/top'
        self.dispatch(['up', '-d'])
        result = self.dispatch(['top'])
        assert result.stdout.count("top") == 4

    def test_forward_exitval(self):
        self.base_dir = 'tests/fixtures/exit-code-from'
        proc = start_process(
            self.base_dir,
            ['up', '--abort-on-container-exit', '--exit-code-from', 'another']
        )

        result = wait_on_process(proc, returncode=1)
        assert 'exit-code-from_another_1 exited with code 1' in result.stdout

    def test_exit_code_from_signal_stop(self):
        self.base_dir = 'tests/fixtures/exit-code-from'
        proc = start_process(
            self.base_dir,
            ['up', '--abort-on-container-exit', '--exit-code-from', 'simple']
        )
        result = wait_on_process(proc, returncode=137)  # SIGKILL
        name = self.project.get_service('another').containers(stopped=True)[0].name_without_project
        assert '{} exited with code 1'.format(name) in result.stdout
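The two exit-code tests encode the contract of `--exit-code-from`: `up` returns the chosen service's exit status, and a container killed by a signal reports 128 + the signal number (137 for SIGKILL). A standalone sketch of the same assertion, assuming a `docker-compose` binary on `PATH` and the `exit-code-from` fixture directory (both assumptions):

import subprocess

proc = subprocess.run(
    ['docker-compose', 'up', '--abort-on-container-exit', '--exit-code-from', 'simple'],
    cwd='tests/fixtures/exit-code-from',
    capture_output=True,
    text=True,
)
# `simple` is killed when `another` exits, so up propagates 128 + 9 (SIGKILL).
assert proc.returncode == 137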
    def test_images(self):
        self.project.get_service('simple').create_container()
        result = self.dispatch(['images'])
        assert 'busybox' in result.stdout
        assert 'simple-composefile_simple_' in result.stdout

    def test_images_default_composefile(self):
        self.base_dir = 'tests/fixtures/multiple-composefiles'
        self.dispatch(['up', '-d'])
        result = self.dispatch(['images'])

        assert 'busybox' in result.stdout
        assert '_another_1' in result.stdout
        assert '_simple_1' in result.stdout

    @mock.patch.dict(os.environ)
    def test_images_tagless_image(self):
        self.base_dir = 'tests/fixtures/tagless-image'

        stream = self.client.build(self.base_dir, decode=True)
        img_id = None
        for data in stream:
            if 'aux' in data:
                img_id = data['aux']['ID']
                break
            if 'stream' in data and 'Successfully built' in data['stream']:
                img_id = self.client.inspect_image(data['stream'].split(' ')[2].strip())['Id']

        assert img_id

        os.environ['IMAGE_ID'] = img_id
        self.project.get_service('foo').create_container()
        result = self.dispatch(['images'])
        assert '<none>' in result.stdout
        assert 'tagless-image_foo_1' in result.stdout

    def test_up_with_override_yaml(self):
        self.base_dir = 'tests/fixtures/override-yaml-files'
        self._project = get_project(self.base_dir, [])
        self.dispatch(['up', '-d'], None)

        containers = self.project.containers()
        assert len(containers) == 2

        web, db = containers
        assert web.human_readable_command == 'sleep 100'
        assert db.human_readable_command == 'top'

    def test_up_with_duplicate_override_yaml_files(self):
        self.base_dir = 'tests/fixtures/duplicate-override-yaml-files'
        with pytest.raises(DuplicateOverrideFileFound):
            get_project(self.base_dir, [])
        self.base_dir = None

    def test_images_use_service_tag(self):
        pull_busybox(self.client)
        self.base_dir = 'tests/fixtures/images-service-tag'
        self.dispatch(['up', '-d', '--build'])
        result = self.dispatch(['images'])

        assert re.search(r'foo1.+test[ \t]+dev', result.stdout) is not None
        assert re.search(r'foo2.+test[ \t]+prod', result.stdout) is not None
        assert re.search(r'foo3.+test[ \t]+latest', result.stdout) is not None

    def test_build_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        result = self.dispatch(['build', '--pull', '--', '--test-service'])
        assert BUILD_PULL_TEXT in result.stdout

    def test_events_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        events_proc = start_process(self.base_dir, ['events', '--json', '--', '--test-service'])
        self.dispatch(['up', '-d', '--', '--test-service'])
        wait_on_condition(ContainerCountCondition(self.project, 1))

        os.kill(events_proc.pid, signal.SIGINT)
        result = wait_on_process(events_proc, returncode=1)
        lines = [json.loads(line) for line in result.stdout.rstrip().split('\n')]
        assert Counter(e['action'] for e in lines) == {'create': 1, 'start': 1}

    def test_exec_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        assert len(self.project.containers()) == 1

        stdout, stderr = self.dispatch(['exec', '-T', '--', '--test-service', 'ls', '-1d', '/'])
        assert stderr == ""
        assert stdout == "/\n"

    def test_images_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        result = self.dispatch(['images', '--', '--test-service'])
        assert "busybox" in result.stdout

    def test_kill_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        service = self.project.get_service('--test-service')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['kill', '--', '--test-service'])

        assert len(service.containers(stopped=True)) == 1
        assert not service.containers(stopped=True)[0].is_running

    def test_logs_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--log-service'])
        result = self.dispatch(['logs', '--', '--log-service'])

        assert 'hello' in result.stdout
        assert 'exited with' not in result.stdout

    def test_port_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        result = self.dispatch(['port', '--', '--test-service', '80'])
        assert result.stdout.strip() == "0.0.0.0:8080"

    def test_ps_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        result = self.dispatch(['ps', '--', '--test-service'])
        assert 'flag-as-service-name_--test-service_1' in result.stdout

    def test_pull_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        result = self.dispatch(['pull', '--', '--test-service'])
        assert 'Pulling --test-service' in result.stderr
        assert 'failed' not in result.stderr

    def test_rm_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '--no-start', '--', '--test-service'])
        service = self.project.get_service('--test-service')
        assert len(service.containers(stopped=True)) == 1
        self.dispatch(['rm', '--force', '--', '--test-service'])
        assert len(service.containers(stopped=True)) == 0

    def test_run_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        result = self.dispatch(['run', '--no-deps', '--', '--test-service', 'echo', '-hello'])
        assert 'hello' in result.stdout
        assert len(self.project.containers()) == 0

    def test_stop_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        service = self.project.get_service('--test-service')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['stop', '-t', '1', '--', '--test-service'])

        assert len(service.containers(stopped=True)) == 1
        assert not service.containers(stopped=True)[0].is_running

    def test_restart_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service'])
        service = self.project.get_service('--test-service')
        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

        self.dispatch(['restart', '-t', '1', '--', '--test-service'])

        assert len(service.containers()) == 1
        assert service.containers()[0].is_running

    def test_up_with_stop_process_flag(self):
        self.base_dir = 'tests/fixtures/flag-as-service-name'
        self.dispatch(['up', '-d', '--', '--test-service', '--log-service'])

        service = self.project.get_service('--test-service')
        another = self.project.get_service('--log-service')
        assert len(service.containers()) == 1
        assert len(another.containers()) == 1

    def test_up_no_log_prefix(self):
        self.base_dir = 'tests/fixtures/echo-services'
        result = self.dispatch(['up', '--no-log-prefix'])

        assert 'simple' in result.stdout
        assert 'another' in result.stdout
        assert 'exited with code 0' in result.stdout
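All of the `*_with_stop_process_flag` tests above exercise the POSIX `--` end-of-options marker: everything after it is treated as a positional argument, which is what lets a service legally be named `--test-service`. A minimal standalone sketch of the pattern, assuming a `docker-compose` binary on `PATH` and the `flag-as-service-name` fixture (both assumptions):

import subprocess

# Without the `--`, the CLI would try to parse --test-service as an option.
subprocess.run(
    ['docker-compose', 'up', '-d', '--', '--test-service'],
    cwd='tests/fixtures/flag-as-service-name',
    check=True,
)
subprocess.run(
    ['docker-compose', 'down'],
    cwd='tests/fixtures/flag-as-service-name',
    check=True,
)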
compose-1.29.2/tests/acceptance/context_test.py000066400000000000000000000031441404620552300216020ustar00rootroot00000000000000
import os
import shutil
import unittest

from docker import ContextAPI

from tests.acceptance.cli_test import dispatch


class ContextTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.docker_dir = os.path.join(os.environ.get("HOME", "/tmp"), '.docker')
        if not os.path.exists(cls.docker_dir):
            os.makedirs(cls.docker_dir)
        f = open(os.path.join(cls.docker_dir, "config.json"), "w")
        f.write("{}")
        f.close()
        cls.docker_config = os.path.join(cls.docker_dir, "config.json")
        os.environ['DOCKER_CONFIG'] = cls.docker_config
        ContextAPI.create_context("testcontext", host="tcp://doesnotexist:8000")

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.docker_dir, ignore_errors=True)

    def setUp(self):
        self.base_dir = 'tests/fixtures/simple-composefile'
        self.override_dir = None

    def dispatch(self, options, project_options=None, returncode=0, stdin=None):
        return dispatch(self.base_dir, options, project_options, returncode, stdin)

    def test_help(self):
        result = self.dispatch(['help'], returncode=0)
        assert '-c, --context NAME' in result.stdout

    def test_fail_on_both_host_and_context_opt(self):
        result = self.dispatch(['-H', 'unix://', '-c', 'default', 'up'], returncode=1)
        assert '-H, --host and -c, --context are mutually exclusive' in result.stderr

    def test_fail_run_on_inexistent_context(self):
        result = self.dispatch(['-c', 'testcontext', 'up', '-d'], returncode=1)
        assert "Couldn't connect to Docker daemon" in result.stderr
compose-1.29.2/tests/conftest.py000066400000000000000000000204721404620552300166210ustar00rootroot00000000000000
import pytest

import tests.acceptance.cli_test

# FIXME Skipping all the acceptance tests when in `--conformity`
non_conformity_tests = [
    "test_build_failed",
    "test_build_failed_forcerm",
    "test_build_log_level",
    "test_build_memory_build_option",
    "test_build_no_cache",
    "test_build_no_cache_pull",
    "test_build_override_dir",
    "test_build_override_dir_invalid_path",
    "test_build_parallel",
    "test_build_plain",
    "test_build_pull",
    "test_build_rm",
    "test_build_shm_size_build_option",
    "test_build_with_buildarg_cli_override",
    "test_build_with_buildarg_from_compose_file",
    "test_build_with_buildarg_old_api_version",
    "test_config_compatibility_mode",
    "test_config_compatibility_mode_from_env",
    "test_config_compatibility_mode_from_env_and_option_precedence",
    "test_config_default",
    "test_config_external_network",
    "test_config_external_network_v3_5",
    "test_config_external_volume_v2",
    "test_config_external_volume_v2_x",
    "test_config_external_volume_v3_4",
    "test_config_external_volume_v3_x",
    "test_config_list_services",
    "test_config_list_volumes",
    "test_config_quiet",
    "test_config_quiet_with_error",
    "test_config_restart",
    "test_config_stdin",
    "test_config_v1",
    "test_config_v3",
    "test_config_with_dot_env",
    "test_config_with_dot_env_and_override_dir",
    "test_config_with_env_file",
    "test_config_with_hash_option",
    "test_create",
    "test_create_with_force_recreate",
    "test_create_with_force_recreate_and_no_recreate",
    "test_create_with_no_recreate",
    "test_down",
    "test_down_invalid_rmi_flag",
    "test_down_signal",
    "test_down_timeout",
    "test_env_file_relative_to_compose_file",
    "test_events_human_readable",
    "test_events_json",
    "test_exec_custom_user",
    "test_exec_detach_long_form",
    "test_exec_novalue_var_dotenv_file",
    "test_exec_service_with_environment_overridden",
    "test_exec_without_tty",
    "test_exec_workdir",
"test_exit_code_from_signal_stop", "test_expanded_port", "test_forward_exitval", "test_help", "test_help_nonexistent", "test_home_and_env_var_in_volume_path", "test_host_not_reachable", "test_host_not_reachable_volumes_from_container", "test_host_not_reachable_volumes_from_container", "test_images", "test_images_default_composefile", "test_images_tagless_image", "test_images_use_service_tag", "test_kill", "test_kill_signal_sigstop", "test_kill_stopped_service", "test_logs_default", "test_logs_follow", "test_logs_follow_logs_from_new_containers", "test_logs_follow_logs_from_restarted_containers", "test_logs_invalid_service_name", "test_logs_on_stopped_containers_exits", "test_logs_tail", "test_logs_timestamps", "test_pause_no_containers", "test_pause_unpause", "test_port", "test_port_with_scale", "test_ps", "test_ps_all", "test_ps_alternate_composefile", "test_ps_default_composefile", "test_ps_services_filter_option", "test_ps_services_filter_status", "test_pull", "test_pull_can_build", "test_pull_with_digest", "test_pull_with_ignore_pull_failures", "test_pull_with_include_deps", "test_pull_with_no_deps", "test_pull_with_parallel_failure", "test_pull_with_quiet", "test_quiet_build", "test_restart", "test_restart_no_containers", "test_restart_stopped_container", "test_rm", "test_rm_all", "test_rm_stop", "test_run_detached_connects_to_network", "test_run_does_not_recreate_linked_containers", "test_run_env_values_from_system", "test_run_handles_sighup", "test_run_handles_sigint", "test_run_handles_sigterm", "test_run_interactive_connects_to_network", "test_run_label_flag", "test_run_one_off_with_multiple_volumes", "test_run_one_off_with_volume", "test_run_one_off_with_volume_merge", "test_run_rm", "test_run_service_with_compose_file_entrypoint", "test_run_service_with_compose_file_entrypoint_and_command_overridden", "test_run_service_with_compose_file_entrypoint_and_empty_string_command", "test_run_service_with_compose_file_entrypoint_overridden", "test_run_service_with_dependencies", "test_run_service_with_dockerfile_entrypoint", "test_run_service_with_dockerfile_entrypoint_and_command_overridden", "test_run_service_with_dockerfile_entrypoint_overridden", "test_run_service_with_environment_overridden", "test_run_service_with_explicitly_mapped_ip_ports", "test_run_service_with_explicitly_mapped_ports", "test_run_service_with_links", "test_run_service_with_map_ports", "test_run_service_with_scaled_dependencies", "test_run_service_with_unset_entrypoint", "test_run_service_with_use_aliases", "test_run_service_with_user_overridden", "test_run_service_with_user_overridden_short_form", "test_run_service_with_workdir_overridden", "test_run_service_with_workdir_overridden_short_form", "test_run_service_without_links", "test_run_service_without_map_ports", "test_run_unicode_env_values_from_system", "test_run_with_custom_name", "test_run_with_expose_ports", "test_run_with_no_deps", "test_run_without_command", "test_scale", "test_scale_v2_2", "test_shorthand_host_opt", "test_shorthand_host_opt_interactive", "test_start_no_containers", "test_stop", "test_stop_signal", "test_top_processes_running", "test_top_services_not_running", "test_top_services_running", "test_unpause_no_containers", "test_up", "test_up_attached", "test_up_detached", "test_up_detached_long_form", "test_up_external_networks", "test_up_handles_abort_on_container_exit", "test_up_handles_abort_on_container_exit_code", "test_up_handles_aborted_dependencies", "test_up_handles_force_shutdown", "test_up_handles_sigint", 
"test_up_handles_sigterm", "test_up_logging", "test_up_logging_legacy", "test_up_missing_network", "test_up_no_ansi", "test_up_no_services", "test_up_no_start", "test_up_no_start_remove_orphans", "test_up_scale_reset", "test_up_scale_scale_down", "test_up_scale_scale_up", "test_up_scale_to_zero", "test_up_with_attach_dependencies", "test_up_with_default_network_config", "test_up_with_default_override_file", "test_up_with_duplicate_override_yaml_files", "test_up_with_extends", "test_up_with_external_default_network", "test_up_with_force_recreate", "test_up_with_force_recreate_and_no_recreate", "test_up_with_healthcheck", "test_up_with_ignore_remove_orphans", "test_up_with_links_v1", "test_up_with_multiple_files", "test_up_with_net_is_invalid", "test_up_with_net_v1", "test_up_with_network_aliases", "test_up_with_network_internal", "test_up_with_network_labels", "test_up_with_network_mode", "test_up_with_network_static_addresses", "test_up_with_networks", "test_up_with_no_deps", "test_up_with_no_recreate", "test_up_with_override_yaml", "test_up_with_pid_mode", "test_up_with_timeout", "test_up_with_volume_labels", "test_fail_on_both_host_and_context_opt", "test_fail_run_on_inexistent_context", ] def pytest_addoption(parser): parser.addoption( "--conformity", action="store_true", default=False, help="Only runs tests that are not black listed as non conformity test. " "The conformity tests check for compatibility with the Compose spec." ) parser.addoption( "--binary", default=tests.acceptance.cli_test.DOCKER_COMPOSE_EXECUTABLE, help="Forces the execution of a binary in the PATH. Default is `docker-compose`." ) def pytest_collection_modifyitems(config, items): if not config.getoption("--conformity"): return if config.getoption("--binary"): tests.acceptance.cli_test.DOCKER_COMPOSE_EXECUTABLE = config.getoption("--binary") print("Binary -> {}".format(tests.acceptance.cli_test.DOCKER_COMPOSE_EXECUTABLE)) skip_non_conformity = pytest.mark.skip(reason="skipping because that's not a conformity test") for item in items: if item.name in non_conformity_tests: print("Skipping '{}' when running in compatibility mode".format(item.name)) item.add_marker(skip_non_conformity) compose-1.29.2/tests/fixtures/000077500000000000000000000000001404620552300162665ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/UpperCaseDir/000077500000000000000000000000001404620552300206145ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/UpperCaseDir/docker-compose.yml000066400000000000000000000001551404620552300242520ustar00rootroot00000000000000simple: image: busybox:1.31.0-uclibc command: top another: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/abort-on-container-exit-0/000077500000000000000000000000001404620552300230735ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/abort-on-container-exit-0/docker-compose.yml000066400000000000000000000001561404620552300265320ustar00rootroot00000000000000simple: image: busybox:1.31.0-uclibc command: top another: image: busybox:1.31.0-uclibc command: ls . 
compose-1.29.2/tests/fixtures/abort-on-container-exit-1/000077500000000000000000000000001404620552300230745ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/abort-on-container-exit-1/docker-compose.yml000066400000000000000000000001731404620552300265320ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: top
another:
  image: busybox:1.31.0-uclibc
  command: ls /thecakeisalie
compose-1.29.2/tests/fixtures/abort-on-container-exit-dependencies/000077500000000000000000000000001404620552300253625ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/abort-on-container-exit-dependencies/docker-compose.yml000066400000000000000000000003001404620552300310100ustar00rootroot00000000000000
version: "2.0"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: top
    depends_on:
      - another
  another:
    image: busybox:1.31.0-uclibc
    command: ls /thecakeisalie
compose-1.29.2/tests/fixtures/build-args/000077500000000000000000000000001404620552300203175ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-args/Dockerfile000066400000000000000000000002351404620552300223110ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
ARG favorite_th_character
RUN echo "Favorite Touhou Character: ${favorite_th_character}"
compose-1.29.2/tests/fixtures/build-args/docker-compose.yml000066400000000000000000000001701404620552300237520ustar00rootroot00000000000000
version: '2.2'
services:
  web:
    build:
      context: .
      args:
        - favorite_th_character=mariya.kirisame
compose-1.29.2/tests/fixtures/build-ctx/000077500000000000000000000000001404620552300201615ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-ctx/Dockerfile000066400000000000000000000001271404620552300221530ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
CMD echo "success"
compose-1.29.2/tests/fixtures/build-memory/000077500000000000000000000000001404620552300206735ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-memory/Dockerfile000066400000000000000000000002351404620552300226650ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
# Report the memory (through the size of the cgroup memory)
RUN echo "memory:" $(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
compose-1.29.2/tests/fixtures/build-memory/docker-compose.yml000066400000000000000000000001011404620552300243200ustar00rootroot00000000000000
version: '3.5'
services:
  service:
    build:
      context: .
compose-1.29.2/tests/fixtures/build-multiple-composefile/000077500000000000000000000000001404620552300235215ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-multiple-composefile/a/000077500000000000000000000000001404620552300237415ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-multiple-composefile/a/Dockerfile000066400000000000000000000000611404620552300257300ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
RUN echo a
CMD top
compose-1.29.2/tests/fixtures/build-multiple-composefile/b/000077500000000000000000000000001404620552300237425ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-multiple-composefile/b/Dockerfile000066400000000000000000000000611404620552300257310ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
RUN echo b
CMD top
compose-1.29.2/tests/fixtures/build-multiple-composefile/docker-compose.yml000066400000000000000000000001011404620552300271560ustar00rootroot00000000000000
version: "2"
services:
  a:
    build: ./a
  b:
    build: ./b
compose-1.29.2/tests/fixtures/build-path-override-dir/000077500000000000000000000000001404620552300227105ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-path-override-dir/docker-compose.yml000066400000000000000000000000331404620552300263410ustar00rootroot00000000000000
foo:
  build: ./build-ctx/
compose-1.29.2/tests/fixtures/build-path/000077500000000000000000000000001404620552300203175ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-path/docker-compose.yml000066400000000000000000000000341404620552300237510ustar00rootroot00000000000000
foo:
  build: ../build-ctx/
compose-1.29.2/tests/fixtures/build-shm-size/000077500000000000000000000000001404620552300211225ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/build-shm-size/Dockerfile000066400000000000000000000002111404620552300231060ustar00rootroot00000000000000
FROM busybox
# Report the shm_size (through the size of /dev/shm)
RUN echo "shm_size:" $(df -h /dev/shm | tail -n 1 | awk '{print $2}')
compose-1.29.2/tests/fixtures/build-shm-size/docker-compose.yml000066400000000000000000000001521404620552300245550ustar00rootroot00000000000000
version: '3.5'
services:
  custom_shm_size:
    build:
      context: .
      shm_size: 100663296 # =96M
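The build-memory and build-shm-size fixtures exist so the suite can verify that memory and `shm_size:` build options actually reach the builder: their RUN steps print the limits the build saw. A hedged sketch of the same probe through the low-level Docker SDK (assuming docker-py is installed and a daemon is reachable; this is not the code path Compose itself uses):

import docker

client = docker.APIClient()
# shmsize is in bytes; 100663296 == 96 MiB, matching the fixture above.
for chunk in client.build(
    path='tests/fixtures/build-shm-size',
    shmsize=100663296,
    decode=True,
):
    if 'stream' in chunk:
        print(chunk['stream'], end='')  # the RUN step echoes "shm_size: 96M"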
compose-1.29.2/tests/fixtures/commands-composefile/000077500000000000000000000000001404620552300223725ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/commands-composefile/docker-compose.yml000066400000000000000000000001431404620552300260250ustar00rootroot00000000000000
implicit:
  image: composetest_test
explicit:
  image: composetest_test
  command: [ "/bin/true" ]
compose-1.29.2/tests/fixtures/compatibility-mode/000077500000000000000000000000001404620552300220615ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/compatibility-mode/docker-compose.yml000066400000000000000000000006621404620552300255220ustar00rootroot00000000000000
version: '3.5'
services:
  foo:
    image: alpine:3.10.1
    command: /bin/true
    deploy:
      replicas: 3
      restart_policy:
        condition: any
        max_attempts: 7
      resources:
        limits:
          memory: 300M
          cpus: '0.7'
        reservations:
          memory: 100M
    volumes:
      - foo:/bar
    networks:
      - bar

volumes:
  foo:
    driver: default

networks:
  bar:
    attachable: true
compose-1.29.2/tests/fixtures/config-profiles/000077500000000000000000000000001404620552300213545ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/config-profiles/docker-compose.yml000066400000000000000000000003531404620552300250120ustar00rootroot00000000000000
version: '3.8'
services:
  frontend:
    image: frontend
    profiles: ["frontend", "gui"]
  phpmyadmin:
    image: phpmyadmin
    depends_on:
      - db
    profiles:
      - debug
  backend:
    image: backend
  db:
    image: mysql
compose-1.29.2/tests/fixtures/default-env-file/000077500000000000000000000000001404620552300214155ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/default-env-file/.env000066400000000000000000000000671404620552300222110ustar00rootroot00000000000000
IMAGE=alpine:latest
COMMAND=true
PORT1=5643
PORT2=9999
compose-1.29.2/tests/fixtures/default-env-file/.env2000066400000000000000000000000701404620552300222650ustar00rootroot00000000000000
IMAGE=alpine:latest
COMMAND=false
PORT1=5644
PORT2=9998
compose-1.29.2/tests/fixtures/default-env-file/alt/000077500000000000000000000000001404620552300221755ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/default-env-file/alt/.env000066400000000000000000000000731404620552300227660ustar00rootroot00000000000000
IMAGE=alpine:3.10.1
COMMAND=echo uwu
PORT1=3341
PORT2=4449
compose-1.29.2/tests/fixtures/default-env-file/docker-compose.yml000066400000000000000000000001711404620552300250510ustar00rootroot00000000000000
version: '2.4'
services:
  web:
    image: ${IMAGE}
    command: ${COMMAND}
    ports:
      - $PORT1
      - $PORT2
compose-1.29.2/tests/fixtures/dockerfile-with-volume/000077500000000000000000000000001404620552300226535ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/dockerfile-with-volume/Dockerfile000066400000000000000000000001311404620552300246400ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
VOLUME /data
CMD top
compose-1.29.2/tests/fixtures/duplicate-override-yaml-files/000077500000000000000000000000001404620552300241155ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yaml000066400000000000000000000000301404620552300315220ustar00rootroot00000000000000
db:
  command: "top"
compose-1.29.2/tests/fixtures/duplicate-override-yaml-files/docker-compose.override.yml000066400000000000000000000000361404620552300313670ustar00rootroot00000000000000
db:
  command: "sleep 300"
compose-1.29.2/tests/fixtures/duplicate-override-yaml-files/docker-compose.yml000066400000000000000000000002271404620552300275530ustar00rootroot00000000000000
web:
  image: busybox:1.31.0-uclibc
  command: "sleep 100"
  links:
    - db
db:
  image: busybox:1.31.0-uclibc
  command: "sleep 200"
compose-1.29.2/tests/fixtures/echo-services-dependencies/000077500000000000000000000000001404620552300234515ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/echo-services-dependencies/docker-compose.yml000066400000000000000000000003031404620552300271020ustar00rootroot00000000000000
version: "2.0"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: echo simple
    depends_on:
      - another
  another:
    image: busybox:1.31.0-uclibc
    command: echo another
compose-1.29.2/tests/fixtures/echo-services/000077500000000000000000000000001404620552300210255ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/echo-services/docker-compose.yml000066400000000000000000000001761404620552300244660ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: echo simple
another:
  image: busybox:1.31.0-uclibc
  command: echo another
compose-1.29.2/tests/fixtures/entrypoint-composefile/000077500000000000000000000000001404620552300230045ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/entrypoint-composefile/docker-compose.yml000066400000000000000000000001431404620552300264370ustar00rootroot00000000000000
version: "2"
services:
  test:
    image: busybox
    entrypoint: printf
    command: default args
compose-1.29.2/tests/fixtures/entrypoint-dockerfile/000077500000000000000000000000001404620552300226065ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/entrypoint-dockerfile/Dockerfile000066400000000000000000000001621404620552300245770ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
LABEL com.docker.compose.test_image=true
ENTRYPOINT ["printf"]
CMD ["default", "args"]
compose-1.29.2/tests/fixtures/entrypoint-dockerfile/docker-compose.yml000066400000000000000000000000541404620552300262420ustar00rootroot00000000000000
version: "2"
services:
  test:
    build: .
compose-1.29.2/tests/fixtures/env-file-override/000077500000000000000000000000001404620552300216105ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/env-file-override/.env000066400000000000000000000000211404620552300223720ustar00rootroot00000000000000
WHEREAMI=default
compose-1.29.2/tests/fixtures/env-file-override/.env.conf000066400000000000000000000000421404620552300233210ustar00rootroot00000000000000
WHEREAMI
DEFAULT_CONF_LOADED=true
compose-1.29.2/tests/fixtures/env-file-override/.env.override000066400000000000000000000000221404620552300242110ustar00rootroot00000000000000
WHEREAMI=override
compose-1.29.2/tests/fixtures/env-file-override/docker-compose.yml000066400000000000000000000001401404620552300252400ustar00rootroot00000000000000
version: '3.7'
services:
  test:
    image: busybox
    env_file: .env.conf
    entrypoint: env
compose-1.29.2/tests/fixtures/env-file/000077500000000000000000000000001404620552300177735ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/env-file/docker-compose.yml000066400000000000000000000001021404620552300234210ustar00rootroot00000000000000
web:
  image: busybox
  command: /bin/true
  env_file: ./test.env
compose-1.29.2/tests/fixtures/env-file/test.env000066400000000000000000000000061404620552300214600ustar00rootroot00000000000000
FOO=1
compose-1.29.2/tests/fixtures/env/000077500000000000000000000000001404620552300170565ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/env/one.env000066400000000000000000000001731404620552300203520ustar00rootroot00000000000000
# Keep the blank lines and comments in this file, please

ONE=2
TWO=1
# (thanks)
THREE=3

FOO=bar
# FOO=somethingelse
compose-1.29.2/tests/fixtures/env/resolve.env000066400000000000000000000000551404620552300212470ustar00rootroot00000000000000
FILE_DEF=bär
FILE_DEF_EMPTY=
ENV_DEF
NO_DEF
compose-1.29.2/tests/fixtures/env/three.env000066400000000000000000000000421404620552300206730ustar00rootroot00000000000000
FOO=NO $ENV VAR
DOO=NO ${ENV} VAR
compose-1.29.2/tests/fixtures/env/two.env000066400000000000000000000000201404620552300203710ustar00rootroot00000000000000
FOO=baz
DOO=dah
compose-1.29.2/tests/fixtures/environment-composefile/000077500000000000000000000000001404620552300231355ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/environment-composefile/docker-compose.yml000066400000000000000000000001451404620552300265720ustar00rootroot00000000000000
service:
  image: busybox:1.31.0-uclibc
  command: top
  environment:
    foo: bar
    hello: world
compose-1.29.2/tests/fixtures/environment-exec/000077500000000000000000000000001404620552300215545ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/environment-exec/docker-compose.yml000066400000000000000000000002041404620552300252070ustar00rootroot00000000000000
version: "2.2"
services:
  service:
    image: busybox:1.27.2
    command: top
    environment:
      foo: bar
      hello: world
compose-1.29.2/tests/fixtures/environment-interpolation-with-defaults/000077500000000000000000000000001404620552300262755ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/environment-interpolation-with-defaults/docker-compose.yml000066400000000000000000000004201404620552300317300ustar00rootroot00000000000000
version: "2.1"
services:
  web:
    # set value with default, default must be ignored
    image: ${IMAGE:-alpine}
    # unset value with default value
    ports:
      - "${HOST_PORT:-80}:8000"
    # unset value with empty default
    hostname: "host-${UNSET_VALUE:-}"
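The interpolation fixtures cover `${VAR}`, `${VAR:-default}` and the `$$` escape. A rough illustrative sketch of the substitution rule they exercise (an assumption-laden simplification, not Compose's actual implementation, which lives in compose/config/interpolation.py and also handles unbracketed `$VAR` and `${VAR-default}`):

import re

def interpolate(value, env):
    # Protect the $$ escape first, then expand ${VAR:-default} and ${VAR}.
    def repl(match):
        name, _, default = match.group(1).partition(':-')
        return env.get(name, default)
    value = value.replace('$$', '\x00')
    value = re.sub(r'\$\{([^}]+)\}', repl, value)
    return value.replace('\x00', '$')

assert interpolate('${IMAGE:-alpine}', {}) == 'alpine'
assert interpolate('host-${UNSET_VALUE:-}', {}) == 'host-'
assert interpolate('$${ESCAPED}', {}) == '${ESCAPED}'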
compose-1.29.2/tests/fixtures/environment-interpolation/000077500000000000000000000000001404620552300235175ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/environment-interpolation/docker-compose.yml000066400000000000000000000004121404620552300271510ustar00rootroot00000000000000
web:
  # unbracketed name
  image: $IMAGE
  # array element
  ports:
    - "${HOST_PORT}:8000"
  # dictionary item value
  labels:
    mylabel: "${LABEL_VALUE}"
  # unset value
  hostname: "host-${UNSET_VALUE}"
  # escaped interpolation
  command: "$${ESCAPED}"
compose-1.29.2/tests/fixtures/exec-novalue-var/000077500000000000000000000000001404620552300214475ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/exec-novalue-var/docker-compose.yml000066400000000000000000000001351404620552300251030ustar00rootroot00000000000000
version: '3'
services:
  nginx:
    image: nginx
    environment:
      - CHECK_VAR=${MYVAR}
compose-1.29.2/tests/fixtures/exit-code-from/000077500000000000000000000000001404620552300211105ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/exit-code-from/docker-compose.yml000066400000000000000000000002301404620552300245400ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: sh -c "echo hello && tail -f /dev/null"
another:
  image: busybox:1.31.0-uclibc
  command: /bin/false
compose-1.29.2/tests/fixtures/expose-composefile/000077500000000000000000000000001404620552300220745ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/expose-composefile/docker-compose.yml000066400000000000000000000002561404620552300255340ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: top
  expose:
    - '3000'
    - '3001/tcp'
    - '3001/udp'
    - '3002-3003'
    - '3004-3005/tcp'
    - '3006-3007/udp'
compose-1.29.2/tests/fixtures/extends/000077500000000000000000000000001404620552300177405ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/extends/circle-1.yml000066400000000000000000000002231404620552300220570ustar00rootroot00000000000000
foo:
  image: busybox
bar:
  image: busybox
web:
  extends:
    file: circle-2.yml
    service: other
baz:
  image: busybox
quux:
  image: busybox
compose-1.29.2/tests/fixtures/extends/circle-2.yml000066400000000000000000000002231404620552300220600ustar00rootroot00000000000000
foo:
  image: busybox
bar:
  image: busybox
other:
  extends:
    file: circle-1.yml
    service: web
baz:
  image: busybox
quux:
  image: busybox
compose-1.29.2/tests/fixtures/extends/common-env-labels-ulimits.yml000066400000000000000000000003041404620552300254620ustar00rootroot00000000000000
web:
  extends:
    file: common.yml
    service: web
  environment:
    - FOO=2
    - BAZ=3
  labels: ['label=one']
  ulimits:
    nproc: 65535
    memlock:
      soft: 1024
      hard: 2048
compose-1.29.2/tests/fixtures/extends/common.yml000066400000000000000000000001361404620552300217530ustar00rootroot00000000000000
web:
  image: busybox
  command: /bin/true
  net: host
  environment:
    - FOO=1
    - BAR=1
compose-1.29.2/tests/fixtures/extends/docker-compose.yml000066400000000000000000000003641404620552300234000ustar00rootroot00000000000000
myweb:
  extends:
    file: common.yml
    service: web
  command: top
  links:
    - "mydb:db"
  environment:
    # leave FOO alone
    # override BAR
    BAR: "2"
    # add BAZ
    BAZ: "2"
  net: bridge
mydb:
  image: busybox
  command: top
compose-1.29.2/tests/fixtures/extends/healthcheck-1.yml000066400000000000000000000002451404620552300230650ustar00rootroot00000000000000
version: '2.1'
services:
  demo:
    image: foobar:latest
    healthcheck:
      test: ["CMD", "/health.sh"]
      interval: 10s
      timeout: 5s
      retries: 36
compose-1.29.2/tests/fixtures/extends/healthcheck-2.yml000066400000000000000000000001401404620552300230600ustar00rootroot00000000000000
version: '2.1'
services:
  demo:
    extends:
      file: healthcheck-1.yml
      service: demo
compose-1.29.2/tests/fixtures/extends/invalid-links.yml000066400000000000000000000001751404620552300232320ustar00rootroot00000000000000
mydb:
  build: '.'
myweb:
  build: '.'
  extends:
    service: web
  command: top
web:
  build: '.'
  links:
    - "mydb:db"
compose-1.29.2/tests/fixtures/extends/invalid-net-v2.yml000066400000000000000000000002541404620552300232230ustar00rootroot00000000000000
version: "2"
services:
  myweb:
    build: '.'
    extends:
      service: web
    command: top
  web:
    build: '.'
    network_mode: "service:net"
  net:
    build: '.'
compose-1.29.2/tests/fixtures/extends/invalid-net.yml000066400000000000000000000001471404620552300226770ustar00rootroot00000000000000
myweb:
  build: '.'
  extends:
    service: web
  command: top
web:
  build: '.'
  net: "container:db"
compose-1.29.2/tests/fixtures/extends/invalid-volumes.yml000066400000000000000000000001541404620552300236010ustar00rootroot00000000000000
myweb:
  build: '.'
  extends:
    service: web
  command: top
web:
  build: '.'
  volumes_from:
    - "db"
compose-1.29.2/tests/fixtures/extends/nested-intermediate.yml000066400000000000000000000001371404620552300244160ustar00rootroot00000000000000
webintermediate:
  extends:
    file: common.yml
    service: web
  environment:
    - "FOO=2"
compose-1.29.2/tests/fixtures/extends/nested.yml000066400000000000000000000001561404620552300217470ustar00rootroot00000000000000
myweb:
  extends:
    file: nested-intermediate.yml
    service: webintermediate
  environment:
    - "BAR=2"
compose-1.29.2/tests/fixtures/extends/no-file-specified.yml000066400000000000000000000001631404620552300237450ustar00rootroot00000000000000
myweb:
  extends:
    service: web
  environment:
    - "BAR=1"
web:
  image: busybox
  environment:
    - "BAZ=3"
compose-1.29.2/tests/fixtures/extends/nonexistent-path-base.yml000066400000000000000000000001371404620552300247040ustar00rootroot00000000000000
dnebase:
  build: nonexistent.path
  command: /bin/true
  environment:
    - FOO=1
    - BAR=1
compose-1.29.2/tests/fixtures/extends/nonexistent-path-child.yml000066400000000000000000000002171404620552300250540ustar00rootroot00000000000000
dnechild:
  extends:
    file: nonexistent-path-base.yml
    service: dnebase
  image: busybox
  command: /bin/true
  environment:
    - BAR=2
compose-1.29.2/tests/fixtures/extends/nonexistent-service.yml000066400000000000000000000000621404620552300244750ustar00rootroot00000000000000
web:
  image: busybox
  extends:
    service: foo
compose-1.29.2/tests/fixtures/extends/service-with-invalid-schema.yml000066400000000000000000000001111404620552300257500ustar00rootroot00000000000000
myweb:
  extends:
    file: valid-composite-extends.yml
    service: web
compose-1.29.2/tests/fixtures/extends/service-with-valid-composite-extends.yml000066400000000000000000000001301404620552300276350ustar00rootroot00000000000000
myweb:
  build: '.'
  extends:
    file: 'valid-composite-extends.yml'
    service: web
compose-1.29.2/tests/fixtures/extends/specify-file-as-self.yml000066400000000000000000000004221404620552300243700ustar00rootroot00000000000000
myweb:
  extends:
    file: specify-file-as-self.yml
    service: web
  environment:
    - "BAR=1"
web:
  extends:
    file: specify-file-as-self.yml
    service: otherweb
  image: busybox
  environment:
    - "BAZ=3"
otherweb:
  image: busybox
  environment:
    - "YEP=1"
compose-1.29.2/tests/fixtures/extends/valid-common-config.yml000066400000000000000000000001441404620552300243120ustar00rootroot00000000000000
myweb:
  build: '.'
  extends:
    file: valid-common.yml
    service: common-config
  command: top
compose-1.29.2/tests/fixtures/extends/valid-common.yml000066400000000000000000000000521404620552300230450ustar00rootroot00000000000000
common-config:
  environment:
    - FOO=1
compose-1.29.2/tests/fixtures/extends/valid-composite-extends.yml000066400000000000000000000000241404620552300252260ustar00rootroot00000000000000
web:
  command: top
compose-1.29.2/tests/fixtures/extends/valid-interpolation-2.yml000066400000000000000000000000671404620552300246110ustar00rootroot00000000000000
web:
  build: '.'
  hostname: "host-${HOSTNAME_VALUE}"
compose-1.29.2/tests/fixtures/extends/valid-interpolation.yml000066400000000000000000000001261404620552300244460ustar00rootroot00000000000000
myweb:
  extends:
    service: web
    file: valid-interpolation-2.yml
  command: top
compose-1.29.2/tests/fixtures/extends/verbose-and-shorthand.yml000066400000000000000000000002611404620552300246570ustar00rootroot00000000000000
base:
  image: busybox
  environment:
    - "BAR=1"
verbose:
  extends:
    service: base
  environment:
    - "FOO=1"
shorthand:
  extends: base
  environment:
    - "FOO=2"
compose-1.29.2/tests/fixtures/flag-as-service-name/000077500000000000000000000000001404620552300221545ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/flag-as-service-name/Dockerfile000066400000000000000000000001201404620552300241400ustar00rootroot00000000000000
FROM busybox:1.27.2
LABEL com.docker.compose.test_image=true
CMD echo "success"
compose-1.29.2/tests/fixtures/flag-as-service-name/docker-compose.yml000066400000000000000000000003501404620552300256070ustar00rootroot00000000000000
version: "2"
services:
  --test-service:
    image: busybox:1.27.2
    build: .
    command: top
    ports:
      - "8080:80"
  --log-service:
    image: busybox:1.31.0-uclibc
    command: sh -c "echo hello && tail -f /dev/null"
compose-1.29.2/tests/fixtures/healthcheck/000077500000000000000000000000001404620552300205315ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/healthcheck/docker-compose.yml000066400000000000000000000006021404620552300241640ustar00rootroot00000000000000
version: "3"
services:
  passes:
    image: busybox
    command: top
    healthcheck:
      test: "/bin/true"
      interval: 1s
      timeout: 30m
      retries: 1
  fails:
    image: busybox
    command: top
    healthcheck:
      test: ["CMD", "/bin/false"]
      interval: 2.5s
      retries: 2
  disabled:
    image: busybox
    command: top
    healthcheck:
      disable: true
compose-1.29.2/tests/fixtures/images-service-tag/000077500000000000000000000000001404620552300217425ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/images-service-tag/Dockerfile000066400000000000000000000000521404620552300237310ustar00rootroot00000000000000
FROM busybox:1.31.0-uclibc
RUN touch /foo
compose-1.29.2/tests/fixtures/images-service-tag/docker-compose.yml000066400000000000000000000002301404620552300253720ustar00rootroot00000000000000
version: "2.4"
services:
  foo1:
    build: .
    image: test:dev
  foo2:
    build: .
    image: test:prod
  foo3:
    build: .
    image: test:latest
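The extends fixtures pin down the merge rules the suite asserts in test_up_with_extends: scalar options from the extending service win, while maps such as environment are merged key by key. An illustrative sketch of that merge, simplified to flat dicts (an assumption; the real logic lives in compose/config/config.py):

def merge_service(base, override):
    # Scalars from the override win; the environment map is merged per key.
    merged = dict(base)
    for key, value in override.items():
        if key == 'environment':
            env = dict(base.get('environment', {}))
            env.update(value)
            merged[key] = env
        else:
            merged[key] = value
    return merged

base = {'image': 'busybox', 'environment': {'FOO': '1', 'BAR': '1'}}
override = {'command': 'top', 'environment': {'BAR': '2', 'BAZ': '2'}}
# Matches the FOO=1, BAR=2, BAZ=2 outcome asserted by test_up_with_extends.
assert merge_service(base, override)['environment'] == {'FOO': '1', 'BAR': '2', 'BAZ': '2'}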
compose-1.29.2/tests/fixtures/invalid-composefile/000077500000000000000000000000001404620552300222175ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/invalid-composefile/invalid.yml000066400000000000000000000000621404620552300243660ustar00rootroot00000000000000
notaservice: oops
web:
  image: 'alpine:edge'
compose-1.29.2/tests/fixtures/ipc-mode/000077500000000000000000000000001404620552300177635ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/ipc-mode/docker-compose.yml000066400000000000000000000004211404620552300234150ustar00rootroot00000000000000
version: "2.4"
services:
  service:
    image: busybox
    command: top
    ipc: "service:shareable"
  container:
    image: busybox
    command: top
    ipc: "container:composetest_ipc_mode_container"
  shareable:
    image: busybox
    command: top
    ipc: shareable
compose-1.29.2/tests/fixtures/links-composefile/000077500000000000000000000000001404620552300217115ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/links-composefile/docker-compose.yml000066400000000000000000000002341404620552300253450ustar00rootroot00000000000000
db:
  image: busybox:1.27.2
  command: top
web:
  image: busybox:1.27.2
  command: top
  links:
    - db:db
console:
  image: busybox:1.27.2
  command: top
compose-1.29.2/tests/fixtures/logging-composefile-legacy/000077500000000000000000000000001404620552300234615ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/logging-composefile-legacy/docker-compose.yml000066400000000000000000000002731404620552300271200ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: top
  log_driver: "none"
another:
  image: busybox:1.31.0-uclibc
  command: top
  log_driver: "json-file"
  log_opt:
    max-size: "10m"
compose-1.29.2/tests/fixtures/logging-composefile/000077500000000000000000000000001404620552300222175ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/logging-composefile/docker-compose.yml000066400000000000000000000004021404620552300256500ustar00rootroot00000000000000
version: "2"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: top
    logging:
      driver: "none"
  another:
    image: busybox:1.31.0-uclibc
    command: top
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
compose-1.29.2/tests/fixtures/logs-composefile/000077500000000000000000000000001404620552300215355ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/logs-composefile/docker-compose.yml000066400000000000000000000002651404620552300251750ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: sh -c "sleep 1 && echo hello && tail -f /dev/null"
another:
  image: busybox:1.31.0-uclibc
  command: sh -c "sleep 1 && echo test"
compose-1.29.2/tests/fixtures/logs-restart-composefile/000077500000000000000000000000001404620552300232175ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/logs-restart-composefile/docker-compose.yml000066400000000000000000000003231404620552300266520ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: sh -c "echo hello && tail -f /dev/null"
another:
  image: busybox:1.31.0-uclibc
  command: sh -c "sleep 2 && echo world && /bin/false"
  restart: "on-failure:2"
compose-1.29.2/tests/fixtures/logs-tail-composefile/000077500000000000000000000000001404620552300224645ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/logs-tail-composefile/docker-compose.yml000066400000000000000000000001371404620552300261220ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: sh -c "echo w && echo x && echo y && echo z"
compose-1.29.2/tests/fixtures/longer-filename-composefile/000077500000000000000000000000001404620552300236355ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/longer-filename-composefile/docker-compose.yaml000066400000000000000000000001031404620552300274250ustar00rootroot00000000000000
definedinyamlnotyml:
  image: busybox:1.31.0-uclibc
  command: top
compose-1.29.2/tests/fixtures/multiple-composefiles/000077500000000000000000000000001404620552300226075ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/multiple-composefiles/compose2.yml000066400000000000000000000000721404620552300250600ustar00rootroot00000000000000
yetanother:
  image: busybox:1.31.0-uclibc
  command: top
compose-1.29.2/tests/fixtures/multiple-composefiles/docker-compose.yml000066400000000000000000000001551404620552300262450ustar00rootroot00000000000000
simple:
  image: busybox:1.31.0-uclibc
  command: top
another:
  image: busybox:1.31.0-uclibc
  command: top
compose-1.29.2/tests/fixtures/net-container/000077500000000000000000000000001404620552300210345ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/net-container/docker-compose.yml000066400000000000000000000001411404620552300244650ustar00rootroot00000000000000
foo:
  image: busybox
  command: top
  net: "container:bar"
bar:
  image: busybox
  command: top
compose-1.29.2/tests/fixtures/net-container/v2-invalid.yml000066400000000000000000000002071404620552300235310ustar00rootroot00000000000000
version: "2"
services:
  foo:
    image: busybox
    command: top
  bar:
    image: busybox
    command: top
    net: "container:foo"
compose-1.29.2/tests/fixtures/networks/000077500000000000000000000000001404620552300201425ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/networks/bridge.yml000066400000000000000000000001601404620552300221160ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: busybox
    command: top
    networks:
      - bridge
      - default
compose-1.29.2/tests/fixtures/networks/default-network-config.yml000066400000000000000000000003771404620552300252500ustar00rootroot00000000000000
version: "2"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: top
  another:
    image: busybox:1.31.0-uclibc
    command: top
networks:
  default:
    driver: bridge
    driver_opts:
      "com.docker.network.bridge.enable_icc": "false"
compose-1.29.2/tests/fixtures/networks/docker-compose.yml000066400000000000000000000004741404620552300236030ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: alpine:3.10.1
    command: top
    networks: ["front"]
  app:
    image: alpine:3.10.1
    command: top
    networks: ["front", "back"]
    links:
      - "db:database"
  db:
    image: alpine:3.10.1
    command: top
    networks: ["back"]
networks:
  front: {}
  back: {}
compose-1.29.2/tests/fixtures/networks/external-default.yml000066400000000000000000000003341404620552300241310ustar00rootroot00000000000000
version: "2"
services:
  simple:
    image: busybox:1.31.0-uclibc
    command: top
  another:
    image: busybox:1.31.0-uclibc
    command: top
networks:
  default:
    external:
      name: composetest_external_network
compose-1.29.2/tests/fixtures/networks/external-networks-v3-5.yml000066400000000000000000000003151404620552300250500ustar00rootroot00000000000000
version: "3.5"
services:
  web:
    image: busybox
    command: top
    networks:
      - foo
      - bar
networks:
  foo:
    external: true
    name: some_foo
  bar:
    external:
      name: some_bar
compose-1.29.2/tests/fixtures/networks/external-networks.yml000066400000000000000000000003161404620552300243610ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: busybox
    command: top
    networks:
      - networks_foo
      - bar
networks:
  networks_foo:
    external: true
  bar:
    external:
      name: networks_bar
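The external-network fixtures declare networks Compose must not create itself, so a run only succeeds if the network already exists. A hedged sketch of preparing and using one, assuming docker-py and a `docker-compose` binary on `PATH` (both assumptions):

import subprocess

import docker

client = docker.from_env()
# The compose file marks the network as external, so it must exist beforehand.
client.networks.create('composetest_external_network', driver='bridge')

subprocess.run(
    ['docker-compose', '-f', 'external-default.yml', 'up', '-d'],
    cwd='tests/fixtures/networks',
    check=True,
)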
compose-1.29.2/tests/fixtures/networks/missing-network.yml000066400000000000000000000001561404620552300240270ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: busybox
    command: top
    networks: ["foo"]
networks:
  bar: {}
compose-1.29.2/tests/fixtures/networks/network-aliases.yml000066400000000000000000000003121404620552300237710ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: busybox
    command: top
    networks:
      front:
        aliases:
          - forward_facing
          - ahead
      back:
networks:
  front: {}
  back: {}
compose-1.29.2/tests/fixtures/networks/network-internal.yml000077500000000000000000000002371404620552300241750ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: busybox
    command: top
    networks:
      - internal
networks:
  internal:
    driver: bridge
    internal: True
compose-1.29.2/tests/fixtures/networks/network-label.yml000066400000000000000000000002711404620552300234330ustar00rootroot00000000000000
version: "2.1"
services:
  web:
    image: busybox
    command: top
    networks:
      - network_with_label
networks:
  network_with_label:
    labels:
      - "label_key=label_val"
compose-1.29.2/tests/fixtures/networks/network-mode.yml000066400000000000000000000006551404620552300233060ustar00rootroot00000000000000
version: "2"
services:
  bridge:
    image: busybox
    command: top
    network_mode: bridge
  service:
    image: busybox
    command: top
    network_mode: "service:bridge"
  container:
    image: busybox
    command: top
    network_mode: "container:composetest_network_mode_container"
  host:
    image: busybox
    command: top
    network_mode: host
  none:
    image: busybox
    command: top
    network_mode: none
compose-1.29.2/tests/fixtures/networks/network-static-addresses.yml000077500000000000000000000006751404620552300256310ustar00rootroot00000000000000
version: "2"
services:
  web:
    image: busybox
    command: top
    networks:
      static_test:
        ipv4_address: 172.16.100.100
        ipv6_address: fe80::1001:100
networks:
  static_test:
    driver: bridge
    driver_opts:
      com.docker.network.enable_ipv6: "true"
    ipam:
      driver: default
      config:
        - subnet: 172.16.100.0/24
          gateway: 172.16.100.1
        - subnet: fe80::/64
          gateway: fe80::1001:1
compose-1.29.2/tests/fixtures/no-build/000077500000000000000000000000001404620552300177775ustar00rootroot00000000000000
compose-1.29.2/tests/fixtures/no-build/docker-compose.yml000066400000000000000000000004121404620552300234310ustar00rootroot00000000000000
version: "3"
services:
  my-alpine:
    image: alpine:3.12
    container_name: alpine
    entrypoint: 'echo It works!'
    build:
      context: /this/path/doesnt/exist
      # and we don't really care. We just want to run containers already pulled.
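The no-build fixture deliberately points at a build context that does not exist: it only makes sense together with `up --no-build`, which skips building and runs containers from images already present. A minimal sketch, assuming a `docker-compose` binary on `PATH` and that `alpine:3.12` has already been pulled (both assumptions):

import subprocess

# --no-build skips the (impossible) build and uses the pulled image directly.
subprocess.run(
    ['docker-compose', 'up', '--no-build', '-d'],
    cwd='tests/fixtures/no-build',
    check=True,
)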
compose-1.29.2/tests/fixtures/no-composefile/000077500000000000000000000000001404620552300212055ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/no-composefile/.gitignore000066400000000000000000000000001404620552300231630ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/no-links-composefile/000077500000000000000000000000001404620552300223235ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/no-links-composefile/docker-compose.yml000066400000000000000000000002341404620552300257570ustar00rootroot00000000000000db: image: busybox:1.31.0-uclibc command: top web: image: busybox:1.31.0-uclibc command: top console: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/no-services/000077500000000000000000000000001404620552300205235ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/no-services/docker-compose.yml000066400000000000000000000000541404620552300241570ustar00rootroot00000000000000version: "2" networks: foo: {} bar: {} compose-1.29.2/tests/fixtures/override-files/000077500000000000000000000000001404620552300212055ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/override-files/docker-compose.override.yml000066400000000000000000000001151404620552300264550ustar00rootroot00000000000000version: '2.2' services: web: command: "top" db: command: "top" compose-1.29.2/tests/fixtures/override-files/docker-compose.yml000066400000000000000000000002671404620552300246470ustar00rootroot00000000000000version: '2.2' services: web: image: busybox:1.31.0-uclibc command: "sleep 200" depends_on: - db db: image: busybox:1.31.0-uclibc command: "sleep 200" compose-1.29.2/tests/fixtures/override-files/extra.yml000066400000000000000000000002131404620552300230470ustar00rootroot00000000000000version: '2.2' services: web: depends_on: - db - other other: image: busybox:1.31.0-uclibc command: "top" compose-1.29.2/tests/fixtures/override-yaml-files/000077500000000000000000000000001404620552300221455ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/override-yaml-files/docker-compose.override.yaml000066400000000000000000000000301404620552300275520ustar00rootroot00000000000000 db: command: "top" compose-1.29.2/tests/fixtures/override-yaml-files/docker-compose.yml000066400000000000000000000002271404620552300256030ustar00rootroot00000000000000 web: image: busybox:1.31.0-uclibc command: "sleep 100" links: - db db: image: busybox:1.31.0-uclibc command: "sleep 200" compose-1.29.2/tests/fixtures/pid-mode/000077500000000000000000000000001404620552300177645ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/pid-mode/docker-compose.yml000066400000000000000000000004071404620552300234220ustar00rootroot00000000000000version: "2.2" services: service: image: busybox command: top pid: "service:container" container: image: busybox command: top pid: "container:composetest_pid_mode_container" host: image: busybox command: top pid: host compose-1.29.2/tests/fixtures/ports-composefile-scale/000077500000000000000000000000001404620552300230255ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/ports-composefile-scale/docker-compose.yml000066400000000000000000000001301404620552300264540ustar00rootroot00000000000000 simple: image: busybox:1.31.0-uclibc command: /bin/sleep 300 ports: - '3000' compose-1.29.2/tests/fixtures/ports-composefile/000077500000000000000000000000001404620552300217405ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/ports-composefile/docker-compose.yml000066400000000000000000000001761404620552300254010ustar00rootroot00000000000000 
simple: image: busybox:1.31.0-uclibc command: top ports: - '3000' - '49152:3001' - '49153-49154:3002-3003' compose-1.29.2/tests/fixtures/ports-composefile/expanded-notation.yml000066400000000000000000000005051404620552300261040ustar00rootroot00000000000000version: '3.2' services: simple: image: busybox:1.31.0-uclibc command: top ports: - target: 3000 - target: 3001 published: 53222 - target: 3002 published: 53223 protocol: tcp - target: 3003 published: 53224 protocol: udp compose-1.29.2/tests/fixtures/profiles/000077500000000000000000000000001404620552300201115ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/profiles/docker-compose.yml000066400000000000000000000005011404620552300235420ustar00rootroot00000000000000version: "3" services: foo: image: busybox:1.31.0-uclibc bar: image: busybox:1.31.0-uclibc profiles: - test baz: image: busybox:1.31.0-uclibc depends_on: - bar profiles: - test zot: image: busybox:1.31.0-uclibc depends_on: - bar profiles: - debug compose-1.29.2/tests/fixtures/profiles/merge-profiles.yml000066400000000000000000000000721404620552300235530ustar00rootroot00000000000000version: "3" services: bar: profiles: - debug compose-1.29.2/tests/fixtures/ps-services-filter/000077500000000000000000000000001404620552300220145ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/ps-services-filter/docker-compose.yml000066400000000000000000000001541404620552300254510ustar00rootroot00000000000000with_image: image: busybox:1.31.0-uclibc command: top with_build: build: ../build-ctx/ command: top compose-1.29.2/tests/fixtures/restart/000077500000000000000000000000001404620552300177525ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/restart/docker-compose.yml000066400000000000000000000004411404620552300234060ustar00rootroot00000000000000version: "2" services: never: image: busybox restart: "no" always: image: busybox restart: always on-failure: image: busybox restart: on-failure on-failure-5: image: busybox restart: "on-failure:5" restart-null: image: busybox restart: "" compose-1.29.2/tests/fixtures/run-labels/000077500000000000000000000000001404620552300203325ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/run-labels/docker-compose.yml000066400000000000000000000001401404620552300237620ustar00rootroot00000000000000service: image: busybox:1.31.0-uclibc command: top labels: foo: bar hello: world compose-1.29.2/tests/fixtures/run-workdir/000077500000000000000000000000001404620552300205515ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/run-workdir/docker-compose.yml000066400000000000000000000001211404620552300242000ustar00rootroot00000000000000service: image: busybox:1.31.0-uclibc working_dir: /etc command: /bin/true compose-1.29.2/tests/fixtures/scale/000077500000000000000000000000001404620552300173555ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/scale/docker-compose.yml000066400000000000000000000003341404620552300230120ustar00rootroot00000000000000version: '2.2' services: web: image: busybox command: top scale: 2 db: image: busybox command: top worker: image: busybox command: top scale: 0 compose-1.29.2/tests/fixtures/secrets/000077500000000000000000000000001404620552300177365ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/secrets/default000066400000000000000000000000231404620552300213000ustar00rootroot00000000000000This is the secret 
compose-1.29.2/tests/fixtures/simple-composefile-volume-ready/000077500000000000000000000000001404620552300244715ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/simple-composefile-volume-ready/docker-compose.merge.yml000066400000000000000000000002031404620552300312170ustar00rootroot00000000000000version: '2.2' services: simple: image: busybox:1.31.0-uclibc volumes: - datastore:/data1 volumes: datastore: compose-1.29.2/tests/fixtures/simple-composefile-volume-ready/docker-compose.yml000066400000000000000000000000471404620552300301270ustar00rootroot00000000000000simple: image: busybox:1.31.0-uclibc compose-1.29.2/tests/fixtures/simple-composefile-volume-ready/files/000077500000000000000000000000001404620552300255735ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/simple-composefile-volume-ready/files/example.txt000066400000000000000000000000151404620552300277630ustar00rootroot00000000000000FILE_CONTENT compose-1.29.2/tests/fixtures/simple-composefile/000077500000000000000000000000001404620552300220625ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/simple-composefile/can-build-pull-failures.yml000066400000000000000000000001641404620552300272260ustar00rootroot00000000000000version: '3' services: can_build: image: nonexisting-image-but-can-build:latest build: . command: top compose-1.29.2/tests/fixtures/simple-composefile/digest.yml000066400000000000000000000002461404620552300240660ustar00rootroot00000000000000simple: image: busybox:1.31.0-uclibc command: top digest: image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d command: top compose-1.29.2/tests/fixtures/simple-composefile/docker-compose.yml000066400000000000000000000001461404620552300255200ustar00rootroot00000000000000simple: image: busybox:1.27.2 command: top another: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/simple-composefile/ignore-pull-failures.yml000066400000000000000000000001601404620552300266470ustar00rootroot00000000000000simple: image: busybox:1.31.0-uclibc command: top another: image: nonexisting-image:latest command: top compose-1.29.2/tests/fixtures/simple-composefile/pull-with-build.yml000066400000000000000000000002651404620552300256320ustar00rootroot00000000000000version: "3" services: build_simple: image: simple build: . command: top from_simple: image: simple another: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/simple-dockerfile/000077500000000000000000000000001404620552300216645ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/simple-dockerfile/Dockerfile000066400000000000000000000001201404620552300236470ustar00rootroot00000000000000FROM busybox:1.27.2 LABEL com.docker.compose.test_image=true CMD echo "success" compose-1.29.2/tests/fixtures/simple-dockerfile/docker-compose.yml000066400000000000000000000000231404620552300253140ustar00rootroot00000000000000simple: build: . 
compose-1.29.2/tests/fixtures/simple-failing-dockerfile/000077500000000000000000000000001404620552300232735ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/simple-failing-dockerfile/Dockerfile000066400000000000000000000004671404620552300252740ustar00rootroot00000000000000FROM busybox:1.31.0-uclibc LABEL com.docker.compose.test_image=true LABEL com.docker.compose.test_failing_image=true # With the following label the container will be cleaned up automatically # Must be kept in sync with LABEL_PROJECT from compose/const.py LABEL com.docker.compose.project=composetest RUN exit 1 compose-1.29.2/tests/fixtures/simple-failing-dockerfile/docker-compose.yml000066400000000000000000000000231404620552300267230ustar00rootroot00000000000000simple: build: . compose-1.29.2/tests/fixtures/sleeps-composefile/000077500000000000000000000000001404620552300220645ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/sleeps-composefile/docker-compose.yml000066400000000000000000000002361404620552300255220ustar00rootroot00000000000000 version: "2" services: simple: image: busybox:1.31.0-uclibc command: sleep 200 another: image: busybox:1.31.0-uclibc command: sleep 200 compose-1.29.2/tests/fixtures/stop-signal-composefile/000077500000000000000000000000001404620552300230315ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/stop-signal-composefile/docker-compose.yml000066400000000000000000000002761404620552300264730ustar00rootroot00000000000000simple: image: busybox:1.31.0-uclibc command: - sh - '-c' - | trap 'exit 0' SIGINT trap 'exit 1' SIGTERM while true; do :; done stop_signal: SIGINT compose-1.29.2/tests/fixtures/tagless-image/000077500000000000000000000000001404620552300210105ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/tagless-image/Dockerfile000066400000000000000000000000531404620552300230000ustar00rootroot00000000000000FROM busybox:1.31.0-uclibc RUN touch /blah compose-1.29.2/tests/fixtures/tagless-image/docker-compose.yml000066400000000000000000000001101404620552300244350ustar00rootroot00000000000000version: '2.3' services: foo: image: ${IMAGE_ID} command: top compose-1.29.2/tests/fixtures/tls/000077500000000000000000000000001404620552300170705ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/tls/ca.pem000066400000000000000000000000001404620552300201440ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/tls/cert.pem000066400000000000000000000000001404620552300205160ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/tls/key.pem000066400000000000000000000000001404620552300203510ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/top/000077500000000000000000000000001404620552300170705ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/top/docker-compose.yml000066400000000000000000000001621404620552300225240ustar00rootroot00000000000000service_a: image: busybox:1.31.0-uclibc command: top service_b: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/unicode-environment/000077500000000000000000000000001404620552300222565ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/unicode-environment/docker-compose.yml000066400000000000000000000002051404620552300257100ustar00rootroot00000000000000version: '2' services: simple: image: busybox:1.31.0-uclibc command: sh -c 'echo $$FOO' environment: FOO: ${BAR} 
compose-1.29.2/tests/fixtures/user-composefile/000077500000000000000000000000001404620552300215475ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/user-composefile/docker-compose.yml000066400000000000000000000001071404620552300252020ustar00rootroot00000000000000service: image: busybox:1.31.0-uclibc user: notauser command: id compose-1.29.2/tests/fixtures/v1-config/000077500000000000000000000000001404620552300200575ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/v1-config/docker-compose.yml000066400000000000000000000002161404620552300235130ustar00rootroot00000000000000net: image: busybox volume: image: busybox volumes: - /data app: image: busybox net: "container:net" volumes_from: ["volume"] compose-1.29.2/tests/fixtures/v2-dependencies/000077500000000000000000000000001404620552300212415ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/v2-dependencies/docker-compose.yml000066400000000000000000000003701404620552300246760ustar00rootroot00000000000000version: "2.0" services: db: image: busybox:1.31.0-uclibc command: top web: image: busybox:1.31.0-uclibc command: top depends_on: - db console: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/v2-full/000077500000000000000000000000001404620552300175555ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/v2-full/Dockerfile000066400000000000000000000000711404620552300215450ustar00rootroot00000000000000 FROM busybox:1.31.0-uclibc RUN echo something CMD top compose-1.29.2/tests/fixtures/v2-full/docker-compose.yml000066400000000000000000000004131404620552300232100ustar00rootroot00000000000000 version: "2" volumes: data: driver: local networks: front: {} services: web: build: . networks: - front - default volumes_from: - other other: image: busybox:1.31.0-uclibc command: top volumes: - /data compose-1.29.2/tests/fixtures/v2-simple/000077500000000000000000000000001404620552300201045ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/v2-simple/docker-compose.yml000066400000000000000000000002021404620552300235330ustar00rootroot00000000000000version: "2" services: simple: image: busybox:1.27.2 command: top another: image: busybox:1.27.2 command: top compose-1.29.2/tests/fixtures/v2-simple/links-invalid.yml000066400000000000000000000002531404620552300233730ustar00rootroot00000000000000version: "2" services: simple: image: busybox:1.31.0-uclibc command: top links: - another another: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/v2-simple/one-container.yml000066400000000000000000000001231404620552300233640ustar00rootroot00000000000000version: "2" services: simple: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/v2-unhealthy-dependencies/000077500000000000000000000000001404620552300232405ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/v2-unhealthy-dependencies/docker-compose.yml000066400000000000000000000006001404620552300266710ustar00rootroot00000000000000version: "2.1" services: db: image: busybox:1.31.0-uclibc command: top healthcheck: test: exit 1 interval: 1s timeout: 1s retries: 1 web: image: busybox:1.31.0-uclibc command: top depends_on: db: condition: service_healthy console: image: busybox:1.31.0-uclibc command: top compose-1.29.2/tests/fixtures/v3-full/000077500000000000000000000000001404620552300175565ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/v3-full/docker-compose.yml000066400000000000000000000023601404620552300232140ustar00rootroot00000000000000version: "3.5" services: web: image: busybox deploy: mode: 
replicated replicas: 6 labels: [FOO=BAR] update_config: parallelism: 3 delay: 10s failure_action: continue monitor: 60s max_failure_ratio: 0.3 resources: limits: cpus: 0.05 memory: 50M reservations: cpus: 0.01 memory: 20M restart_policy: condition: on-failure delay: 5s max_attempts: 3 window: 120s placement: constraints: - node.hostname==foo - node.role != manager preferences: - spread: node.labels.datacenter healthcheck: test: cat /etc/passwd interval: 10s timeout: 1s retries: 5 volumes: - source: /host/path target: /container/path type: bind read_only: true - source: foobar type: volume target: /container/volumepath - type: volume target: /anonymous - type: volume source: foobar target: /container/volumepath2 volume: nocopy: true stop_grace_period: 20s volumes: foobar: labels: com.docker.compose.test: 'true' compose-1.29.2/tests/fixtures/volume-path-interpolation/000077500000000000000000000000001404620552300234145ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/volume-path-interpolation/docker-compose.yml000066400000000000000000000001321404620552300270450ustar00rootroot00000000000000test: image: busybox command: top volumes: - "~/${VOLUME_NAME}:/container-path" compose-1.29.2/tests/fixtures/volume-path/000077500000000000000000000000001404620552300205275ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/volume-path/common/000077500000000000000000000000001404620552300220175ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/volume-path/common/services.yml000066400000000000000000000001021404620552300243560ustar00rootroot00000000000000db: image: busybox volumes: - ./foo:/foo - ./bar:/bar compose-1.29.2/tests/fixtures/volume-path/docker-compose.yml000066400000000000000000000001311404620552300241570ustar00rootroot00000000000000db: extends: file: common/services.yml service: db volumes: - ./bar:/bar compose-1.29.2/tests/fixtures/volume/000077500000000000000000000000001404620552300175755ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/volume/docker-compose.yml000066400000000000000000000002541404620552300232330ustar00rootroot00000000000000version: '2' services: test: image: busybox command: top volumes: - /container-path - testvolume:/container-named-path volumes: testvolume: {} compose-1.29.2/tests/fixtures/volumes-from-container/000077500000000000000000000000001404620552300227015ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/volumes-from-container/docker-compose.yml000066400000000000000000000001551404620552300263370ustar00rootroot00000000000000version: "2" services: test: image: busybox volumes_from: ["container:composetest_data_container"] compose-1.29.2/tests/fixtures/volumes/000077500000000000000000000000001404620552300177605ustar00rootroot00000000000000compose-1.29.2/tests/fixtures/volumes/docker-compose.yml000066400000000000000000000000341404620552300234120ustar00rootroot00000000000000version: '2.1' services: {} compose-1.29.2/tests/fixtures/volumes/external-volumes-v2-x.yml000066400000000000000000000003331404620552300246060ustar00rootroot00000000000000version: "2.1" services: web: image: busybox command: top volumes: - foo:/var/lib/ - bar:/etc/ volumes: foo: external: true name: some_foo bar: external: name: some_bar compose-1.29.2/tests/fixtures/volumes/external-volumes-v2.yml000066400000000000000000000003061404620552300243410ustar00rootroot00000000000000version: "2" services: web: image: busybox command: top volumes: - foo:/var/lib/ - bar:/etc/ volumes: foo: external: true bar: external: name: some_bar 
compose-1.29.2/tests/fixtures/volumes/external-volumes-v3-4.yml000066400000000000000000000003331404620552300245030ustar00rootroot00000000000000version: "3.4" services: web: image: busybox command: top volumes: - foo:/var/lib/ - bar:/etc/ volumes: foo: external: true name: some_foo bar: external: name: some_bar compose-1.29.2/tests/fixtures/volumes/external-volumes-v3-x.yml000066400000000000000000000003101404620552300246020ustar00rootroot00000000000000version: "3.0" services: web: image: busybox command: top volumes: - foo:/var/lib/ - bar:/etc/ volumes: foo: external: true bar: external: name: some_bar compose-1.29.2/tests/fixtures/volumes/volume-label.yml000066400000000000000000000002731404620552300230710ustar00rootroot00000000000000version: "2.1" services: web: image: busybox command: top volumes: - volume_with_label:/data volumes: volume_with_label: labels: - "label_key=label_val" compose-1.29.2/tests/helpers.py000066400000000000000000000037021404620552300164330ustar00rootroot00000000000000import contextlib import os from compose.config.config import ConfigDetails from compose.config.config import ConfigFile from compose.config.config import load BUSYBOX_IMAGE_NAME = 'busybox' BUSYBOX_DEFAULT_TAG = '1.31.0-uclibc' BUSYBOX_IMAGE_WITH_TAG = '{}:{}'.format(BUSYBOX_IMAGE_NAME, BUSYBOX_DEFAULT_TAG) def build_config(contents, **kwargs): return load(build_config_details(contents, **kwargs)) def build_config_details(contents, working_dir='working_dir', filename='filename.yml'): return ConfigDetails( working_dir, [ConfigFile(filename, contents)], ) def create_custom_host_file(client, filename, content): dirname = os.path.dirname(filename) container = client.create_container( BUSYBOX_IMAGE_WITH_TAG, ['sh', '-c', 'echo -n "{}" > {}'.format(content, filename)], volumes={dirname: {}}, host_config=client.create_host_config( binds={dirname: {'bind': dirname, 'ro': False}}, network_mode='none', ), ) try: client.start(container) exitcode = client.wait(container)['StatusCode'] if exitcode != 0: output = client.logs(container) raise Exception( "Container exited with code {}:\n{}".format(exitcode, output)) container_info = client.inspect_container(container) if 'Node' in container_info: return container_info['Node']['Name'] finally: client.remove_container(container, force=True) def create_host_file(client, filename): with open(filename) as fh: content = fh.read() return create_custom_host_file(client, filename, content) @contextlib.contextmanager def cd(path): """ A context manager which changes the working directory to the given path, and then changes it back to its previous value on exit. """ prev_cwd = os.getcwd() os.chdir(path) try: yield finally: os.chdir(prev_cwd) compose-1.29.2/tests/integration/000077500000000000000000000000001404620552300167405ustar00rootroot00000000000000compose-1.29.2/tests/integration/__init__.py000066400000000000000000000000001404620552300210370ustar00rootroot00000000000000compose-1.29.2/tests/integration/environment_test.py000066400000000000000000000066571404620552300227330ustar00rootroot00000000000000import tempfile import pytest from ddt import data from ddt import ddt from .. 
import mock from ..acceptance.cli_test import dispatch from compose.cli.command import get_project from compose.cli.command import project_from_options from compose.config.environment import Environment from compose.config.errors import EnvFileNotFound from tests.integration.testcases import DockerClientTestCase @ddt class EnvironmentTest(DockerClientTestCase): @classmethod def setUpClass(cls): super().setUpClass() cls.compose_file = tempfile.NamedTemporaryFile(mode='w+b') cls.compose_file.write(bytes("""version: '3.2' services: svc: image: busybox:1.31.0-uclibc environment: TEST_VARIABLE: ${TEST_VARIABLE}""", encoding='utf-8')) cls.compose_file.flush() @classmethod def tearDownClass(cls): super().tearDownClass() cls.compose_file.close() @data('events', 'exec', 'kill', 'logs', 'pause', 'ps', 'restart', 'rm', 'start', 'stop', 'top', 'unpause') def _test_no_warning_on_missing_host_environment_var_on_silent_commands(self, cmd): options = {'COMMAND': cmd, '--file': [EnvironmentTest.compose_file.name]} with mock.patch('compose.config.environment.log') as fake_log: # Note that the warning silencing and the env variables check is # done in `project_from_options` # So no need to have a proper options map, the `COMMAND` key is enough project_from_options('.', options) assert fake_log.warn.call_count == 0 class EnvironmentOverrideFileTest(DockerClientTestCase): def test_env_file_override(self): base_dir = 'tests/fixtures/env-file-override' # '--env-file' are relative to the current working dir env = Environment.from_env_file(base_dir, base_dir+'/.env.override') dispatch(base_dir, ['--env-file', '.env.override', 'up']) project = get_project(project_dir=base_dir, config_path=['docker-compose.yml'], environment=env, override_dir=base_dir) containers = project.containers(stopped=True) assert len(containers) == 1 assert "WHEREAMI=override" in containers[0].get('Config.Env') assert "DEFAULT_CONF_LOADED=true" in containers[0].get('Config.Env') dispatch(base_dir, ['--env-file', '.env.override', 'down'], None) def test_env_file_not_found_error(self): base_dir = 'tests/fixtures/env-file-override' with pytest.raises(EnvFileNotFound) as excinfo: Environment.from_env_file(base_dir, '.env.override') assert "Couldn't find env file" in excinfo.exconly() def test_dot_env_file(self): base_dir = 'tests/fixtures/env-file-override' # '.env' is relative to the project_dir (base_dir) env = Environment.from_env_file(base_dir, None) dispatch(base_dir, ['up']) project = get_project(project_dir=base_dir, config_path=['docker-compose.yml'], environment=env, override_dir=base_dir) containers = project.containers(stopped=True) assert len(containers) == 1 assert "WHEREAMI=default" in containers[0].get('Config.Env') dispatch(base_dir, ['down'], None) compose-1.29.2/tests/integration/metrics_test.py000066400000000000000000000110141404620552300220140ustar00rootroot00000000000000import logging import os import socket from http.server import BaseHTTPRequestHandler from http.server import HTTPServer from threading import Thread import requests from docker.transport import UnixHTTPAdapter from tests.acceptance.cli_test import dispatch from tests.integration.testcases import DockerClientTestCase TEST_SOCKET_FILE = '/tmp/test-metrics-docker-cli.sock' class MetricsTest(DockerClientTestCase): test_session = requests.sessions.Session() test_env = None base_dir = 'tests/fixtures/v3-full' @classmethod def setUpClass(cls): super().setUpClass() MetricsTest.test_session.mount("http+unix://", UnixHTTPAdapter(TEST_SOCKET_FILE)) MetricsTest.test_env 
= os.environ.copy() MetricsTest.test_env['METRICS_SOCKET_FILE'] = TEST_SOCKET_FILE MetricsServer().start() @classmethod def test_metrics_help(cls): # root `docker-compose` command is considered as a `--help` dispatch(cls.base_dir, [], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose --help", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' dispatch(cls.base_dir, ['help', 'run'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose help", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' dispatch(cls.base_dir, ['--help'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose --help", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' dispatch(cls.base_dir, ['run', '--help'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose --help run", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' dispatch(cls.base_dir, ['up', '--help', 'extra_args'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose --help up", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' @classmethod def test_metrics_simple_commands(cls): dispatch(cls.base_dir, ['ps'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose ps", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' dispatch(cls.base_dir, ['version'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose version", "context": "moby", ' \ b'"source": "docker-compose", "status": "success"}' dispatch(cls.base_dir, ['version', '--yyy'], env=MetricsTest.test_env) assert cls.get_content() == \ b'{"command": "compose version", "context": "moby", ' \ b'"source": "docker-compose", "status": "failure"}' @staticmethod def get_content(): resp = MetricsTest.test_session.get("http+unix://localhost") print(resp.content) return resp.content def start_server(uri=TEST_SOCKET_FILE): try: os.remove(uri) except OSError: pass httpd = HTTPServer(uri, MetricsHTTPRequestHandler, False) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.bind(TEST_SOCKET_FILE) sock.listen(0) httpd.socket = sock print('Serving on ', uri) httpd.serve_forever() sock.shutdown(socket.SHUT_RDWR) sock.close() os.remove(uri) class MetricsServer: @classmethod def start(cls): t = Thread(target=start_server, daemon=True) t.start() class MetricsHTTPRequestHandler(BaseHTTPRequestHandler): usages = [] def do_GET(self): self.client_address = ('',) # avoid exception in BaseHTTPServer.py log_message() self.send_response(200) self.end_headers() for u in MetricsHTTPRequestHandler.usages: self.wfile.write(u) MetricsHTTPRequestHandler.usages = [] def do_POST(self): self.client_address = ('',) # avoid exception in BaseHTTPServer.py log_message() content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) print(body) MetricsHTTPRequestHandler.usages.append(body) self.send_response(200) self.end_headers() if __name__ == '__main__': logging.getLogger("urllib3").propagate = False logging.getLogger("requests").propagate = False start_server() compose-1.29.2/tests/integration/network_test.py000066400000000000000000000017721404620552300220510ustar00rootroot00000000000000import pytest from .testcases import DockerClientTestCase from compose.config.errors import ConfigurationError from compose.const import LABEL_NETWORK from compose.const import LABEL_PROJECT from 
compose.network import Network class NetworkTest(DockerClientTestCase): def test_network_default_labels(self): net = Network(self.client, 'composetest', 'foonet') net.ensure() net_data = net.inspect() labels = net_data['Labels'] assert labels[LABEL_NETWORK] == net.name assert labels[LABEL_PROJECT] == net.project def test_network_external_default_ensure(self): net = Network( self.client, 'composetest', 'foonet', external=True ) with pytest.raises(ConfigurationError): net.ensure() def test_network_external_overlay_ensure(self): net = Network( self.client, 'composetest', 'foonet', driver='overlay', external=True ) assert net.ensure() is None compose-1.29.2/tests/integration/project_test.py000066400000000000000000002152301404620552300220220ustar00rootroot00000000000000import copy import json import os import random import shutil import tempfile import pytest from docker.errors import APIError from docker.errors import NotFound from .. import mock from ..helpers import build_config as load_config from ..helpers import BUSYBOX_IMAGE_WITH_TAG from ..helpers import cd from ..helpers import create_host_file from .testcases import DockerClientTestCase from .testcases import SWARM_SKIP_CONTAINERS_ALL from compose.config import config from compose.config import ConfigurationError from compose.config import types from compose.config.types import VolumeFromSpec from compose.config.types import VolumeSpec from compose.const import COMPOSE_SPEC as VERSION from compose.const import LABEL_PROJECT from compose.const import LABEL_SERVICE from compose.container import Container from compose.errors import CompletedUnsuccessfully from compose.errors import HealthCheckFailed from compose.errors import NoHealthCheckConfigured from compose.project import Project from compose.project import ProjectError from compose.service import ConvergenceStrategy from tests.integration.testcases import if_runtime_available from tests.integration.testcases import is_cluster from tests.integration.testcases import no_cluster def build_config(**kwargs): return config.Config( config_version=kwargs.get('version', VERSION), version=kwargs.get('version', VERSION), services=kwargs.get('services'), volumes=kwargs.get('volumes'), networks=kwargs.get('networks'), secrets=kwargs.get('secrets'), configs=kwargs.get('configs'), ) class ProjectTest(DockerClientTestCase): def test_containers(self): web = self.create_service('web') db = self.create_service('db') project = Project('composetest', [web, db], self.client) project.up() containers = project.containers() assert len(containers) == 2 @pytest.mark.skipif(SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug') def test_containers_stopped(self): web = self.create_service('web') db = self.create_service('db') project = Project('composetest', [web, db], self.client) project.up() assert len(project.containers()) == 2 assert len(project.containers(stopped=True)) == 2 project.stop() assert len(project.containers()) == 0 assert len(project.containers(stopped=True)) == 2 def test_containers_with_service_names(self): web = self.create_service('web') db = self.create_service('db') project = Project('composetest', [web, db], self.client) project.up() containers = project.containers(['web']) assert len(containers) == 1 assert containers[0].name.startswith('composetest_web_') def test_containers_with_extra_service(self): web = self.create_service('web') web_1 = web.create_container() db = self.create_service('db') db_1 = db.create_container() self.create_service('extra').create_container() 
project = Project('composetest', [web, db], self.client) assert set(project.containers(stopped=True)) == {web_1, db_1} def test_parallel_pull_with_no_image(self): config_data = build_config( services=[{ 'name': 'web', 'build': {'context': '.'}, }], ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.pull(parallel_pull=True) def test_volumes_from_service(self): project = Project.from_config( name='composetest', config_data=load_config({ 'data': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': ['/var/data'], }, 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes_from': ['data'], }, }), client=self.client, ) db = project.get_service('db') data = project.get_service('data') assert db.volumes_from == [VolumeFromSpec(data, 'rw', 'service')] def test_volumes_from_container(self): data_container = Container.create( self.client, image=BUSYBOX_IMAGE_WITH_TAG, volumes=['/var/data'], name='composetest_data_container', labels={LABEL_PROJECT: 'composetest'}, host_config={}, ) project = Project.from_config( name='composetest', config_data=load_config({ 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes_from': ['composetest_data_container'], }, }), client=self.client, ) db = project.get_service('db') assert db._get_volumes_from() == [data_container.id + ':rw'] @no_cluster('container networks not supported in Swarm') def test_network_mode_from_service(self): project = Project.from_config( name='composetest', client=self.client, config_data=load_config({ 'services': { 'net': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"] }, 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'network_mode': 'service:net', 'command': ["top"] }, }, }), ) project.up() web = project.get_service('web') net = project.get_service('net') assert web.network_mode.mode == 'container:' + net.containers()[0].id @no_cluster('container networks not supported in Swarm') def test_network_mode_from_container(self): def get_project(): return Project.from_config( name='composetest', config_data=load_config({ 'services': { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'network_mode': 'container:composetest_net_container' }, }, }), client=self.client, ) with pytest.raises(ConfigurationError) as excinfo: get_project() assert "container 'composetest_net_container' which does not exist" in excinfo.exconly() net_container = Container.create( self.client, image=BUSYBOX_IMAGE_WITH_TAG, name='composetest_net_container', command='top', labels={LABEL_PROJECT: 'composetest'}, host_config={}, ) net_container.start() project = get_project() project.up() web = project.get_service('web') assert web.network_mode.mode == 'container:' + net_container.id @no_cluster('container networks not supported in Swarm') def test_net_from_service_v1(self): project = Project.from_config( name='composetest', config_data=load_config({ 'net': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"] }, 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'net': 'container:net', 'command': ["top"] }, }), client=self.client, ) project.up() web = project.get_service('web') net = project.get_service('net') assert web.network_mode.mode == 'container:' + net.containers()[0].id @no_cluster('container networks not supported in Swarm') def test_net_from_container_v1(self): def get_project(): return Project.from_config( name='composetest', config_data=load_config({ 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'net': 'container:composetest_net_container' }, }), client=self.client, ) with pytest.raises(ConfigurationError) as excinfo: get_project() assert "container 
'composetest_net_container' which does not exist" in excinfo.exconly() net_container = Container.create( self.client, image=BUSYBOX_IMAGE_WITH_TAG, name='composetest_net_container', command='top', labels={LABEL_PROJECT: 'composetest'}, host_config={}, ) net_container.start() project = get_project() project.up() web = project.get_service('web') assert web.network_mode.mode == 'container:' + net_container.id def test_start_pause_unpause_stop_kill_remove(self): web = self.create_service('web') db = self.create_service('db') project = Project('composetest', [web, db], self.client) project.start() assert len(web.containers()) == 0 assert len(db.containers()) == 0 web_container_1 = web.create_container() web_container_2 = web.create_container() db_container = db.create_container() project.start(service_names=['web']) assert {c.name for c in project.containers() if c.is_running} == { web_container_1.name, web_container_2.name} project.start() assert {c.name for c in project.containers() if c.is_running} == { web_container_1.name, web_container_2.name, db_container.name} project.pause(service_names=['web']) assert {c.name for c in project.containers() if c.is_paused} == { web_container_1.name, web_container_2.name} project.pause() assert {c.name for c in project.containers() if c.is_paused} == { web_container_1.name, web_container_2.name, db_container.name} project.unpause(service_names=['db']) assert len([c.name for c in project.containers() if c.is_paused]) == 2 project.unpause() assert len([c.name for c in project.containers() if c.is_paused]) == 0 project.stop(service_names=['web'], timeout=1) assert {c.name for c in project.containers() if c.is_running} == {db_container.name} project.kill(service_names=['db']) assert len([c for c in project.containers() if c.is_running]) == 0 assert len(project.containers(stopped=True)) == 3 project.remove_stopped(service_names=['web']) assert len(project.containers(stopped=True)) == 1 project.remove_stopped() assert len(project.containers(stopped=True)) == 0 def test_create(self): web = self.create_service('web') db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) project = Project('composetest', [web, db], self.client) project.create(['db']) containers = project.containers(stopped=True) assert len(containers) == 1 assert not containers[0].is_running db_containers = db.containers(stopped=True) assert len(db_containers) == 1 assert not db_containers[0].is_running assert len(web.containers(stopped=True)) == 0 def test_create_twice(self): web = self.create_service('web') db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) project = Project('composetest', [web, db], self.client) project.create(['db', 'web']) project.create(['db', 'web']) containers = project.containers(stopped=True) assert len(containers) == 2 db_containers = db.containers(stopped=True) assert len(db_containers) == 1 assert not db_containers[0].is_running web_containers = web.containers(stopped=True) assert len(web_containers) == 1 assert not web_containers[0].is_running def test_create_with_links(self): db = self.create_service('db') web = self.create_service('web', links=[(db, 'db')]) project = Project('composetest', [db, web], self.client) project.create(['web']) # self.assertEqual(len(project.containers()), 0) assert len(project.containers(stopped=True)) == 2 assert not [c for c in project.containers(stopped=True) if c.is_running] assert len(db.containers(stopped=True)) == 1 assert len(web.containers(stopped=True)) == 1 def 
test_create_strategy_always(self): db = self.create_service('db') project = Project('composetest', [db], self.client) project.create(['db']) old_id = project.containers(stopped=True)[0].id project.create(['db'], strategy=ConvergenceStrategy.always) assert len(project.containers(stopped=True)) == 1 db_container = project.containers(stopped=True)[0] assert not db_container.is_running assert db_container.id != old_id def test_create_strategy_never(self): db = self.create_service('db') project = Project('composetest', [db], self.client) project.create(['db']) old_id = project.containers(stopped=True)[0].id project.create(['db'], strategy=ConvergenceStrategy.never) assert len(project.containers(stopped=True)) == 1 db_container = project.containers(stopped=True)[0] assert not db_container.is_running assert db_container.id == old_id def test_project_up(self): web = self.create_service('web') db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) project = Project('composetest', [web, db], self.client) project.start() assert len(project.containers()) == 0 project.up(['db']) assert len(project.containers()) == 1 assert len(db.containers()) == 1 assert len(web.containers()) == 0 def test_project_up_starts_uncreated_services(self): db = self.create_service('db') web = self.create_service('web', links=[(db, 'db')]) project = Project('composetest', [db, web], self.client) project.up(['db']) assert len(project.containers()) == 1 project.up() assert len(project.containers()) == 2 assert len(db.containers()) == 1 assert len(web.containers()) == 1 def test_recreate_preserves_volumes(self): web = self.create_service('web') db = self.create_service('db', volumes=[VolumeSpec.parse('/etc')]) project = Project('composetest', [web, db], self.client) project.start() assert len(project.containers()) == 0 project.up(['db']) assert len(project.containers()) == 1 old_db_id = project.containers()[0].id db_volume_path = project.containers()[0].get('Volumes./etc') project.up(strategy=ConvergenceStrategy.always) assert len(project.containers()) == 2 db_container = [c for c in project.containers() if c.service == 'db'][0] assert db_container.id != old_db_id assert db_container.get('Volumes./etc') == db_volume_path def test_recreate_preserves_mounts(self): web = self.create_service('web') db = self.create_service('db', volumes=[types.MountSpec(type='volume', target='/etc')]) project = Project('composetest', [web, db], self.client) project.start() assert len(project.containers()) == 0 project.up(['db']) assert len(project.containers()) == 1 old_db_id = project.containers()[0].id db_volume_path = project.containers()[0].get_mount('/etc')['Source'] project.up(strategy=ConvergenceStrategy.always) assert len(project.containers()) == 2 db_container = [c for c in project.containers() if c.service == 'db'][0] assert db_container.id != old_db_id assert db_container.get_mount('/etc')['Source'] == db_volume_path def test_project_up_with_no_recreate_running(self): web = self.create_service('web') db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) project = Project('composetest', [web, db], self.client) project.start() assert len(project.containers()) == 0 project.up(['db']) assert len(project.containers()) == 1 container, = project.containers() old_db_id = container.id db_volume_path = container.get_mount('/var/db')['Source'] project.up(strategy=ConvergenceStrategy.never) assert len(project.containers()) == 2 db_container = [c for c in project.containers() if c.name == container.name][0] assert 
db_container.id == old_db_id assert db_container.get_mount('/var/db')['Source'] == db_volume_path def test_project_up_with_no_recreate_stopped(self): web = self.create_service('web') db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) project = Project('composetest', [web, db], self.client) project.start() assert len(project.containers()) == 0 project.up(['db']) project.kill() old_containers = project.containers(stopped=True) assert len(old_containers) == 1 old_container, = old_containers old_db_id = old_container.id db_volume_path = old_container.get_mount('/var/db')['Source'] project.up(strategy=ConvergenceStrategy.never) new_containers = project.containers(stopped=True) assert len(new_containers) == 2 assert [c.is_running for c in new_containers] == [True, True] db_container = [c for c in new_containers if c.service == 'db'][0] assert db_container.id == old_db_id assert db_container.get_mount('/var/db')['Source'] == db_volume_path def test_project_up_without_all_services(self): console = self.create_service('console') db = self.create_service('db') project = Project('composetest', [console, db], self.client) project.start() assert len(project.containers()) == 0 project.up() assert len(project.containers()) == 2 assert len(db.containers()) == 1 assert len(console.containers()) == 1 def test_project_up_starts_links(self): console = self.create_service('console') db = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) web = self.create_service('web', links=[(db, 'db')]) project = Project('composetest', [web, db, console], self.client) project.start() assert len(project.containers()) == 0 project.up(['web']) assert len(project.containers()) == 2 assert len(web.containers()) == 1 assert len(db.containers()) == 1 assert len(console.containers()) == 0 def test_project_up_starts_depends(self): project = Project.from_config( name='composetest', config_data=load_config({ 'console': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"], }, 'data': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"] }, 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"], 'volumes_from': ['data'], }, 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"], 'links': ['db'], }, }), client=self.client, ) project.start() assert len(project.containers()) == 0 project.up(['web']) assert len(project.containers()) == 3 assert len(project.get_service('web').containers()) == 1 assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('data').containers()) == 1 assert len(project.get_service('console').containers()) == 0 def test_project_up_with_no_deps(self): project = Project.from_config( name='composetest', config_data=load_config({ 'console': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"], }, 'data': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"] }, 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"], 'volumes_from': ['data'], }, 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': ["top"], 'links': ['db'], }, }), client=self.client, ) project.start() assert len(project.containers()) == 0 project.up(['db'], start_deps=False) assert len(project.containers(stopped=True)) == 2 assert len(project.get_service('web').containers()) == 0 assert len(project.get_service('db').containers()) == 1 assert len(project.get_service('data').containers(stopped=True)) == 1 assert not project.get_service('data').containers(stopped=True)[0].is_running assert len(project.get_service('console').containers()) == 0 def 
test_project_up_recreate_with_tmpfs_volume(self): # https://github.com/docker/compose/issues/4751 project = Project.from_config( name='composetest', config_data=load_config({ 'version': '2.1', 'services': { 'foo': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'tmpfs': ['/dev/shm'], 'volumes': ['/dev/shm'] } } }), client=self.client ) project.up() project.up(strategy=ConvergenceStrategy.always) def test_unscale_after_restart(self): web = self.create_service('web') project = Project('composetest', [web], self.client) project.start() service = project.get_service('web') service.scale(1) assert len(service.containers()) == 1 service.scale(3) assert len(service.containers()) == 3 project.up() service = project.get_service('web') assert len(service.containers()) == 1 service.scale(1) assert len(service.containers()) == 1 project.up(scale_override={'web': 3}) service = project.get_service('web') assert len(service.containers()) == 3 # does scale=0 make any sense? after recreating, at least 1 container is running service.scale(0) project.up() service = project.get_service('web') assert len(service.containers()) == 1 def test_project_up_networks(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'networks': { 'foo': None, 'bar': None, 'baz': {'aliases': ['extra']}, }, }], networks={ 'foo': {'driver': 'bridge'}, 'bar': {'driver': None}, 'baz': {}, }, ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up() containers = project.containers() assert len(containers) == 1 container, = containers for net_name in ['foo', 'bar', 'baz']: full_net_name = 'composetest_{}'.format(net_name) network_data = self.client.inspect_network(full_net_name) assert network_data['Name'] == full_net_name aliases_key = 'NetworkSettings.Networks.{net}.Aliases' assert 'web' in container.get(aliases_key.format(net='composetest_foo')) assert 'web' in container.get(aliases_key.format(net='composetest_baz')) assert 'extra' in container.get(aliases_key.format(net='composetest_baz')) foo_data = self.client.inspect_network('composetest_foo') assert foo_data['Driver'] == 'bridge' def test_up_with_ipam_config(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': {'front': None}, }], networks={ 'front': { 'driver': 'bridge', 'driver_opts': { "com.docker.network.bridge.enable_icc": "false", }, 'ipam': { 'driver': 'default', 'config': [{ "subnet": "172.28.0.0/16", "ip_range": "172.28.5.0/24", "gateway": "172.28.5.254", "aux_addresses": { "a": "172.28.1.5", "b": "172.28.1.6", "c": "172.28.1.7", }, }], }, }, }, ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up() network = self.client.networks(names=['composetest_front'])[0] assert network['Options'] == { "com.docker.network.bridge.enable_icc": "false" } assert network['IPAM'] == { 'Driver': 'default', 'Options': None, 'Config': [{ 'Subnet': "172.28.0.0/16", 'IPRange': "172.28.5.0/24", 'Gateway': "172.28.5.254", 'AuxiliaryAddresses': { 'a': '172.28.1.5', 'b': '172.28.1.6', 'c': '172.28.1.7', }, }], } def test_up_with_ipam_options(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': {'front': None}, }], networks={ 'front': { 'driver': 'bridge', 'ipam': { 'driver': 'default', 'options': { "com.docker.compose.network.test": "9-29-045" } }, }, }, ) project = Project.from_config( client=self.client, name='composetest',
config_data=config_data, ) project.up() network = self.client.networks(names=['composetest_front'])[0] assert network['IPAM']['Options'] == { "com.docker.compose.network.test": "9-29-045" } def test_up_with_network_static_addresses(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'networks': { 'static_test': { 'ipv4_address': '172.16.100.100', 'ipv6_address': 'fe80::1001:102' } }, }], networks={ 'static_test': { 'driver': 'bridge', 'driver_opts': { "com.docker.network.enable_ipv6": "true", }, 'ipam': { 'driver': 'default', 'config': [ {"subnet": "172.16.100.0/24", "gateway": "172.16.100.1"}, {"subnet": "fe80::/64", "gateway": "fe80::1001:1"} ] }, 'enable_ipv6': True, } } ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up(detached=True) service_container = project.get_service('web').containers()[0] ipam_config = (service_container.inspect().get('NetworkSettings', {}). get('Networks', {}).get('composetest_static_test', {}). get('IPAMConfig', {})) assert ipam_config.get('IPv4Address') == '172.16.100.100' assert ipam_config.get('IPv6Address') == 'fe80::1001:102' def test_up_with_network_priorities(self): mac_address = '74:6f:75:68:6f:75' def get_config_data(p1, p2, p3): return build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': { 'n1': { 'priority': p1, }, 'n2': { 'priority': p2, }, 'n3': { 'priority': p3, } }, 'command': 'top', 'mac_address': mac_address }], networks={ 'n1': {}, 'n2': {}, 'n3': {} } ) config1 = get_config_data(1000, 1, 1) config2 = get_config_data(2, 3, 1) config3 = get_config_data(5, 40, 100) project = Project.from_config( client=self.client, name='composetest', config_data=config1 ) project.up(detached=True) service_container = project.get_service('web').containers()[0] net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n1'] assert net_config['MacAddress'] == mac_address project = Project.from_config( client=self.client, name='composetest', config_data=config2 ) project.up(detached=True) service_container = project.get_service('web').containers()[0] net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n2'] assert net_config['MacAddress'] == mac_address project = Project.from_config( client=self.client, name='composetest', config_data=config3 ) project.up(detached=True) service_container = project.get_service('web').containers()[0] net_config = service_container.inspect()['NetworkSettings']['Networks']['composetest_n3'] assert net_config['MacAddress'] == mac_address def test_up_with_enable_ipv6(self): self.require_api_version('1.23') config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'networks': { 'static_test': { 'ipv6_address': 'fe80::1001:102' } }, }], networks={ 'static_test': { 'driver': 'bridge', 'enable_ipv6': True, 'ipam': { 'driver': 'default', 'config': [ {"subnet": "fe80::/64", "gateway": "fe80::1001:1"} ] } } } ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up(detached=True) network = [n for n in self.client.networks() if 'static_test' in n['Name']][0] service_container = project.get_service('web').containers()[0] assert network['EnableIPv6'] is True ipam_config = (service_container.inspect().get('NetworkSettings', {}). get('Networks', {}).get('composetest_static_test', {}). 
get('IPAMConfig', {})) assert ipam_config.get('IPv6Address') == 'fe80::1001:102' def test_up_with_network_static_addresses_missing_subnet(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': { 'static_test': { 'ipv4_address': '172.16.100.100', 'ipv6_address': 'fe80::1001:101' } }, }], networks={ 'static_test': { 'driver': 'bridge', 'driver_opts': { "com.docker.network.enable_ipv6": "true", }, 'ipam': { 'driver': 'default', }, }, }, ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) with pytest.raises(ProjectError): project.up() def test_up_with_network_link_local_ips(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': { 'linklocaltest': { 'link_local_ips': ['169.254.8.8'] } } }], networks={ 'linklocaltest': {'driver': 'bridge'} } ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) project.up(detached=True) service_container = project.get_service('web').containers(stopped=True)[0] ipam_config = service_container.inspect().get( 'NetworkSettings', {} ).get( 'Networks', {} ).get( 'composetest_linklocaltest', {} ).get('IPAMConfig', {}) assert 'LinkLocalIPs' in ipam_config assert ipam_config['LinkLocalIPs'] == ['169.254.8.8'] def test_up_with_custom_name_resources(self): config_data = build_config( services=[{ 'name': 'web', 'volumes': [VolumeSpec.parse('foo:/container-path')], 'networks': {'foo': {}}, 'image': BUSYBOX_IMAGE_WITH_TAG }], networks={ 'foo': { 'name': 'zztop', 'labels': {'com.docker.compose.test_value': 'sharpdressedman'} } }, volumes={ 'foo': { 'name': 'acdc', 'labels': {'com.docker.compose.test_value': 'thefuror'} } } ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) project.up(detached=True) network = [n for n in self.client.networks() if n['Name'] == 'zztop'][0] volume = [v for v in self.client.volumes()['Volumes'] if v['Name'] == 'acdc'][0] assert network['Labels']['com.docker.compose.test_value'] == 'sharpdressedman' assert volume['Labels']['com.docker.compose.test_value'] == 'thefuror' def test_up_with_isolation(self): self.require_api_version('1.24') config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'isolation': 'default' }], ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) project.up(detached=True) service_container = project.get_service('web').containers(stopped=True)[0] assert service_container.inspect()['HostConfig']['Isolation'] == 'default' def test_up_with_invalid_isolation(self): self.require_api_version('1.24') config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'isolation': 'foobar' }], ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) with pytest.raises(ProjectError): project.up() @if_runtime_available('runc') def test_up_with_runtime(self): self.require_api_version('1.30') config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'runtime': 'runc' }], ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) project.up(detached=True) service_container = project.get_service('web').containers(stopped=True)[0] assert service_container.inspect()['HostConfig']['Runtime'] == 'runc' def test_up_with_invalid_runtime(self): self.require_api_version('1.30') config_data = 
build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'runtime': 'foobar' }], ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) with pytest.raises(ProjectError): project.up() @if_runtime_available('nvidia') def test_up_with_nvidia_runtime(self): self.require_api_version('1.30') config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'runtime': 'nvidia' }], ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) project.up(detached=True) service_container = project.get_service('web').containers(stopped=True)[0] assert service_container.inspect()['HostConfig']['Runtime'] == 'nvidia' def test_project_up_with_network_internal(self): self.require_api_version('1.23') config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': {'internal': None}, }], networks={ 'internal': {'driver': 'bridge', 'internal': True}, }, ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up() network = self.client.networks(names=['composetest_internal'])[0] assert network['Internal'] is True def test_project_up_with_network_label(self): self.require_api_version('1.23') network_name = 'network_with_label' config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'networks': {network_name: None} }], networks={ network_name: {'labels': {'label_key': 'label_val'}} } ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data ) project.up() networks = [ n for n in self.client.networks() if n['Name'].startswith('composetest_') ] assert [n['Name'] for n in networks] == ['composetest_{}'.format(network_name)] assert 'label_key' in networks[0]['Labels'] assert networks[0]['Labels']['label_key'] == 'label_val' def test_project_up_volumes(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={vol_name: {'driver': 'local'}}, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() assert len(project.containers()) == 1 volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert volume_data['Driver'] == 'local' def test_project_up_with_volume_labels(self): self.require_api_version('1.23') volume_name = 'volume_with_label' config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': [VolumeSpec.parse('{}:/data'.format(volume_name))] }], volumes={ volume_name: { 'labels': { 'label_key': 'label_val' } } }, ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up() volumes = [ v for v in self.client.volumes().get('Volumes', []) if v['Name'].split('/')[-1].startswith('composetest_') ] assert {v['Name'].split('/')[-1] for v in volumes} == { 'composetest_{}'.format(volume_name) } assert 'label_key' in volumes[0]['Labels'] assert volumes[0]['Labels']['label_key'] == 'label_val' def test_project_up_logging_with_multiple_files(self): base_file = config.ConfigFile( 'base.yml', { 'services': { 'simple': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'}, 'another': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'logging': { 'driver': "json-file", 'options': { 'max-size': 
"10m" } } } } }) override_file = config.ConfigFile( 'override.yml', { 'services': { 'another': { 'logging': { 'driver': "none" } } } }) details = config.ConfigDetails('.', [base_file, override_file]) tmpdir = tempfile.mkdtemp('logging_test') self.addCleanup(shutil.rmtree, tmpdir) with cd(tmpdir): config_data = config.load(details) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() containers = project.containers() assert len(containers) == 2 another = project.get_service('another').containers()[0] log_config = another.get('HostConfig.LogConfig') assert log_config assert log_config.get('Type') == 'none' def test_project_up_port_mappings_with_multiple_files(self): base_file = config.ConfigFile( 'base.yml', { 'services': { 'simple': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'ports': ['1234:1234'] }, }, }) override_file = config.ConfigFile( 'override.yml', { 'services': { 'simple': { 'ports': ['1234:1234'] } } }) details = config.ConfigDetails('.', [base_file, override_file]) config_data = config.load(details) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() containers = project.containers() assert len(containers) == 1 def test_project_up_config_scale(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'scale': 3 }] ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() assert len(project.containers()) == 3 project.up(scale_override={'web': 2}) assert len(project.containers()) == 2 project.up(scale_override={'web': 4}) assert len(project.containers()) == 4 project.stop() project.up() assert len(project.containers()) == 3 def test_project_up_scale_with_stopped_containers(self): config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'scale': 2 }] ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() containers = project.containers() assert len(containers) == 2 self.client.stop(containers[0].id) project.up(scale_override={'web': 2}) containers = project.containers() assert len(containers) == 2 self.client.stop(containers[0].id) project.up(scale_override={'web': 3}) assert len(project.containers()) == 3 self.client.stop(containers[0].id) project.up(scale_override={'web': 1}) assert len(project.containers()) == 1 def test_initialize_volumes(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={vol_name: {}}, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.volumes.initialize() volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert volume_data['Driver'] == 'local' def test_project_up_implicit_volume_driver(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={vol_name: {}}, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert 
volume_data['Driver'] == 'local' def test_project_up_with_secrets(self): node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default')) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'cat /run/secrets/special', 'secrets': [ types.ServiceSecret.parse({'source': 'super', 'target': 'special'}), ], 'environment': ['constraint:node=={}'.format(node if node is not None else '*')] }], secrets={ 'super': { 'file': os.path.abspath('tests/fixtures/secrets/default'), }, }, ) project = Project.from_config( client=self.client, name='composetest', config_data=config_data, ) project.up() project.stop() containers = project.containers(stopped=True) assert len(containers) == 1 container, = containers output = container.logs() assert output == b"This is the secret\n" def test_project_up_with_added_secrets(self): node = create_host_file(self.client, os.path.abspath('tests/fixtures/secrets/default')) config_input1 = { 'services': [ { 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'cat /run/secrets/special', 'environment': ['constraint:node=={}'.format(node if node is not None else '')] } ], 'secrets': { 'super': { 'file': os.path.abspath('tests/fixtures/secrets/default') } } } config_input2 = copy.deepcopy(config_input1) # Add the secret config_input2['services'][0]['secrets'] = [ types.ServiceSecret.parse({'source': 'super', 'target': 'special'}) ] config_data1 = build_config(**config_input1) config_data2 = build_config(**config_input2) # First up with non-secret project = Project.from_config( client=self.client, name='composetest', config_data=config_data1, ) project.up() # Then up with secret project = Project.from_config( client=self.client, name='composetest', config_data=config_data2, ) project.up() project.stop() containers = project.containers(stopped=True) assert len(containers) == 1 container, = containers output = container.logs() assert output == b"This is the secret\n" def test_initialize_volumes_invalid_volume_driver(self): vol_name = '{:x}'.format(random.getrandbits(32)) config_data = build_config( version=VERSION, services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={vol_name: {'driver': 'foobar'}}, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(APIError if is_cluster(self.client) else config.ConfigurationError): project.volumes.initialize() @no_cluster('inspect volume by name defect on Swarm Classic') def test_initialize_volumes_updated_driver(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={vol_name: {'driver': 'local'}}, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.volumes.initialize() volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert volume_data['Driver'] == 'local' config_data = config_data._replace( volumes={vol_name: {'driver': 'smb'}} ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(config.ConfigurationError) as e: project.volumes.initialize() assert 'Configuration for volume {} specifies driver smb'.format( vol_name ) in str(e.value) @no_cluster('inspect volume by name defect on Swarm Classic') def 
test_initialize_volumes_updated_driver_opts(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) tmpdir = tempfile.mkdtemp(prefix='compose_test_') self.addCleanup(shutil.rmtree, tmpdir) driver_opts = {'o': 'bind', 'device': tmpdir, 'type': 'none'} config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={ vol_name: { 'driver': 'local', 'driver_opts': driver_opts } }, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.volumes.initialize() volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert volume_data['Driver'] == 'local' assert volume_data['Options'] == driver_opts driver_opts['device'] = '/opt/data/localdata' project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(config.ConfigurationError) as e: project.volumes.initialize() assert 'Configuration for volume {} specifies "device" driver_opt {}'.format( vol_name, driver_opts['device'] ) in str(e.value) def test_initialize_volumes_updated_blank_driver(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={vol_name: {'driver': 'local'}}, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.volumes.initialize() volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert volume_data['Driver'] == 'local' config_data = config_data._replace( volumes={vol_name: {}} ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.volumes.initialize() volume_data = self.get_volume_data(full_vol_name) assert volume_data['Name'].split('/')[-1] == full_vol_name assert volume_data['Driver'] == 'local' @no_cluster('inspect volume by name defect on Swarm Classic') def test_initialize_volumes_external_volumes(self): # Use composetest_ prefix so it gets garbage-collected in tearDown() vol_name = 'composetest_{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) self.client.create_volume(vol_name) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={ vol_name: {'external': True, 'name': vol_name} }, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.volumes.initialize() with pytest.raises(NotFound): self.client.inspect_volume(full_vol_name) def test_initialize_volumes_inexistent_external_volume(self): vol_name = '{:x}'.format(random.getrandbits(32)) config_data = build_config( services=[{ 'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top' }], volumes={ vol_name: {'external': True, 'name': vol_name} }, ) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(config.ConfigurationError) as e: project.volumes.initialize() assert 'Volume {} declared as external'.format( vol_name ) in str(e.value) def test_project_up_named_volumes_in_binds(self): vol_name = '{:x}'.format(random.getrandbits(32)) full_vol_name = 'composetest_{}'.format(vol_name) base_file = config.ConfigFile( 'base.yml', { 'services': { 'simple': { 'image': 
BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'volumes': ['{}:/data'.format(vol_name)] }, }, 'volumes': { vol_name: {'driver': 'local'} } }) config_details = config.ConfigDetails('.', [base_file]) config_data = config.load(config_details) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) service = project.services[0] assert service.name == 'simple' volumes = service.options.get('volumes') assert len(volumes) == 1 assert volumes[0].external == full_vol_name project.up() engine_volumes = self.client.volumes()['Volumes'] container = service.get_container() assert [mount['Name'] for mount in container.get('Mounts')] == [full_vol_name] assert next((v for v in engine_volumes if v['Name'] == vol_name), None) is None def test_project_up_orphans(self): config_dict = { 'service1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() config_dict['service2'] = config_dict['service1'] del config_dict['service1'] config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with mock.patch('compose.project.log') as mock_log: project.up() mock_log.warning.assert_called_once_with(mock.ANY) assert len([ ctnr for ctnr in project._labeled_containers() if ctnr.labels.get(LABEL_SERVICE) == 'service1' ]) == 1 project.up(remove_orphans=True) assert len([ ctnr for ctnr in project._labeled_containers() if ctnr.labels.get(LABEL_SERVICE) == 'service1' ]) == 0 def test_project_up_ignore_orphans(self): config_dict = { 'service1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() config_dict['service2'] = config_dict['service1'] del config_dict['service1'] config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with mock.patch('compose.project.log') as mock_log: project.up(ignore_orphans=True) mock_log.warning.assert_not_called() def test_project_up_healthy_dependency(self): config_dict = { 'version': '2.1', 'services': { 'svc1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'healthcheck': { 'test': 'exit 0', 'retries': 1, 'timeout': '10s', 'interval': '1s' }, }, 'svc2': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'depends_on': { 'svc1': {'condition': 'service_healthy'}, } } } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() containers = project.containers() assert len(containers) == 2 svc1 = project.get_service('svc1') svc2 = project.get_service('svc2') assert 'svc1' in svc2.get_dependency_names() assert svc1.is_healthy() def test_project_up_unhealthy_dependency(self): config_dict = { 'version': '2.1', 'services': { 'svc1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'healthcheck': { 'test': 'exit 1', 'retries': 1, 'timeout': '10s', 'interval': '1s' }, }, 'svc2': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'depends_on': { 'svc1': {'condition': 'service_healthy'}, } } } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(ProjectError): project.up() containers = project.containers() 
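        # For orientation: the config_dict in this test and in
        # test_project_up_healthy_dependency above maps to the following
        # Compose v2.1 YAML (a sketch mirroring the dicts; the only
        # difference between the two tests is 'exit 0' vs 'exit 1', and
        # busybox:<tag> stands in for BUSYBOX_IMAGE_WITH_TAG):
        #
        #   version: '2.1'
        #   services:
        #     svc1:
        #       image: busybox:<tag>
        #       command: top
        #       healthcheck:
        #         test: exit 1
        #         retries: 1
        #         timeout: 10s
        #         interval: 1s
        #     svc2:
        #       image: busybox:<tag>
        #       command: top
        #       depends_on:
        #         svc1:
        #           condition: service_healthy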
assert len(containers) == 1 svc1 = project.get_service('svc1') svc2 = project.get_service('svc2') assert 'svc1' in svc2.get_dependency_names() with pytest.raises(HealthCheckFailed): svc1.is_healthy() def test_project_up_no_healthcheck_dependency(self): config_dict = { 'version': '2.1', 'services': { 'svc1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'healthcheck': { 'disable': True }, }, 'svc2': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'depends_on': { 'svc1': {'condition': 'service_healthy'}, } } } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(ProjectError): project.up() containers = project.containers() assert len(containers) == 1 svc1 = project.get_service('svc1') svc2 = project.get_service('svc2') assert 'svc1' in svc2.get_dependency_names() with pytest.raises(NoHealthCheckConfigured): svc1.is_healthy() def test_project_up_completed_successfully_dependency(self): config_dict = { 'version': '2.1', 'services': { 'svc1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'true' }, 'svc2': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'depends_on': { 'svc1': {'condition': 'service_completed_successfully'}, } } } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) project.up() svc1 = project.get_service('svc1') svc2 = project.get_service('svc2') assert 'svc1' in svc2.get_dependency_names() assert svc2.containers()[0].is_running assert len(svc1.containers()) == 0 assert svc1.is_completed_successfully() def test_project_up_completed_unsuccessfully_dependency(self): config_dict = { 'version': '2.1', 'services': { 'svc1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'false' }, 'svc2': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'depends_on': { 'svc1': {'condition': 'service_completed_successfully'}, } } } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(ProjectError): project.up() svc1 = project.get_service('svc1') svc2 = project.get_service('svc2') assert 'svc1' in svc2.get_dependency_names() assert len(svc2.containers()) == 0 with pytest.raises(CompletedUnsuccessfully): svc1.is_completed_successfully() def test_project_up_completed_differently_dependencies(self): config_dict = { 'version': '2.1', 'services': { 'svc1': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'true' }, 'svc2': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'false' }, 'svc3': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'depends_on': { 'svc1': {'condition': 'service_completed_successfully'}, 'svc2': {'condition': 'service_completed_successfully'}, } } } } config_data = load_config(config_dict) project = Project.from_config( name='composetest', config_data=config_data, client=self.client ) with pytest.raises(ProjectError): project.up() svc1 = project.get_service('svc1') svc2 = project.get_service('svc2') svc3 = project.get_service('svc3') assert ['svc1', 'svc2'] == svc3.get_dependency_names() assert svc1.is_completed_successfully() assert len(svc3.containers()) == 0 with pytest.raises(CompletedUnsuccessfully): svc2.is_completed_successfully() def test_project_up_seccomp_profile(self): seccomp_data = { 'defaultAction': 'SCMP_ACT_ALLOW', 'syscalls': [] } fd, profile_path = tempfile.mkstemp('_seccomp.json') self.addCleanup(os.remove, profile_path) with os.fdopen(fd, 'w') as f: 
            json.dump(seccomp_data, f)

        config_dict = {
            'version': '2.3',
            'services': {
                'svc1': {
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                    'command': 'top',
                    'security_opt': ['seccomp:"{}"'.format(profile_path)]
                }
            }
        }

        config_data = load_config(config_dict)
        project = Project.from_config(name='composetest', config_data=config_data, client=self.client)
        project.up()
        containers = project.containers()
        assert len(containers) == 1

        remote_secopts = containers[0].get('HostConfig.SecurityOpt')
        assert len(remote_secopts) == 1
        assert remote_secopts[0].startswith('seccomp=')
        assert json.loads(remote_secopts[0].lstrip('seccomp=')) == seccomp_data

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_project_up_name_starts_with_illegal_char(self):
        config_dict = {
            'version': '2.3',
            'services': {
                'svc1': {
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                    'command': 'ls',
                    'volumes': ['foo:/foo:rw'],
                    'networks': ['bar'],
                },
            },
            'volumes': {
                'foo': {},
            },
            'networks': {
                'bar': {},
            }
        }
        config_data = load_config(config_dict)
        project = Project.from_config(
            name='_underscoretest', config_data=config_data, client=self.client
        )
        project.up()
        self.addCleanup(project.down, None, True)

        containers = project.containers(stopped=True)
        assert len(containers) == 1
        assert containers[0].name.startswith('underscoretest_svc1_')
        assert containers[0].project == '_underscoretest'

        full_vol_name = 'underscoretest_foo'
        vol_data = self.get_volume_data(full_vol_name)
        assert vol_data
        assert vol_data['Labels'][LABEL_PROJECT] == '_underscoretest'

        full_net_name = '_underscoretest_bar'
        net_data = self.client.inspect_network(full_net_name)
        assert net_data
        assert net_data['Labels'][LABEL_PROJECT] == '_underscoretest'

        project2 = Project.from_config(
            name='-dashtest', config_data=config_data, client=self.client
        )
        project2.up()
        self.addCleanup(project2.down, None, True)

        containers = project2.containers(stopped=True)
        assert len(containers) == 1
        assert containers[0].name.startswith('dashtest_svc1_')
        assert containers[0].project == '-dashtest'

        full_vol_name = 'dashtest_foo'
        vol_data = self.get_volume_data(full_vol_name)
        assert vol_data
        assert vol_data['Labels'][LABEL_PROJECT] == '-dashtest'

        full_net_name = '-dashtest_bar'
        net_data = self.client.inspect_network(full_net_name)
        assert net_data
        assert net_data['Labels'][LABEL_PROJECT] == '-dashtest'

compose-1.29.2/tests/integration/resilience_test.py

import pytest

from .. import mock
from .testcases import DockerClientTestCase
from compose.config.types import VolumeSpec
from compose.project import Project
from compose.service import ConvergenceStrategy


class ResilienceTest(DockerClientTestCase):
    def setUp(self):
        self.db = self.create_service(
            'db',
            volumes=[VolumeSpec.parse('/var/db')],
            command='top')
        self.project = Project('composetest', [self.db], self.client)

        container = self.db.create_container()
        self.db.start_container(container)
        self.host_path = container.get_mount('/var/db')['Source']

    def tearDown(self):
        del self.project
        del self.db
        super().tearDown()

    def test_successful_recreate(self):
        self.project.up(strategy=ConvergenceStrategy.always)
        container = self.db.containers()[0]
        assert container.get_mount('/var/db')['Source'] == self.host_path

    def test_create_failure(self):
        with mock.patch('compose.service.Service.create_container', crash):
            with pytest.raises(Crash):
                self.project.up(strategy=ConvergenceStrategy.always)

        self.project.up()
        container = self.db.containers()[0]
        assert container.get_mount('/var/db')['Source'] == self.host_path

    def test_start_failure(self):
        with mock.patch('compose.service.Service.start_container', crash):
            with pytest.raises(Crash):
                self.project.up(strategy=ConvergenceStrategy.always)

        self.project.up()
        container = self.db.containers()[0]
        assert container.get_mount('/var/db')['Source'] == self.host_path


class Crash(Exception):
    pass


def crash(*args, **kwargs):
    raise Crash()

compose-1.29.2/tests/integration/service_test.py

import os
import re
import shutil
import tempfile
from distutils.spawn import find_executable
from io import StringIO
from os import path

import pytest
from docker.errors import APIError
from docker.errors import ImageNotFound

from ..
import mock from ..helpers import BUSYBOX_IMAGE_WITH_TAG from .testcases import docker_client from .testcases import DockerClientTestCase from .testcases import get_links from .testcases import pull_busybox from .testcases import SWARM_SKIP_CONTAINERS_ALL from .testcases import SWARM_SKIP_CPU_SHARES from compose import __version__ from compose.config.types import MountSpec from compose.config.types import SecurityOpt from compose.config.types import VolumeFromSpec from compose.config.types import VolumeSpec from compose.const import IS_WINDOWS_PLATFORM from compose.const import LABEL_CONFIG_HASH from compose.const import LABEL_CONTAINER_NUMBER from compose.const import LABEL_ONE_OFF from compose.const import LABEL_PROJECT from compose.const import LABEL_SERVICE from compose.const import LABEL_VERSION from compose.container import Container from compose.errors import OperationFailedError from compose.parallel import ParallelStreamWriter from compose.project import OneOffFilter from compose.project import Project from compose.service import BuildAction from compose.service import BuildError from compose.service import ConvergencePlan from compose.service import ConvergenceStrategy from compose.service import IpcMode from compose.service import NetworkMode from compose.service import PidMode from compose.service import Service from compose.utils import parse_nanoseconds_int from tests.helpers import create_custom_host_file from tests.integration.testcases import is_cluster from tests.integration.testcases import no_cluster def create_and_start_container(service, **override_options): container = service.create_container(**override_options) return service.start_container(container) class ServiceTest(DockerClientTestCase): def test_containers(self): foo = self.create_service('foo') bar = self.create_service('bar') create_and_start_container(foo) assert len(foo.containers()) == 1 assert foo.containers()[0].name.startswith('composetest_foo_') assert len(bar.containers()) == 0 create_and_start_container(bar) create_and_start_container(bar) assert len(foo.containers()) == 1 assert len(bar.containers()) == 2 names = [c.name for c in bar.containers()] assert len(names) == 2 assert all(name.startswith('composetest_bar_') for name in names) def test_containers_one_off(self): db = self.create_service('db') container = db.create_container(one_off=True) assert db.containers(stopped=True) == [] assert db.containers(one_off=OneOffFilter.only, stopped=True) == [container] def test_project_is_added_to_container_name(self): service = self.create_service('web') create_and_start_container(service) assert service.containers()[0].name.startswith('composetest_web_') def test_create_container_with_one_off(self): db = self.create_service('db') container = db.create_container(one_off=True) assert container.name.startswith('composetest_db_run_') def test_create_container_with_one_off_when_existing_container_is_running(self): db = self.create_service('db') db.start() container = db.create_container(one_off=True) assert container.name.startswith('composetest_db_run_') def test_create_container_with_unspecified_volume(self): service = self.create_service('db', volumes=[VolumeSpec.parse('/var/db')]) container = service.create_container() service.start_container(container) assert container.get_mount('/var/db') def test_create_container_with_volume_driver(self): service = self.create_service('db', volume_driver='foodriver') container = service.create_container() service.start_container(container) assert 'foodriver' == 
container.get('HostConfig.VolumeDriver') @pytest.mark.skipif(SWARM_SKIP_CPU_SHARES, reason='Swarm --cpu-shares bug') def test_create_container_with_cpu_shares(self): service = self.create_service('db', cpu_shares=73) container = service.create_container() service.start_container(container) assert container.get('HostConfig.CpuShares') == 73 def test_create_container_with_cpu_quota(self): service = self.create_service('db', cpu_quota=40000, cpu_period=150000) container = service.create_container() container.start() assert container.get('HostConfig.CpuQuota') == 40000 assert container.get('HostConfig.CpuPeriod') == 150000 @pytest.mark.xfail(raises=OperationFailedError, reason='not supported by kernel') def test_create_container_with_cpu_rt(self): service = self.create_service('db', cpu_rt_runtime=40000, cpu_rt_period=150000) container = service.create_container() container.start() assert container.get('HostConfig.CpuRealtimeRuntime') == 40000 assert container.get('HostConfig.CpuRealtimePeriod') == 150000 def test_create_container_with_cpu_count(self): self.require_api_version('1.25') service = self.create_service('db', cpu_count=2) container = service.create_container() service.start_container(container) assert container.get('HostConfig.CpuCount') == 2 @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='cpu_percent is not supported for Linux') def test_create_container_with_cpu_percent(self): self.require_api_version('1.25') service = self.create_service('db', cpu_percent=12) container = service.create_container() service.start_container(container) assert container.get('HostConfig.CpuPercent') == 12 def test_create_container_with_cpus(self): self.require_api_version('1.25') service = self.create_service('db', cpus=1) container = service.create_container() service.start_container(container) assert container.get('HostConfig.NanoCpus') == 1000000000 def test_create_container_with_shm_size(self): self.require_api_version('1.22') service = self.create_service('db', shm_size=67108864) container = service.create_container() service.start_container(container) assert container.get('HostConfig.ShmSize') == 67108864 def test_create_container_with_init_bool(self): self.require_api_version('1.25') service = self.create_service('db', init=True) container = service.create_container() service.start_container(container) assert container.get('HostConfig.Init') is True @pytest.mark.xfail(True, reason='Option has been removed in Engine 17.06.0') def test_create_container_with_init_path(self): self.require_api_version('1.25') docker_init_path = find_executable('docker-init') service = self.create_service('db', init=docker_init_path) container = service.create_container() service.start_container(container) assert container.get('HostConfig.InitPath') == docker_init_path @pytest.mark.xfail(True, reason='Some kernels/configs do not support pids_limit') def test_create_container_with_pids_limit(self): self.require_api_version('1.23') service = self.create_service('db', pids_limit=10) container = service.create_container() service.start_container(container) assert container.get('HostConfig.PidsLimit') == 10 def test_create_container_with_extra_hosts_list(self): extra_hosts = ['somehost:162.242.195.82', 'otherhost:50.31.209.229'] service = self.create_service('db', extra_hosts=extra_hosts) container = service.create_container() service.start_container(container) assert set(container.get('HostConfig.ExtraHosts')) == set(extra_hosts) def test_create_container_with_extra_hosts_dicts(self): extra_hosts = {'somehost': 
'162.242.195.82', 'otherhost': '50.31.209.229'} extra_hosts_list = ['somehost:162.242.195.82', 'otherhost:50.31.209.229'] service = self.create_service('db', extra_hosts=extra_hosts) container = service.create_container() service.start_container(container) assert set(container.get('HostConfig.ExtraHosts')) == set(extra_hosts_list) def test_create_container_with_cpu_set(self): service = self.create_service('db', cpuset='0') container = service.create_container() service.start_container(container) assert container.get('HostConfig.CpusetCpus') == '0' def test_create_container_with_read_only_root_fs(self): read_only = True service = self.create_service('db', read_only=read_only) container = service.create_container() service.start_container(container) assert container.get('HostConfig.ReadonlyRootfs') == read_only @pytest.mark.xfail(True, reason='Getting "Your kernel does not support ' 'cgroup blkio weight and weight_device" on daemon start ' 'on Linux kernel 5.3.x') def test_create_container_with_blkio_config(self): blkio_config = { 'weight': 300, 'weight_device': [{'path': '/dev/sda', 'weight': 200}], 'device_read_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024 * 100}], 'device_read_iops': [{'path': '/dev/sda', 'rate': 1000}], 'device_write_bps': [{'path': '/dev/sda', 'rate': 1024 * 1024}], 'device_write_iops': [{'path': '/dev/sda', 'rate': 800}] } service = self.create_service('web', blkio_config=blkio_config) container = service.create_container() assert container.get('HostConfig.BlkioWeight') == 300 assert container.get('HostConfig.BlkioWeightDevice') == [{ 'Path': '/dev/sda', 'Weight': 200 }] assert container.get('HostConfig.BlkioDeviceReadBps') == [{ 'Path': '/dev/sda', 'Rate': 1024 * 1024 * 100 }] assert container.get('HostConfig.BlkioDeviceWriteBps') == [{ 'Path': '/dev/sda', 'Rate': 1024 * 1024 }] assert container.get('HostConfig.BlkioDeviceReadIOps') == [{ 'Path': '/dev/sda', 'Rate': 1000 }] assert container.get('HostConfig.BlkioDeviceWriteIOps') == [{ 'Path': '/dev/sda', 'Rate': 800 }] def test_create_container_with_security_opt(self): security_opt = [SecurityOpt.parse('label:disable')] service = self.create_service('db', security_opt=security_opt) container = service.create_container() service.start_container(container) assert set(container.get('HostConfig.SecurityOpt')) == {o.repr() for o in security_opt} @pytest.mark.xfail(True, reason='Not supported on most drivers') def test_create_container_with_storage_opt(self): storage_opt = {'size': '1G'} service = self.create_service('db', storage_opt=storage_opt) container = service.create_container() service.start_container(container) assert container.get('HostConfig.StorageOpt') == storage_opt def test_create_container_with_oom_kill_disable(self): self.require_api_version('1.20') service = self.create_service('db', oom_kill_disable=True) container = service.create_container() assert container.get('HostConfig.OomKillDisable') is True def test_create_container_with_mac_address(self): service = self.create_service('db', mac_address='02:42:ac:11:65:43') container = service.create_container() service.start_container(container) assert container.inspect()['Config']['MacAddress'] == '02:42:ac:11:65:43' def test_create_container_with_device_cgroup_rules(self): service = self.create_service('db', device_cgroup_rules=['c 7:128 rwm']) container = service.create_container() assert container.get('HostConfig.DeviceCgroupRules') == ['c 7:128 rwm'] def test_create_container_with_specified_volume(self): host_path = '/tmp/host-path' container_path = 
'/container-path' service = self.create_service( 'db', volumes=[VolumeSpec(host_path, container_path, 'rw')]) container = service.create_container() service.start_container(container) assert container.get_mount(container_path) # Match the last component ("host-path"), because boot2docker symlinks /tmp actual_host_path = container.get_mount(container_path)['Source'] assert path.basename(actual_host_path) == path.basename(host_path), ( "Last component differs: {}, {}".format(actual_host_path, host_path) ) def test_create_container_with_host_mount(self): host_path = '/tmp/host-path' container_path = '/container-path' create_custom_host_file(self.client, path.join(host_path, 'a.txt'), 'test') service = self.create_service( 'db', volumes=[ MountSpec(type='bind', source=host_path, target=container_path, read_only=True) ] ) container = service.create_container() service.start_container(container) mount = container.get_mount(container_path) assert mount assert path.basename(mount['Source']) == path.basename(host_path) assert mount['RW'] is False def test_create_container_with_tmpfs_mount(self): container_path = '/container-tmpfs' service = self.create_service( 'db', volumes=[MountSpec(type='tmpfs', target=container_path)] ) container = service.create_container() service.start_container(container) mount = container.get_mount(container_path) assert mount assert mount['Type'] == 'tmpfs' def test_create_container_with_tmpfs_mount_tmpfs_size(self): container_path = '/container-tmpfs' service = self.create_service( 'db', volumes=[MountSpec(type='tmpfs', target=container_path, tmpfs={'size': 5368709})] ) container = service.create_container() service.start_container(container) mount = container.get_mount(container_path) assert mount print(container.dictionary) assert mount['Type'] == 'tmpfs' assert container.get('HostConfig.Mounts')[0]['TmpfsOptions'] == { 'SizeBytes': 5368709 } def test_create_container_with_volume_mount(self): container_path = '/container-volume' volume_name = 'composetest_abcde' self.client.create_volume(volume_name) service = self.create_service( 'db', volumes=[MountSpec(type='volume', source=volume_name, target=container_path)] ) container = service.create_container() service.start_container(container) mount = container.get_mount(container_path) assert mount assert mount['Name'] == volume_name def test_create_container_with_legacy_mount(self): # Ensure mounts are converted to volumes if API version < 1.30 # Needed to support long syntax in the 3.2 format client = docker_client({}, version='1.25') container_path = '/container-volume' volume_name = 'composetest_abcde' self.client.create_volume(volume_name) service = Service('db', client=client, volumes=[ MountSpec(type='volume', source=volume_name, target=container_path) ], image=BUSYBOX_IMAGE_WITH_TAG, command=['top'], project='composetest') container = service.create_container() service.start_container(container) mount = container.get_mount(container_path) assert mount assert mount['Name'] == volume_name def test_create_container_with_legacy_tmpfs_mount(self): # Ensure tmpfs mounts are converted to tmpfs entries if API version < 1.30 # Needed to support long syntax in the 3.2 format client = docker_client({}, version='1.25') container_path = '/container-tmpfs' service = Service('db', client=client, volumes=[ MountSpec(type='tmpfs', target=container_path) ], image=BUSYBOX_IMAGE_WITH_TAG, command=['top'], project='composetest') container = service.create_container() service.start_container(container) mount = 
container.get_mount(container_path) assert mount is None assert container_path in container.get('HostConfig.Tmpfs') def test_create_container_with_healthcheck_config(self): one_second = parse_nanoseconds_int('1s') healthcheck = { 'test': ['true'], 'interval': 2 * one_second, 'timeout': 5 * one_second, 'retries': 5, 'start_period': 2 * one_second } service = self.create_service('db', healthcheck=healthcheck) container = service.create_container() remote_healthcheck = container.get('Config.Healthcheck') assert remote_healthcheck['Test'] == healthcheck['test'] assert remote_healthcheck['Interval'] == healthcheck['interval'] assert remote_healthcheck['Timeout'] == healthcheck['timeout'] assert remote_healthcheck['Retries'] == healthcheck['retries'] assert remote_healthcheck['StartPeriod'] == healthcheck['start_period'] def test_recreate_preserves_volume_with_trailing_slash(self): """When the Compose file specifies a trailing slash in the container path, make sure we copy the volume over when recreating. """ service = self.create_service('data', volumes=[VolumeSpec.parse('/data/')]) old_container = create_and_start_container(service) volume_path = old_container.get_mount('/data')['Source'] new_container = service.recreate_container(old_container) assert new_container.get_mount('/data')['Source'] == volume_path def test_recreate_volume_to_mount(self): # https://github.com/docker/compose/issues/6280 service = Service( project='composetest', name='db', client=self.client, build={'context': 'tests/fixtures/dockerfile-with-volume'}, volumes=[MountSpec.parse({ 'type': 'volume', 'target': '/data', })] ) old_container = create_and_start_container(service) new_container = service.recreate_container(old_container) assert new_container.get_mount('/data')['Source'] def test_duplicate_volume_trailing_slash(self): """ When an image specifies a volume, and the Compose file specifies a host path but adds a trailing slash, make sure that we don't create duplicate binds. 
""" host_path = '/tmp/data' container_path = '/data' volumes = [VolumeSpec.parse('{}:{}/'.format(host_path, container_path))] tmp_container = self.client.create_container( 'busybox', 'true', volumes={container_path: {}}, labels={'com.docker.compose.test_image': 'true'}, host_config={} ) image = self.client.commit(tmp_container)['Id'] service = self.create_service('db', image=image, volumes=volumes) old_container = create_and_start_container(service) assert old_container.get('Config.Volumes') == {container_path: {}} service = self.create_service('db', image=image, volumes=volumes) new_container = service.recreate_container(old_container) assert new_container.get('Config.Volumes') == {container_path: {}} assert service.containers(stopped=False) == [new_container] def test_create_container_with_volumes_from(self): volume_service = self.create_service('data') volume_container_1 = volume_service.create_container() volume_container_2 = Container.create( self.client, image=BUSYBOX_IMAGE_WITH_TAG, command=["top"], labels={LABEL_PROJECT: 'composetest'}, host_config={}, environment=['affinity:container=={}'.format(volume_container_1.id)], ) host_service = self.create_service( 'host', volumes_from=[ VolumeFromSpec(volume_service, 'rw', 'service'), VolumeFromSpec(volume_container_2, 'rw', 'container') ], environment=['affinity:container=={}'.format(volume_container_1.id)], ) host_container = host_service.create_container() host_service.start_container(host_container) assert volume_container_1.id + ':rw' in host_container.get('HostConfig.VolumesFrom') assert volume_container_2.id + ':rw' in host_container.get('HostConfig.VolumesFrom') def test_execute_convergence_plan_recreate(self): service = self.create_service( 'db', environment={'FOO': '1'}, volumes=[VolumeSpec.parse('/etc')], entrypoint=['top'], command=['-d', '1'] ) old_container = service.create_container() assert old_container.get('Config.Entrypoint') == ['top'] assert old_container.get('Config.Cmd') == ['-d', '1'] assert 'FOO=1' in old_container.get('Config.Env') assert old_container.name.startswith('composetest_db_') service.start_container(old_container) old_container.inspect() # reload volume data volume_path = old_container.get_mount('/etc')['Source'] num_containers_before = len(self.client.containers(all=True)) service.options['environment']['FOO'] = '2' new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container])) assert new_container.get('Config.Entrypoint') == ['top'] assert new_container.get('Config.Cmd') == ['-d', '1'] assert 'FOO=2' in new_container.get('Config.Env') assert new_container.name.startswith('composetest_db_') assert new_container.get_mount('/etc')['Source'] == volume_path if not is_cluster(self.client): assert ( 'affinity:container==%s' % old_container.id in new_container.get('Config.Env') ) else: # In Swarm, the env marker is consumed and the container should be deployed # on the same node. 
assert old_container.get('Node.Name') == new_container.get('Node.Name') assert len(self.client.containers(all=True)) == num_containers_before assert old_container.id != new_container.id with pytest.raises(APIError): self.client.inspect_container(old_container.id) def test_execute_convergence_plan_recreate_change_mount_target(self): service = self.create_service( 'db', volumes=[MountSpec(target='/app1', type='volume')], entrypoint=['top'], command=['-d', '1'] ) old_container = create_and_start_container(service) assert ( [mount['Destination'] for mount in old_container.get('Mounts')] == ['/app1'] ) service.options['volumes'] = [MountSpec(target='/app2', type='volume')] new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container]) ) assert ( [mount['Destination'] for mount in new_container.get('Mounts')] == ['/app2'] ) def test_execute_convergence_plan_recreate_twice(self): service = self.create_service( 'db', volumes=[VolumeSpec.parse('/etc')], entrypoint=['top'], command=['-d', '1']) orig_container = service.create_container() service.start_container(orig_container) orig_container.inspect() # reload volume data volume_path = orig_container.get_mount('/etc')['Source'] # Do this twice to reproduce the bug for _ in range(2): new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [orig_container])) assert new_container.get_mount('/etc')['Source'] == volume_path if not is_cluster(self.client): assert ('affinity:container==%s' % orig_container.id in new_container.get('Config.Env')) else: # In Swarm, the env marker is consumed and the container should be deployed # on the same node. assert orig_container.get('Node.Name') == new_container.get('Node.Name') orig_container = new_container def test_execute_convergence_plan_recreate_twice_with_mount(self): service = self.create_service( 'db', volumes=[MountSpec(target='/etc', type='volume')], entrypoint=['top'], command=['-d', '1'] ) orig_container = service.create_container() service.start_container(orig_container) orig_container.inspect() # reload volume data volume_path = orig_container.get_mount('/etc')['Source'] # Do this twice to reproduce the bug for _ in range(2): new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [orig_container]) ) assert new_container.get_mount('/etc')['Source'] == volume_path if not is_cluster(self.client): assert ('affinity:container==%s' % orig_container.id in new_container.get('Config.Env')) else: # In Swarm, the env marker is consumed and the container should be deployed # on the same node. 
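        # A compact sketch of the recreate loop in this test, assuming
        # ConvergencePlan is the (action, containers) namedtuple imported
        # from compose.service at the top of this file; each pass recreates
        # the container while the anonymous /etc volume keeps the same host
        # Source:
        #
        #   plan = ConvergencePlan('recreate', [orig_container])
        #   new_container, = service.execute_convergence_plan(plan)
        #   assert new_container.get_mount('/etc')['Source'] == volume_path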
assert orig_container.get('Node.Name') == new_container.get('Node.Name') orig_container = new_container def test_execute_convergence_plan_when_containers_are_stopped(self): service = self.create_service( 'db', environment={'FOO': '1'}, volumes=[VolumeSpec.parse('/var/db')], entrypoint=['top'], command=['-d', '1'] ) service.create_container() containers = service.containers(stopped=True) assert len(containers) == 1 container, = containers assert not container.is_running service.execute_convergence_plan(ConvergencePlan('start', [container])) containers = service.containers() assert len(containers) == 1 container.inspect() assert container == containers[0] assert container.is_running def test_execute_convergence_plan_with_image_declared_volume(self): service = Service( project='composetest', name='db', client=self.client, build={'context': 'tests/fixtures/dockerfile-with-volume'}, ) old_container = create_and_start_container(service) assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] volume_path = old_container.get_mount('/data')['Source'] new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container])) assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] assert new_container.get_mount('/data')['Source'] == volume_path def test_execute_convergence_plan_with_image_declared_volume_renew(self): service = Service( project='composetest', name='db', client=self.client, build={'context': 'tests/fixtures/dockerfile-with-volume'}, ) old_container = create_and_start_container(service) assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] volume_path = old_container.get_mount('/data')['Source'] new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container]), renew_anonymous_volumes=True ) assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] assert new_container.get_mount('/data')['Source'] != volume_path def test_execute_convergence_plan_when_image_volume_masks_config(self): service = self.create_service( 'db', build={'context': 'tests/fixtures/dockerfile-with-volume'}, ) old_container = create_and_start_container(service) assert [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] volume_path = old_container.get_mount('/data')['Source'] service.options['volumes'] = [VolumeSpec.parse('/tmp:/data')] with mock.patch('compose.service.log') as mock_log: new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container])) mock_log.warning.assert_called_once_with(mock.ANY) _, args, kwargs = mock_log.warning.mock_calls[0] assert "Service \"db\" is using volume \"/data\" from the previous container" in args[0] assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] assert new_container.get_mount('/data')['Source'] == volume_path def test_execute_convergence_plan_when_host_volume_is_removed(self): host_path = '/tmp/host-path' service = self.create_service( 'db', build={'context': 'tests/fixtures/dockerfile-with-volume'}, volumes=[VolumeSpec(host_path, '/data', 'rw')]) old_container = create_and_start_container(service) assert ( [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] ) service.options['volumes'] = [] with mock.patch('compose.service.log', autospec=True) as mock_log: new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container])) assert not mock_log.warn.called assert ( 
[mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] ) assert new_container.get_mount('/data')['Source'] != host_path def test_execute_convergence_plan_anonymous_volume_renew(self): service = self.create_service( 'db', image='busybox', volumes=[VolumeSpec(None, '/data', 'rw')]) old_container = create_and_start_container(service) assert ( [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] ) volume_path = old_container.get_mount('/data')['Source'] new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container]), renew_anonymous_volumes=True ) assert ( [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] ) assert new_container.get_mount('/data')['Source'] != volume_path def test_execute_convergence_plan_anonymous_volume_recreate_then_renew(self): service = self.create_service( 'db', image='busybox', volumes=[VolumeSpec(None, '/data', 'rw')]) old_container = create_and_start_container(service) assert ( [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] ) volume_path = old_container.get_mount('/data')['Source'] mid_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container]), ) assert ( [mount['Destination'] for mount in mid_container.get('Mounts')] == ['/data'] ) assert mid_container.get_mount('/data')['Source'] == volume_path new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [mid_container]), renew_anonymous_volumes=True ) assert ( [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] ) assert new_container.get_mount('/data')['Source'] != volume_path def test_execute_convergence_plan_without_start(self): service = self.create_service( 'db', build={'context': 'tests/fixtures/dockerfile-with-volume'} ) containers = service.execute_convergence_plan(ConvergencePlan('create', []), start=False) service_containers = service.containers(stopped=True) assert len(service_containers) == 1 assert not service_containers[0].is_running containers = service.execute_convergence_plan( ConvergencePlan('recreate', containers), start=False) service_containers = service.containers(stopped=True) assert len(service_containers) == 1 assert not service_containers[0].is_running service.execute_convergence_plan(ConvergencePlan('start', containers), start=False) service_containers = service.containers(stopped=True) assert len(service_containers) == 1 assert not service_containers[0].is_running def test_execute_convergence_plan_image_with_volume_is_removed(self): service = self.create_service( 'db', build={'context': 'tests/fixtures/dockerfile-with-volume'} ) old_container = create_and_start_container(service) assert ( [mount['Destination'] for mount in old_container.get('Mounts')] == ['/data'] ) volume_path = old_container.get_mount('/data')['Source'] old_container.stop() self.client.remove_image(service.image(), force=True) service.ensure_image_exists() with pytest.raises(ImageNotFound): service.execute_convergence_plan( ConvergencePlan('recreate', [old_container]) ) old_container.inspect() # retrieve new name from server new_container, = service.execute_convergence_plan( ConvergencePlan('recreate', [old_container]), reset_container_image=True ) assert [mount['Destination'] for mount in new_container.get('Mounts')] == ['/data'] assert new_container.get_mount('/data')['Source'] == volume_path def test_start_container_passes_through_options(self): db = self.create_service('db') create_and_start_container(db, 
environment={'FOO': 'BAR'}) assert db.containers()[0].environment['FOO'] == 'BAR' def test_start_container_inherits_options_from_constructor(self): db = self.create_service('db', environment={'FOO': 'BAR'}) create_and_start_container(db) assert db.containers()[0].environment['FOO'] == 'BAR' @no_cluster('No legacy links support in Swarm') def test_start_container_creates_links(self): db = self.create_service('db') web = self.create_service('web', links=[(db, None)]) db1 = create_and_start_container(db) db2 = create_and_start_container(db) create_and_start_container(web) assert set(get_links(web.containers()[0])) == { db1.name, db1.name_without_project, db2.name, db2.name_without_project, 'db' } @no_cluster('No legacy links support in Swarm') def test_start_container_creates_links_with_names(self): db = self.create_service('db') web = self.create_service('web', links=[(db, 'custom_link_name')]) db1 = create_and_start_container(db) db2 = create_and_start_container(db) create_and_start_container(web) assert set(get_links(web.containers()[0])) == { db1.name, db1.name_without_project, db2.name, db2.name_without_project, 'custom_link_name' } @no_cluster('No legacy links support in Swarm') def test_start_container_with_external_links(self): db = self.create_service('db') db_ctnrs = [create_and_start_container(db) for _ in range(3)] web = self.create_service( 'web', external_links=[ db_ctnrs[0].name, db_ctnrs[1].name, '{}:db_3'.format(db_ctnrs[2].name) ] ) create_and_start_container(web) assert set(get_links(web.containers()[0])) == { db_ctnrs[0].name, db_ctnrs[1].name, 'db_3' } @no_cluster('No legacy links support in Swarm') def test_start_normal_container_does_not_create_links_to_its_own_service(self): db = self.create_service('db') create_and_start_container(db) create_and_start_container(db) c = create_and_start_container(db) assert set(get_links(c)) == set() @no_cluster('No legacy links support in Swarm') def test_start_one_off_container_creates_links_to_its_own_service(self): db = self.create_service('db') db1 = create_and_start_container(db) db2 = create_and_start_container(db) c = create_and_start_container(db, one_off=OneOffFilter.only) assert set(get_links(c)) == { db1.name, db1.name_without_project, db2.name, db2.name_without_project, 'db' } def test_start_container_builds_images(self): service = Service( name='test', client=self.client, build={'context': 'tests/fixtures/simple-dockerfile'}, project='composetest', ) container = create_and_start_container(service) container.wait() assert b'success' in container.logs() assert len(self.client.images(name='composetest_test')) >= 1 def test_start_container_uses_tagged_image_if_it_exists(self): self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test') service = Service( name='test', client=self.client, build={'context': 'this/does/not/exist/and/will/throw/error'}, project='composetest', ) container = create_and_start_container(service) container.wait() assert b'success' in container.logs() def test_start_container_creates_ports(self): service = self.create_service('web', ports=[8000]) container = create_and_start_container(service).inspect() assert list(container['NetworkSettings']['Ports'].keys()) == ['8000/tcp'] assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] != '8000' def test_build(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") service = self.create_service('web', build={'context': 
base_dir}, environment={ 'COMPOSE_DOCKER_CLI_BUILD': '0', 'DOCKER_BUILDKIT': '0', }) service.build() self.addCleanup(self.client.remove_image, service.image_name) assert self.client.inspect_image('composetest_web') def test_build_cli(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") service = self.create_service('web', build={'context': base_dir}, environment={ 'DOCKER_BUILDKIT': '1', }) service.build(cli=True) self.addCleanup(self.client.remove_image, service.image_name) assert self.client.inspect_image('composetest_web') def test_build_cli_with_build_labels(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") service = self.create_service('web', build={ 'context': base_dir, 'labels': {'com.docker.compose.test': 'true'}}, ) service.build(cli=True) self.addCleanup(self.client.remove_image, service.image_name) image = self.client.inspect_image('composetest_web') assert image['Config']['Labels']['com.docker.compose.test'] def test_build_cli_with_build_error(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('\n'.join([ "FROM busybox", "RUN exit 2", ])) service = self.create_service('web', build={ 'context': base_dir, 'labels': {'com.docker.compose.test': 'true'}}, ) with pytest.raises(BuildError): service.build(cli=True) def test_up_build_cli(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") web = self.create_service('web', build={'context': base_dir}, environment={ 'DOCKER_BUILDKIT': '1', }) project = Project('composetest', [web], self.client) project.up(do_build=BuildAction.force) containers = project.containers(['web']) assert len(containers) == 1 assert containers[0].name.startswith('composetest_web_') def test_build_non_ascii_filename(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") with open(os.path.join(base_dir.encode('utf8'), b'foo\xE2bar'), 'w') as f: f.write("hello world\n") service = self.create_service('web', build={'context': str(base_dir)}) service.build() self.addCleanup(self.client.remove_image, service.image_name) assert self.client.inspect_image('composetest_web') def test_build_with_image_name(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") image_name = 'examples/composetest:latest' self.addCleanup(self.client.remove_image, image_name) self.create_service('web', build={'context': base_dir}, image=image_name).build() assert self.client.inspect_image(image_name) def test_build_with_git_url(self): build_url = "https://github.com/dnephin/docker-build-from-url.git" service = self.create_service('buildwithurl', build={'context': build_url}) self.addCleanup(self.client.remove_image, service.image_name) service.build() assert service.image() def test_build_with_build_args(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") f.write("ARG build_version\n") f.write("RUN echo ${build_version}\n") service = 
self.create_service('buildwithargs', build={'context': str(base_dir), 'args': {"build_version": "1"}}) service.build() self.addCleanup(self.client.remove_image, service.image_name) assert service.image() assert "build_version=1" in service.image()['ContainerConfig']['Cmd'] def test_build_with_build_args_override(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write("FROM busybox\n") f.write("ARG build_version\n") f.write("RUN echo ${build_version}\n") service = self.create_service('buildwithargs', build={'context': str(base_dir), 'args': {"build_version": "1"}}) service.build(build_args_override={'build_version': '2'}) self.addCleanup(self.client.remove_image, service.image_name) assert service.image() assert "build_version=2" in service.image()['ContainerConfig']['Cmd'] def test_build_with_build_labels(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('FROM busybox\n') service = self.create_service('buildlabels', build={ 'context': str(base_dir), 'labels': {'com.docker.compose.test': 'true'} }) service.build() self.addCleanup(self.client.remove_image, service.image_name) assert service.image() assert service.image()['Config']['Labels']['com.docker.compose.test'] == 'true' @no_cluster('Container networks not on Swarm') def test_build_with_network(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('FROM busybox\n') f.write('RUN ping -c1 google.local\n') net_container = self.client.create_container( 'busybox', 'top', host_config=self.client.create_host_config( extra_hosts={'google.local': '127.0.0.1'} ), name='composetest_build_network' ) self.addCleanup(self.client.remove_container, net_container, force=True) self.client.start(net_container) service = self.create_service('buildwithnet', build={ 'context': str(base_dir), 'network': 'container:{}'.format(net_container['Id']) }) service.build() self.addCleanup(self.client.remove_image, service.image_name) assert service.image() @no_cluster('Not supported on UCP 2.2.0-beta1') # FIXME: remove once support is added def test_build_with_target(self): self.require_api_version('1.30') base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('FROM busybox as one\n') f.write('LABEL com.docker.compose.test=true\n') f.write('LABEL com.docker.compose.test.target=one\n') f.write('FROM busybox as two\n') f.write('LABEL com.docker.compose.test.target=two\n') service = self.create_service('buildtarget', build={ 'context': str(base_dir), 'target': 'one' }) service.build() assert service.image() assert service.image()['Config']['Labels']['com.docker.compose.test.target'] == 'one' def test_build_with_extra_hosts(self): self.require_api_version('1.27') base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('\n'.join([ 'FROM busybox', 'RUN ping -c1 foobar', 'RUN ping -c1 baz', ])) service = self.create_service('build_extra_hosts', build={ 'context': str(base_dir), 'extra_hosts': { 'foobar': '127.0.0.1', 'baz': '127.0.0.1' } }) service.build() assert service.image() def test_build_with_gzip(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: 
f.write('\n'.join([ 'FROM busybox', 'COPY . /src', 'RUN cat /src/hello.txt' ])) with open(os.path.join(base_dir, 'hello.txt'), 'w') as f: f.write('hello world\n') service = self.create_service('build_gzip', build={ 'context': str(base_dir), }) service.build(gzip=True) assert service.image() def test_build_with_isolation(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('FROM busybox\n') service = self.create_service('build_isolation', build={ 'context': str(base_dir), 'isolation': 'default', }) service.build() assert service.image() def test_build_with_illegal_leading_chars(self): base_dir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, base_dir) with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f: f.write('FROM busybox\nRUN echo "Embodiment of Scarlet Devil"\n') service = Service( 'build_leading_slug', client=self.client, project='___-composetest', build={ 'context': str(base_dir) } ) assert service.image_name == 'composetest_build_leading_slug' service.build() assert service.image() def test_start_container_stays_unprivileged(self): service = self.create_service('web') container = create_and_start_container(service).inspect() assert container['HostConfig']['Privileged'] is False def test_start_container_becomes_privileged(self): service = self.create_service('web', privileged=True) container = create_and_start_container(service).inspect() assert container['HostConfig']['Privileged'] is True def test_expose_does_not_publish_ports(self): service = self.create_service('web', expose=["8000"]) container = create_and_start_container(service).inspect() assert container['NetworkSettings']['Ports'] == {'8000/tcp': None} def test_start_container_creates_port_with_explicit_protocol(self): service = self.create_service('web', ports=['8000/udp']) container = create_and_start_container(service).inspect() assert list(container['NetworkSettings']['Ports'].keys()) == ['8000/udp'] def test_start_container_creates_fixed_external_ports(self): service = self.create_service('web', ports=['8000:8000']) container = create_and_start_container(service).inspect() assert '8000/tcp' in container['NetworkSettings']['Ports'] assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] == '8000' def test_start_container_creates_fixed_external_ports_when_it_is_different_to_internal_port(self): service = self.create_service('web', ports=['8001:8000']) container = create_and_start_container(service).inspect() assert '8000/tcp' in container['NetworkSettings']['Ports'] assert container['NetworkSettings']['Ports']['8000/tcp'][0]['HostPort'] == '8001' def test_port_with_explicit_interface(self): service = self.create_service('web', ports=[ '127.0.0.1:8001:8000', '0.0.0.0:9001:9000/udp', ]) container = create_and_start_container(service).inspect() assert container['NetworkSettings']['Ports']['8000/tcp'] == [{ 'HostIp': '127.0.0.1', 'HostPort': '8001', }] assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostPort'] == '9001' if not is_cluster(self.client): assert container['NetworkSettings']['Ports']['9000/udp'][0]['HostIp'] == '0.0.0.0' # self.assertEqual(container['NetworkSettings']['Ports'], { # '8000/tcp': [ # { # 'HostIp': '127.0.0.1', # 'HostPort': '8001', # }, # ], # '9000/udp': [ # { # 'HostIp': '0.0.0.0', # 'HostPort': '9001', # }, # ], # }) def test_create_with_image_id(self): pull_busybox(self.client) image_id = self.client.inspect_image(BUSYBOX_IMAGE_WITH_TAG)['Id'][:12] service = 
self.create_service('foo', image=image_id) service.create_container() def test_scale(self): service = self.create_service('web') service.scale(1) assert len(service.containers()) == 1 # Ensure containers don't have stdout or stdin connected container = service.containers()[0] config = container.inspect()['Config'] assert not config['AttachStderr'] assert not config['AttachStdout'] assert not config['AttachStdin'] service.scale(3) assert len(service.containers()) == 3 service.scale(1) assert len(service.containers()) == 1 service.scale(0) assert len(service.containers()) == 0 @pytest.mark.skipif( SWARM_SKIP_CONTAINERS_ALL, reason='Swarm /containers/json bug' ) def test_scale_with_stopped_containers(self): """ Given there are some stopped containers and scale is called with a desired number that is the same as the number of stopped containers, test that those containers are restarted and not removed/recreated. """ service = self.create_service('web') service.create_container(number=1) service.create_container(number=2) ParallelStreamWriter.instance = None with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr: service.scale(2) for container in service.containers(): assert container.is_running assert container.number in [1, 2] captured_output = mock_stderr.getvalue() assert 'Creating' not in captured_output assert 'Starting' in captured_output def test_scale_with_stopped_containers_and_needing_creation(self): """ Given there are some stopped containers and scale is called with a desired number that is greater than the number of stopped containers, test that those containers are restarted and required number are created. """ service = self.create_service('web') next_number = service._next_container_number() service.create_container(number=next_number, quiet=True) for container in service.containers(): assert not container.is_running ParallelStreamWriter.instance = None with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr: service.scale(2) assert len(service.containers()) == 2 for container in service.containers(): assert container.is_running captured_output = mock_stderr.getvalue() assert 'Creating' in captured_output assert 'Starting' in captured_output def test_scale_with_api_error(self): """Test that when scaling if the API returns an error, that error is handled and the remaining threads continue. """ service = self.create_service('web') next_number = service._next_container_number() service.create_container(number=next_number, quiet=True) with mock.patch( 'compose.container.Container.create', side_effect=APIError( message="testing", response={}, explanation="Boom")): with mock.patch('sys.stderr', new_callable=StringIO) as mock_stderr: with pytest.raises(OperationFailedError): service.scale(3) assert len(service.containers()) == 1 assert service.containers()[0].is_running assert "ERROR: for composetest_web_" in mock_stderr.getvalue() assert "Cannot create container for service web: Boom" in mock_stderr.getvalue() def test_scale_with_unexpected_exception(self): """Test that when scaling if the API returns an error, that is not of type APIError, that error is re-raised. 
""" service = self.create_service('web') next_number = service._next_container_number() service.create_container(number=next_number, quiet=True) with mock.patch( 'compose.container.Container.create', side_effect=ValueError("BOOM") ): with pytest.raises(ValueError): service.scale(3) assert len(service.containers()) == 1 assert service.containers()[0].is_running @mock.patch('compose.service.log') def test_scale_with_desired_number_already_achieved(self, mock_log): """ Test that calling scale with a desired number that is equal to the number of containers already running results in no change. """ service = self.create_service('web') next_number = service._next_container_number() container = service.create_container(number=next_number, quiet=True) container.start() container.inspect() assert container.is_running assert len(service.containers()) == 1 service.scale(1) assert len(service.containers()) == 1 container.inspect() assert container.is_running captured_output = mock_log.info.call_args[0] assert 'Desired container number already achieved' in captured_output @mock.patch('compose.service.log') def test_scale_with_custom_container_name_outputs_warning(self, mock_log): """Test that calling scale on a service that has a custom container name results in warning output. """ service = self.create_service('app', container_name='custom-container') assert service.custom_container_name == 'custom-container' with pytest.raises(OperationFailedError): service.scale(3) captured_output = mock_log.warning.call_args[0][0] assert len(service.containers()) == 1 assert "Remove the custom name to scale the service." in captured_output def test_scale_sets_ports(self): service = self.create_service('web', ports=['8000']) service.scale(2) containers = service.containers() assert len(containers) == 2 for container in containers: assert list(container.get('HostConfig.PortBindings')) == ['8000/tcp'] def test_scale_with_immediate_exit(self): service = self.create_service('web', image='busybox', command='true') service.scale(2) assert len(service.containers(stopped=True)) == 2 def test_network_mode_none(self): service = self.create_service('web', network_mode=NetworkMode('none')) container = create_and_start_container(service) assert container.get('HostConfig.NetworkMode') == 'none' def test_network_mode_bridged(self): service = self.create_service('web', network_mode=NetworkMode('bridge')) container = create_and_start_container(service) assert container.get('HostConfig.NetworkMode') == 'bridge' def test_network_mode_host(self): service = self.create_service('web', network_mode=NetworkMode('host')) container = create_and_start_container(service) assert container.get('HostConfig.NetworkMode') == 'host' def test_pid_mode_none_defined(self): service = self.create_service('web', pid_mode=None) container = create_and_start_container(service) assert container.get('HostConfig.PidMode') == '' def test_pid_mode_host(self): service = self.create_service('web', pid_mode=PidMode('host')) container = create_and_start_container(service) assert container.get('HostConfig.PidMode') == 'host' def test_ipc_mode_none_defined(self): service = self.create_service('web', ipc_mode=None) container = create_and_start_container(service) print(container.get('HostConfig.IpcMode')) assert container.get('HostConfig.IpcMode') == 'shareable' def test_ipc_mode_host(self): service = self.create_service('web', ipc_mode=IpcMode('host')) container = create_and_start_container(service) assert container.get('HostConfig.IpcMode') == 'host' def 
test_userns_mode_none_defined(self): service = self.create_service('web', userns_mode=None) container = create_and_start_container(service) assert container.get('HostConfig.UsernsMode') == '' def test_userns_mode_host(self): service = self.create_service('web', userns_mode='host') container = create_and_start_container(service) assert container.get('HostConfig.UsernsMode') == 'host' def test_dns_no_value(self): service = self.create_service('web') container = create_and_start_container(service) assert container.get('HostConfig.Dns') is None def test_dns_list(self): service = self.create_service('web', dns=['8.8.8.8', '9.9.9.9']) container = create_and_start_container(service) assert container.get('HostConfig.Dns') == ['8.8.8.8', '9.9.9.9'] def test_mem_swappiness(self): service = self.create_service('web', mem_swappiness=11) container = create_and_start_container(service) assert container.get('HostConfig.MemorySwappiness') == 11 def test_mem_reservation(self): service = self.create_service('web', mem_reservation='20m') container = create_and_start_container(service) assert container.get('HostConfig.MemoryReservation') == 20 * 1024 * 1024 def test_restart_always_value(self): service = self.create_service('web', restart={'Name': 'always'}) container = create_and_start_container(service) assert container.get('HostConfig.RestartPolicy.Name') == 'always' def test_oom_score_adj_value(self): service = self.create_service('web', oom_score_adj=500) container = create_and_start_container(service) assert container.get('HostConfig.OomScoreAdj') == 500 def test_group_add_value(self): service = self.create_service('web', group_add=["root", "1"]) container = create_and_start_container(service) host_container_groupadd = container.get('HostConfig.GroupAdd') assert "root" in host_container_groupadd assert "1" in host_container_groupadd def test_dns_opt_value(self): service = self.create_service('web', dns_opt=["use-vc", "no-tld-query"]) container = create_and_start_container(service) dns_opt = container.get('HostConfig.DnsOptions') assert 'use-vc' in dns_opt assert 'no-tld-query' in dns_opt def test_restart_on_failure_value(self): service = self.create_service('web', restart={ 'Name': 'on-failure', 'MaximumRetryCount': 5 }) container = create_and_start_container(service) assert container.get('HostConfig.RestartPolicy.Name') == 'on-failure' assert container.get('HostConfig.RestartPolicy.MaximumRetryCount') == 5 def test_cap_add_list(self): service = self.create_service('web', cap_add=['SYS_ADMIN', 'NET_ADMIN']) container = create_and_start_container(service) assert container.get('HostConfig.CapAdd') == ['SYS_ADMIN', 'NET_ADMIN'] def test_cap_drop_list(self): service = self.create_service('web', cap_drop=['SYS_ADMIN', 'NET_ADMIN']) container = create_and_start_container(service) assert container.get('HostConfig.CapDrop') == ['SYS_ADMIN', 'NET_ADMIN'] def test_dns_search(self): service = self.create_service('web', dns_search=['dc1.example.com', 'dc2.example.com']) container = create_and_start_container(service) assert container.get('HostConfig.DnsSearch') == ['dc1.example.com', 'dc2.example.com'] def test_tmpfs(self): service = self.create_service('web', tmpfs=['/run']) container = create_and_start_container(service) assert container.get('HostConfig.Tmpfs') == {'/run': ''} def test_working_dir_param(self): service = self.create_service('container', working_dir='/working/dir/sample') container = service.create_container() assert container.get('Config.WorkingDir') == '/working/dir/sample' def 
test_split_env(self):
        service = self.create_service(
            'web',
            environment=['NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS='])
        env = create_and_start_container(service).environment
        for k, v in {'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': ''}.items():
            assert env[k] == v

    def test_env_from_file_combined_with_env(self):
        service = self.create_service(
            'web',
            environment=['ONE=1', 'TWO=2', 'THREE=3'],
            env_file=['tests/fixtures/env/one.env', 'tests/fixtures/env/two.env'])
        env = create_and_start_container(service).environment
        for k, v in {
            'ONE': '1', 'TWO': '2', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah'
        }.items():
            assert env[k] == v

    def test_build_with_cachefrom(self):
        base_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, base_dir)

        with open(os.path.join(base_dir, 'Dockerfile'), 'w') as f:
            f.write("FROM busybox\n")

        service = self.create_service('cache_from',
                                      build={'context': base_dir,
                                             'cache_from': ['build1']})
        service.build()
        self.addCleanup(self.client.remove_image, service.image_name)
        assert service.image()

    @mock.patch.dict(os.environ)
    def test_resolve_env(self):
        os.environ['FILE_DEF'] = 'E1'
        os.environ['FILE_DEF_EMPTY'] = 'E2'
        os.environ['ENV_DEF'] = 'E3'
        service = self.create_service(
            'web',
            environment={
                'FILE_DEF': 'F1',
                'FILE_DEF_EMPTY': '',
                'ENV_DEF': None,
                'NO_DEF': None
            }
        )
        env = create_and_start_container(service).environment
        for k, v in {
            'FILE_DEF': 'F1',
            'FILE_DEF_EMPTY': '',
            'ENV_DEF': 'E3',
            'NO_DEF': None
        }.items():
            assert env[k] == v

    def test_with_high_enough_api_version_we_get_default_network_mode(self):
        # TODO: remove this test once minimum docker version is 1.8.x
        with mock.patch.object(self.client, '_version', '1.20'):
            service = self.create_service('web')
            service_config = service._get_container_host_config({})
            assert service_config['NetworkMode'] == 'default'

    def test_labels(self):
        labels_dict = {
            'com.example.description': "Accounting webapp",
            'com.example.department': "Finance",
            'com.example.label-with-empty-value': "",
        }

        compose_labels = {
            LABEL_ONE_OFF: 'False',
            LABEL_PROJECT: 'composetest',
            LABEL_SERVICE: 'web',
            LABEL_VERSION: __version__,
            LABEL_CONTAINER_NUMBER: '1'
        }
        expected = dict(labels_dict, **compose_labels)

        service = self.create_service('web', labels=labels_dict)
        ctnr = create_and_start_container(service)
        labels = ctnr.labels.items()
        for pair in expected.items():
            assert pair in labels

    def test_empty_labels(self):
        labels_dict = {'foo': '', 'bar': ''}
        service = self.create_service('web', labels=labels_dict)
        labels = create_and_start_container(service).labels.items()
        for name in labels_dict:
            assert (name, '') in labels

    def test_stop_signal(self):
        stop_signal = 'SIGINT'
        service = self.create_service('web', stop_signal=stop_signal)
        container = create_and_start_container(service)
        assert container.stop_signal == stop_signal

    def test_custom_container_name(self):
        service = self.create_service('web', container_name='my-web-container')
        assert service.custom_container_name == 'my-web-container'

        container = create_and_start_container(service)
        assert container.name == 'my-web-container'

        one_off_container = service.create_container(one_off=True)
        assert one_off_container.name != 'my-web-container'

    @pytest.mark.skipif(True, reason="Broken on 1.11.0 - 17.03.0")
    def test_log_drive_invalid(self):
        service = self.create_service('web', logging={'driver': 'xxx'})
        expected_error_msg = "logger: no log driver named 'xxx' is registered"
        with pytest.raises(APIError) as excinfo:
            create_and_start_container(service)
        # `excinfo.value` is the APIError instance itself; re.search() needs a
        # string, so match against its str() form.
        assert re.search(expected_error_msg, str(excinfo.value))
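
    # --- Added example (not part of the original suite) ---
    # A minimal, hedged sketch making explicit the mapping the surrounding
    # test_log_drive_* cases exercise: the service-level `logging` option
    # (driver plus options) surfaces on the created container as
    # HostConfig.LogConfig, exposed here through the `log_config` property the
    # tests below already rely on. 'max-size' is a standard json-file driver
    # option; everything else reuses helpers defined in this module.
    def test_log_driver_options_sketch(self):
        service = self.create_service(
            'web', logging={'driver': 'json-file', 'options': {'max-size': '1k'}})
        log_config = create_and_start_container(service).log_config
        assert log_config['Type'] == 'json-file'
        assert log_config['Config'].get('max-size') == '1k'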
def test_log_drive_empty_default_jsonfile(self): service = self.create_service('web') log_config = create_and_start_container(service).log_config assert 'json-file' == log_config['Type'] assert not log_config['Config'] def test_log_drive_none(self): service = self.create_service('web', logging={'driver': 'none'}) log_config = create_and_start_container(service).log_config assert 'none' == log_config['Type'] assert not log_config['Config'] def test_devices(self): service = self.create_service('web', devices=["/dev/random:/dev/mapped-random"]) device_config = create_and_start_container(service).get('HostConfig.Devices') device_dict = { 'PathOnHost': '/dev/random', 'CgroupPermissions': 'rwm', 'PathInContainer': '/dev/mapped-random' } assert 1 == len(device_config) assert device_dict == device_config[0] def test_duplicate_containers(self): service = self.create_service('web') options = service._get_container_create_options({}, service._next_container_number()) original = Container.create(service.client, **options) assert set(service.containers(stopped=True)) == {original} assert set(service.duplicate_containers()) == set() options['name'] = 'temporary_container_name' duplicate = Container.create(service.client, **options) assert set(service.containers(stopped=True)) == {original, duplicate} assert set(service.duplicate_containers()) == {duplicate} def converge(service, strategy=ConvergenceStrategy.changed): """Create a converge plan from a strategy and execute the plan.""" plan = service.convergence_plan(strategy) return service.execute_convergence_plan(plan, timeout=1) class ConfigHashTest(DockerClientTestCase): def test_no_config_hash_when_one_off(self): web = self.create_service('web') container = web.create_container(one_off=True) assert LABEL_CONFIG_HASH not in container.labels def test_no_config_hash_when_overriding_options(self): web = self.create_service('web') container = web.create_container(environment={'FOO': '1'}) assert LABEL_CONFIG_HASH not in container.labels def test_config_hash_with_custom_labels(self): web = self.create_service('web', labels={'foo': '1'}) container = converge(web)[0] assert LABEL_CONFIG_HASH in container.labels assert 'foo' in container.labels def test_config_hash_sticks_around(self): web = self.create_service('web', command=["top"]) container = converge(web)[0] assert LABEL_CONFIG_HASH in container.labels web = self.create_service('web', command=["top", "-d", "1"]) container = converge(web)[0] assert LABEL_CONFIG_HASH in container.labels compose-1.29.2/tests/integration/state_test.py000066400000000000000000000373061404620552300215020ustar00rootroot00000000000000""" Integration tests which cover state convergence (aka smart recreate) performed by `docker-compose up`. 
""" import copy import os import shutil import tempfile from docker.errors import ImageNotFound from ..helpers import BUSYBOX_IMAGE_WITH_TAG from .testcases import DockerClientTestCase from .testcases import get_links from .testcases import no_cluster from compose.config import config from compose.project import Project from compose.service import ConvergenceStrategy class ProjectTestCase(DockerClientTestCase): def run_up(self, cfg, **kwargs): kwargs.setdefault('timeout', 1) kwargs.setdefault('detached', True) project = self.make_project(cfg) project.up(**kwargs) return set(project.containers(stopped=True)) def make_project(self, cfg): details = config.ConfigDetails( 'working_dir', [config.ConfigFile(None, cfg)]) return Project.from_config( name='composetest', client=self.client, config_data=config.load(details)) class BasicProjectTest(ProjectTestCase): def setUp(self): super().setUp() self.cfg = { 'db': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'}, 'web': {'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top'}, } def test_no_change(self): old_containers = self.run_up(self.cfg) assert len(old_containers) == 2 new_containers = self.run_up(self.cfg) assert len(new_containers) == 2 assert old_containers == new_containers def test_partial_change(self): old_containers = self.run_up(self.cfg) old_db = [c for c in old_containers if c.name_without_project.startswith('db_')][0] old_web = [c for c in old_containers if c.name_without_project.startswith('web_')][0] self.cfg['web']['command'] = '/bin/true' new_containers = self.run_up(self.cfg) assert len(new_containers) == 2 preserved = list(old_containers & new_containers) assert preserved == [old_db] removed = list(old_containers - new_containers) assert removed == [old_web] created = list(new_containers - old_containers) assert len(created) == 1 assert created[0].name_without_project == old_web.name_without_project assert created[0].get('Config.Cmd') == ['/bin/true'] def test_all_change(self): old_containers = self.run_up(self.cfg) assert len(old_containers) == 2 self.cfg['web']['command'] = '/bin/true' self.cfg['db']['command'] = '/bin/true' new_containers = self.run_up(self.cfg) assert len(new_containers) == 2 unchanged = old_containers & new_containers assert len(unchanged) == 0 new = new_containers - old_containers assert len(new) == 2 class ProjectWithDependenciesTest(ProjectTestCase): def setUp(self): super().setUp() self.cfg = { 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', }, 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', 'links': ['db'], }, 'nginx': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', 'links': ['web'], }, } def test_up(self): containers = self.run_up(self.cfg) assert {c.service for c in containers} == {'db', 'web', 'nginx'} def test_change_leaf(self): old_containers = self.run_up(self.cfg) self.cfg['nginx']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(self.cfg) assert {c.service for c in new_containers - old_containers} == {'nginx'} def test_change_middle(self): old_containers = self.run_up(self.cfg) self.cfg['web']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(self.cfg) assert {c.service for c in new_containers - old_containers} == {'web'} def test_change_middle_always_recreate_deps(self): old_containers = self.run_up(self.cfg, always_recreate_deps=True) self.cfg['web']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(self.cfg, always_recreate_deps=True) assert {c.service for c in new_containers - 
old_containers} == {'web', 'nginx'} def test_change_root(self): old_containers = self.run_up(self.cfg) self.cfg['db']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(self.cfg) assert {c.service for c in new_containers - old_containers} == {'db'} def test_change_root_always_recreate_deps(self): old_containers = self.run_up(self.cfg, always_recreate_deps=True) self.cfg['db']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(self.cfg, always_recreate_deps=True) assert {c.service for c in new_containers - old_containers} == { 'db', 'web', 'nginx' } def test_change_root_no_recreate(self): old_containers = self.run_up(self.cfg) self.cfg['db']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up( self.cfg, strategy=ConvergenceStrategy.never) assert new_containers - old_containers == set() def test_service_removed_while_down(self): next_cfg = { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', }, 'nginx': self.cfg['nginx'], } containers = self.run_up(self.cfg) assert len(containers) == 3 project = self.make_project(self.cfg) project.stop(timeout=1) containers = self.run_up(next_cfg) assert len(containers) == 2 def test_service_recreated_when_dependency_created(self): containers = self.run_up(self.cfg, service_names=['web'], start_deps=False) assert len(containers) == 1 containers = self.run_up(self.cfg) assert len(containers) == 3 web, = [c for c in containers if c.service == 'web'] nginx, = [c for c in containers if c.service == 'nginx'] db, = [c for c in containers if c.service == 'db'] assert set(get_links(web)) == { 'composetest_db_1', 'db', 'db_1', } assert set(get_links(nginx)) == { 'composetest_web_1', 'web', 'web_1', } class ProjectWithDependsOnDependenciesTest(ProjectTestCase): def setUp(self): super().setUp() self.cfg = { 'version': '2', 'services': { 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', }, 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', 'depends_on': ['db'], }, 'nginx': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'tail -f /dev/null', 'depends_on': ['web'], }, } } def test_up(self): local_cfg = copy.deepcopy(self.cfg) containers = self.run_up(local_cfg) assert {c.service for c in containers} == {'db', 'web', 'nginx'} def test_change_leaf(self): local_cfg = copy.deepcopy(self.cfg) old_containers = self.run_up(local_cfg) local_cfg['services']['nginx']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(local_cfg) assert {c.service for c in new_containers - old_containers} == {'nginx'} def test_change_middle(self): local_cfg = copy.deepcopy(self.cfg) old_containers = self.run_up(local_cfg) local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(local_cfg) assert {c.service for c in new_containers - old_containers} == {'web'} def test_change_middle_always_recreate_deps(self): local_cfg = copy.deepcopy(self.cfg) old_containers = self.run_up(local_cfg, always_recreate_deps=True) local_cfg['services']['web']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(local_cfg, always_recreate_deps=True) assert {c.service for c in new_containers - old_containers} == {'web', 'nginx'} def test_change_root(self): local_cfg = copy.deepcopy(self.cfg) old_containers = self.run_up(local_cfg) local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(local_cfg) assert {c.service for c in new_containers - old_containers} == {'db'} def test_change_root_always_recreate_deps(self): local_cfg = 
copy.deepcopy(self.cfg) old_containers = self.run_up(local_cfg, always_recreate_deps=True) local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up(local_cfg, always_recreate_deps=True) assert {c.service for c in new_containers - old_containers} == {'db', 'web', 'nginx'} def test_change_root_no_recreate(self): local_cfg = copy.deepcopy(self.cfg) old_containers = self.run_up(local_cfg) local_cfg['services']['db']['environment'] = {'NEW_VAR': '1'} new_containers = self.run_up( local_cfg, strategy=ConvergenceStrategy.never) assert new_containers - old_containers == set() def test_service_removed_while_down(self): local_cfg = copy.deepcopy(self.cfg) next_cfg = copy.deepcopy(self.cfg) del next_cfg['services']['db'] del next_cfg['services']['web']['depends_on'] containers = self.run_up(local_cfg) assert {c.service for c in containers} == {'db', 'web', 'nginx'} project = self.make_project(local_cfg) project.stop(timeout=1) next_containers = self.run_up(next_cfg) assert {c.service for c in next_containers} == {'web', 'nginx'} def test_service_removed_while_up(self): local_cfg = copy.deepcopy(self.cfg) containers = self.run_up(local_cfg) assert {c.service for c in containers} == {'db', 'web', 'nginx'} del local_cfg['services']['db'] del local_cfg['services']['web']['depends_on'] containers = self.run_up(local_cfg) assert {c.service for c in containers} == {'web', 'nginx'} def test_dependency_removed(self): local_cfg = copy.deepcopy(self.cfg) next_cfg = copy.deepcopy(self.cfg) del next_cfg['services']['nginx']['depends_on'] containers = self.run_up(local_cfg, service_names=['nginx']) assert {c.service for c in containers} == {'db', 'web', 'nginx'} project = self.make_project(local_cfg) project.stop(timeout=1) next_containers = self.run_up(next_cfg, service_names=['nginx']) assert {c.service for c in next_containers if c.is_running} == {'nginx'} def test_dependency_added(self): local_cfg = copy.deepcopy(self.cfg) del local_cfg['services']['nginx']['depends_on'] containers = self.run_up(local_cfg, service_names=['nginx']) assert {c.service for c in containers} == {'nginx'} local_cfg['services']['nginx']['depends_on'] = ['db'] containers = self.run_up(local_cfg, service_names=['nginx']) assert {c.service for c in containers} == {'nginx', 'db'} class ServiceStateTest(DockerClientTestCase): """Test cases for Service.convergence_plan.""" def test_trigger_create(self): web = self.create_service('web') assert ('create', []) == web.convergence_plan() def test_trigger_noop(self): web = self.create_service('web') container = web.create_container() web.start() web = self.create_service('web') assert ('noop', [container]) == web.convergence_plan() def test_trigger_start(self): options = dict(command=["top"]) web = self.create_service('web', **options) web.scale(2) containers = web.containers(stopped=True) containers[0].stop() containers[0].inspect() assert [c.is_running for c in containers] == [False, True] assert ('start', containers) == web.convergence_plan() def test_trigger_recreate_with_config_change(self): web = self.create_service('web', command=["top"]) container = web.create_container() web = self.create_service('web', command=["top", "-d", "1"]) assert ('recreate', [container]) == web.convergence_plan() def test_trigger_recreate_with_nonexistent_image_tag(self): web = self.create_service('web', image=BUSYBOX_IMAGE_WITH_TAG) container = web.create_container() web = self.create_service('web', image="nonexistent-image") assert ('recreate', [container]) == 
web.convergence_plan() def test_trigger_recreate_with_image_change(self): repo = 'composetest_myimage' tag = 'latest' image = '{}:{}'.format(repo, tag) def safe_remove_image(image): try: self.client.remove_image(image) except ImageNotFound: pass image_id = self.client.images(name='busybox')[0]['Id'] self.client.tag(image_id, repository=repo, tag=tag) self.addCleanup(safe_remove_image, image) web = self.create_service('web', image=image) container = web.create_container() # update the image c = self.client.create_container(image, ['touch', '/hello.txt'], host_config={}) # In the case of a cluster, there's a chance we pick up the old image when # calculating the new hash. To circumvent that, untag the old image first # See also: https://github.com/moby/moby/issues/26852 self.client.remove_image(image, force=True) self.client.commit(c, repository=repo, tag=tag) self.client.remove_container(c) web = self.create_service('web', image=image) assert ('recreate', [container]) == web.convergence_plan() @no_cluster('Can not guarantee the build will be run on the same node the service is deployed') def test_trigger_recreate_with_build(self): context = tempfile.mkdtemp('test_trigger_recreate_with_build') self.addCleanup(shutil.rmtree, context) base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n" dockerfile = os.path.join(context, 'Dockerfile') with open(dockerfile, mode="w") as dockerfile_fh: dockerfile_fh.write(base_image) web = self.create_service('web', build={'context': str(context)}) container = web.create_container() with open(dockerfile, mode="w") as dockerfile_fh: dockerfile_fh.write(base_image + 'CMD echo hello world\n') web.build() web = self.create_service('web', build={'context': str(context)}) assert ('recreate', [container]) == web.convergence_plan() def test_image_changed_to_build(self): context = tempfile.mkdtemp('test_image_changed_to_build') self.addCleanup(shutil.rmtree, context) with open(os.path.join(context, 'Dockerfile'), mode="w") as dockerfile: dockerfile.write(""" FROM busybox LABEL com.docker.compose.test_image=true """) web = self.create_service('web', image='busybox') container = web.create_container() web = self.create_service('web', build={'context': str(context)}) plan = web.convergence_plan() assert ('recreate', [container]) == plan containers = web.execute_convergence_plan(plan) assert len(containers) == 1 compose-1.29.2/tests/integration/testcases.py000066400000000000000000000124521404620552300213140ustar00rootroot00000000000000import functools import os import pytest from docker.errors import APIError from docker.utils import version_lt from .. 
import unittest from ..helpers import BUSYBOX_IMAGE_WITH_TAG from compose.cli.docker_client import docker_client from compose.config.config import resolve_environment from compose.config.environment import Environment from compose.const import API_VERSIONS from compose.const import COMPOSE_SPEC as VERSION from compose.const import COMPOSEFILE_V1 as V1 from compose.const import LABEL_PROJECT from compose.progress_stream import stream_output from compose.service import Service SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0' SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0' SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0' SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0' def pull_busybox(client): client.pull(BUSYBOX_IMAGE_WITH_TAG, stream=False) def get_links(container): links = container.get('HostConfig.Links') or [] def format_link(link): _, alias = link.split(':') return alias.split('/')[-1] return [format_link(link) for link in links] def engine_max_version(): if 'DOCKER_VERSION' not in os.environ: return VERSION version = os.environ['DOCKER_VERSION'].partition('-')[0] if version_lt(version, '1.10'): return V1 return VERSION def min_version_skip(version): return pytest.mark.skipif( engine_max_version() < version, reason="Engine version %s is too low" % version ) class DockerClientTestCase(unittest.TestCase): @classmethod def setUpClass(cls): version = API_VERSIONS[engine_max_version()] cls.client = docker_client(Environment(), version) @classmethod def tearDownClass(cls): cls.client.close() del cls.client def tearDown(self): for c in self.client.containers( all=True, filters={'label': '%s=composetest' % LABEL_PROJECT}): self.client.remove_container(c['Id'], force=True) for i in self.client.images( filters={'label': 'com.docker.compose.test_image'}): try: self.client.remove_image(i, force=True) except APIError as e: if e.is_server_error(): pass volumes = self.client.volumes().get('Volumes') or [] for v in volumes: if 'composetest_' in v['Name']: self.client.remove_volume(v['Name']) networks = self.client.networks() for n in networks: if 'composetest_' in n['Name']: self.client.remove_network(n['Name']) def create_service(self, name, **kwargs): if 'image' not in kwargs and 'build' not in kwargs: kwargs['image'] = BUSYBOX_IMAGE_WITH_TAG if 'command' not in kwargs: kwargs['command'] = ["top"] kwargs['environment'] = resolve_environment( kwargs, Environment.from_env_file(None) ) labels = dict(kwargs.setdefault('labels', {})) labels['com.docker.compose.test-name'] = self.id() return Service(name, client=self.client, project='composetest', **kwargs) def check_build(self, *args, **kwargs): kwargs.setdefault('rm', True) build_output = self.client.build(*args, **kwargs) with open(os.devnull, 'w') as devnull: for event in stream_output(build_output, devnull): pass def require_api_version(self, minimum): api_version = self.client.version()['ApiVersion'] if version_lt(api_version, minimum): pytest.skip("API version is too low ({} < {})".format(api_version, minimum)) def get_volume_data(self, volume_name): if not is_cluster(self.client): return self.client.inspect_volume(volume_name) volumes = self.client.volumes(filters={'name': volume_name})['Volumes'] assert len(volumes) > 0 return self.client.inspect_volume(volumes[0]['Name']) def if_runtime_available(runtime): def decorator(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): if runtime not in 
self.client.info().get('Runtimes', {}):
                # pytest.skip() raises, so no explicit return is needed here
                pytest.skip("This daemon does not support the '{}' runtime".format(runtime))
            return f(self, *args, **kwargs)
        return wrapper
    return decorator


def is_cluster(client):
    if SWARM_ASSUME_MULTINODE:
        return True

    def get_nodes_number():
        try:
            return len(client.nodes())
        except APIError:
            # If the Engine is not part of a Swarm, the SDK will raise
            # an APIError
            return 0

    if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
        # Only make the API call if the value hasn't been cached yet
        is_cluster.nodes = get_nodes_number()

    return is_cluster.nodes > 1


def no_cluster(reason):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            if is_cluster(self.client):
                # pytest.skip() raises, so execution never reaches the call below
                pytest.skip("Test will not be run in cluster mode: %s" % reason)
            return f(self, *args, **kwargs)
        return wrapper
    return decorator
compose-1.29.2/tests/integration/volume_test.py000066400000000000000000000103151404620552300216600ustar00rootroot00000000000000
from docker.errors import DockerException

from .testcases import DockerClientTestCase
from .testcases import no_cluster
from compose.const import LABEL_PROJECT
from compose.const import LABEL_VOLUME
from compose.volume import Volume


class VolumeTest(DockerClientTestCase):
    def setUp(self):
        self.tmp_volumes = []

    def tearDown(self):
        for volume in self.tmp_volumes:
            try:
                self.client.remove_volume(volume.full_name)
            except DockerException:
                pass
        del self.tmp_volumes
        super().tearDown()

    def create_volume(self, name, driver=None, opts=None, external=None,
                      custom_name=False):
        if external:
            custom_name = True
            if isinstance(external, str):
                name = external
        vol = Volume(
            self.client, 'composetest', name, driver=driver, driver_opts=opts,
            external=bool(external), custom_name=custom_name
        )
        self.tmp_volumes.append(vol)
        return vol

    def test_create_volume(self):
        vol = self.create_volume('volume01')
        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.full_name

    def test_create_volume_custom_name(self):
        vol = self.create_volume('volume01', custom_name=True)
        assert vol.name == vol.full_name
        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.name

    def test_recreate_existing_volume(self):
        vol = self.create_volume('volume01')
        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.full_name

        vol.create()
        info = self.get_volume_data(vol.full_name)
        assert info['Name'].split('/')[-1] == vol.full_name

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_inspect_volume(self):
        vol = self.create_volume('volume01')
        vol.create()
        info = vol.inspect()
        assert info['Name'] == vol.full_name

    @no_cluster('remove volume by name defect on Swarm Classic')
    def test_remove_volume(self):
        vol = Volume(self.client, 'composetest', 'volume01')
        vol.create()
        vol.remove()
        volumes = self.client.volumes()['Volumes']
        assert len([v for v in volumes if v['Name'] == vol.full_name]) == 0

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_external_volume(self):
        vol = self.create_volume('composetest_volume_ext', external=True)
        assert vol.external is True
        assert vol.full_name == vol.name
        vol.create()
        info = vol.inspect()
        assert info['Name'] == vol.name

    @no_cluster('inspect volume by name defect on Swarm Classic')
    def test_external_aliased_volume(self):
        alias_name = 'composetest_alias01'
        vol = self.create_volume('volume01', external=alias_name)
        assert vol.external is True
        assert vol.full_name == alias_name
        vol.create()
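        # (Added note, not part of the original test:) when `external` is a
        # string, the create_volume() helper above substitutes the alias for
        # the compose-side name, so the volume is created under the
        # daemon-side name 'composetest_alias01'; the inspect below is
        # therefore expected to report the alias rather than 'volume01'.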
info = vol.inspect() assert info['Name'] == alias_name @no_cluster('inspect volume by name defect on Swarm Classic') def test_exists(self): vol = self.create_volume('volume01') assert vol.exists() is False vol.create() assert vol.exists() is True @no_cluster('inspect volume by name defect on Swarm Classic') def test_exists_external(self): vol = self.create_volume('volume01', external=True) assert vol.exists() is False vol.create() assert vol.exists() is True @no_cluster('inspect volume by name defect on Swarm Classic') def test_exists_external_aliased(self): vol = self.create_volume('volume01', external='composetest_alias01') assert vol.exists() is False vol.create() assert vol.exists() is True @no_cluster('inspect volume by name defect on Swarm Classic') def test_volume_default_labels(self): vol = self.create_volume('volume01') vol.create() vol_data = vol.inspect() labels = vol_data['Labels'] assert labels[LABEL_VOLUME] == vol.name assert labels[LABEL_PROJECT] == vol.project compose-1.29.2/tests/unit/000077500000000000000000000000001404620552300153745ustar00rootroot00000000000000compose-1.29.2/tests/unit/__init__.py000066400000000000000000000000001404620552300174730ustar00rootroot00000000000000compose-1.29.2/tests/unit/cli/000077500000000000000000000000001404620552300161435ustar00rootroot00000000000000compose-1.29.2/tests/unit/cli/__init__.py000066400000000000000000000000001404620552300202420ustar00rootroot00000000000000compose-1.29.2/tests/unit/cli/colors_test.py000066400000000000000000000033021404620552300210530ustar00rootroot00000000000000import os import pytest from compose.cli.colors import AnsiMode from tests import mock @pytest.fixture def tty_stream(): stream = mock.Mock() stream.isatty.return_value = True return stream @pytest.fixture def non_tty_stream(): stream = mock.Mock() stream.isatty.return_value = False return stream class TestAnsiModeTestCase: @mock.patch.dict(os.environ) def test_ansi_mode_never(self, tty_stream, non_tty_stream): if "CLICOLOR" in os.environ: del os.environ["CLICOLOR"] assert not AnsiMode.NEVER.use_ansi_codes(tty_stream) assert not AnsiMode.NEVER.use_ansi_codes(non_tty_stream) os.environ["CLICOLOR"] = "0" assert not AnsiMode.NEVER.use_ansi_codes(tty_stream) assert not AnsiMode.NEVER.use_ansi_codes(non_tty_stream) @mock.patch.dict(os.environ) def test_ansi_mode_always(self, tty_stream, non_tty_stream): if "CLICOLOR" in os.environ: del os.environ["CLICOLOR"] assert AnsiMode.ALWAYS.use_ansi_codes(tty_stream) assert AnsiMode.ALWAYS.use_ansi_codes(non_tty_stream) os.environ["CLICOLOR"] = "0" assert AnsiMode.ALWAYS.use_ansi_codes(tty_stream) assert AnsiMode.ALWAYS.use_ansi_codes(non_tty_stream) @mock.patch.dict(os.environ) def test_ansi_mode_auto(self, tty_stream, non_tty_stream): if "CLICOLOR" in os.environ: del os.environ["CLICOLOR"] assert AnsiMode.AUTO.use_ansi_codes(tty_stream) assert not AnsiMode.AUTO.use_ansi_codes(non_tty_stream) os.environ["CLICOLOR"] = "0" assert not AnsiMode.AUTO.use_ansi_codes(tty_stream) assert not AnsiMode.AUTO.use_ansi_codes(non_tty_stream) compose-1.29.2/tests/unit/cli/command_test.py000066400000000000000000000044501404620552300211750ustar00rootroot00000000000000import os import pytest from compose.cli.command import get_config_path_from_options from compose.config.environment import Environment from compose.const import IS_WINDOWS_PLATFORM from tests import mock class TestGetConfigPathFromOptions: def test_path_from_options(self): paths = ['one.yml', 'two.yml'] opts = {'--file': paths} environment = 
Environment.from_env_file('.')
        assert get_config_path_from_options(opts, environment) == paths

    def test_single_path_from_env(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options({}, environment) == ['one.yml']

    @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix separator')
    def test_multiple_path_from_env(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml:two.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options({}, environment) == ['one.yml', 'two.yml']

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows separator')
    def test_multiple_path_from_env_windows(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_FILE'] = 'one.yml;two.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options({}, environment) == ['one.yml', 'two.yml']

    def test_multiple_path_from_env_custom_separator(self):
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_PATH_SEPARATOR'] = '^'
            os.environ['COMPOSE_FILE'] = 'c:\\one.yml^.\\semi;colon.yml'
            environment = Environment.from_env_file('.')
            assert get_config_path_from_options({}, environment) == ['c:\\one.yml', '.\\semi;colon.yml']

    def test_no_path(self):
        environment = Environment.from_env_file('.')
        assert not get_config_path_from_options({}, environment)

    def test_unicode_path_from_options(self):
        paths = [b'\xe5\xb0\xb1\xe5\x90\x83\xe9\xa5\xad/docker-compose.yml']
        opts = {'--file': paths}
        environment = Environment.from_env_file('.')
        assert get_config_path_from_options(opts, environment) == ['就吃饭/docker-compose.yml']


# ==== compose-1.29.2/tests/unit/cli/docker_client_test.py ====

import os
import platform
import ssl

import docker
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION

import compose
from compose.cli import errors
from compose.cli.docker_client import docker_client
from compose.cli.docker_client import get_tls_version
from compose.cli.docker_client import tls_config_from_options
from compose.config.environment import Environment
from tests import mock
from tests import unittest


class DockerClientTestCase(unittest.TestCase):

    def test_docker_client_no_home(self):
        with mock.patch.dict(os.environ):
            try:
                del os.environ['HOME']
            except KeyError:
                pass
            docker_client(os.environ, version=DEFAULT_DOCKER_API_VERSION)

    @mock.patch.dict(os.environ)
    def test_docker_client_with_custom_timeout(self):
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ, version=DEFAULT_DOCKER_API_VERSION)
        assert client.timeout == 123

    @mock.patch.dict(os.environ)
    def test_custom_timeout_error(self):
        os.environ['COMPOSE_HTTP_TIMEOUT'] = '123'
        client = docker_client(os.environ, version=DEFAULT_DOCKER_API_VERSION)

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.RequestsConnectionError(
                        errors.ReadTimeoutError(None, None, None))

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

        with mock.patch('compose.cli.errors.log') as fake_log:
            with pytest.raises(errors.ConnectionError):
                with errors.handle_connection_errors(client):
                    raise errors.ReadTimeout()

        assert fake_log.error.call_count == 1
        assert '123' in fake_log.error.call_args[0][0]

    def test_user_agent(self):
        client = docker_client(os.environ, version=DEFAULT_DOCKER_API_VERSION)
        expected = "docker-compose/{} docker-py/{} {}/{}".format(
            compose.__version__,
            docker.__version__,
            platform.system(),
            platform.release()
        )
        assert client.headers['User-Agent'] == expected


class TLSConfigTestCase(unittest.TestCase):
    cert_path = 'tests/fixtures/tls/'
    ca_cert = os.path.join(cert_path, 'ca.pem')
    client_cert = os.path.join(cert_path, 'cert.pem')
    key = os.path.join(cert_path, 'key.pem')

    def test_simple_tls(self):
        options = {'--tls': True}
        result = tls_config_from_options(options)
        assert result is True

    def test_tls_ca_cert(self):
        options = {
            '--tlscacert': self.ca_cert,
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_ca_cert_explicit(self):
        options = {
            '--tlscacert': self.ca_cert,
            '--tls': True,
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_cert(self):
        options = {
            '--tlscert': self.client_cert,
            '--tlskey': self.key
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])

    def test_tls_client_cert_explicit(self):
        options = {
            '--tlscert': self.client_cert,
            '--tlskey': self.key,
            '--tls': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])

    def test_tls_client_and_ca(self):
        options = {
            '--tlscert': self.client_cert,
            '--tlskey': self.key,
            '--tlsverify': True,
            '--tlscacert': self.ca_cert
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_and_ca_explicit(self):
        options = {
            '--tlscert': self.client_cert,
            '--tlskey': self.key,
            '--tlsverify': True,
            '--tlscacert': self.ca_cert,
            '--tls': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (options['--tlscert'], options['--tlskey'])
        assert result.ca_cert == options['--tlscacert']
        assert result.verify is True

    def test_tls_client_missing_key(self):
        options = {'--tlscert': self.client_cert}
        with pytest.raises(docker.errors.TLSParameterError):
            tls_config_from_options(options)

        options = {'--tlskey': self.key}
        with pytest.raises(docker.errors.TLSParameterError):
            tls_config_from_options(options)

    def test_assert_hostname_explicit_skip(self):
        options = {'--tlscacert': self.ca_cert, '--skip-hostname-check': True}
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.assert_hostname is False

    def test_tls_client_and_ca_quoted_paths(self):
        options = {
            '--tlscacert': '"{}"'.format(self.ca_cert),
            '--tlscert': '"{}"'.format(self.client_cert),
            '--tlskey': '"{}"'.format(self.key),
            '--tlsverify': True
        }
        result = tls_config_from_options(options)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is True

    def test_tls_simple_with_tls_version(self):
        tls_version = 'TLSv1'
        options = {'--tls': True}
        environment = Environment({'COMPOSE_TLS_VERSION': tls_version})
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ssl_version == ssl.PROTOCOL_TLSv1

    def test_tls_mixed_environment_and_flags(self):
        options = {'--tls': True, '--tlsverify': False}
        environment = Environment({'DOCKER_CERT_PATH': 'tests/fixtures/tls/'})
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is False

    def test_tls_flags_override_environment(self):
        environment = Environment({
            'DOCKER_CERT_PATH': '/completely/wrong/path',
            'DOCKER_TLS_VERIFY': 'false'
        })
        options = {
            '--tlscacert': '"{}"'.format(self.ca_cert),
            '--tlscert': '"{}"'.format(self.client_cert),
            '--tlskey': '"{}"'.format(self.key),
            '--tlsverify': True
        }
        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.cert == (self.client_cert, self.key)
        assert result.ca_cert == self.ca_cert
        assert result.verify is True

    def test_tls_verify_flag_no_override(self):
        environment = Environment({
            'DOCKER_TLS_VERIFY': 'true',
            'COMPOSE_TLS_VERSION': 'TLSv1',
            'DOCKER_CERT_PATH': self.cert_path
        })
        options = {'--tls': True, '--tlsverify': False}

        result = tls_config_from_options(options, environment)
        assert isinstance(result, docker.tls.TLSConfig)
        assert result.ssl_version == ssl.PROTOCOL_TLSv1
        # verify is a special case - since `--tlsverify` = False means it
        # wasn't used, we set it if either the environment or the flag is True
        # see https://github.com/docker/compose/issues/5632
        assert result.verify is True

    def test_tls_verify_env_falsy_value(self):
        environment = Environment({'DOCKER_TLS_VERIFY': '0'})
        options = {'--tls': True}
        assert tls_config_from_options(options, environment) is True

    def test_tls_verify_default_cert_path(self):
        environment = Environment({'DOCKER_TLS_VERIFY': '1'})
        options = {'--tls': True}
        with mock.patch('compose.cli.docker_client.default_cert_path') as dcp:
            dcp.return_value = 'tests/fixtures/tls/'
            result = tls_config_from_options(options, environment)

        assert isinstance(result, docker.tls.TLSConfig)
        assert result.verify is True
        assert result.ca_cert == self.ca_cert
        assert result.cert == (self.client_cert, self.key)


class TestGetTlsVersion:

    def test_get_tls_version_default(self):
        environment = {}
        assert get_tls_version(environment) is None

    @pytest.mark.skipif(not hasattr(ssl, 'PROTOCOL_TLSv1_2'), reason='TLS v1.2 unsupported')
    def test_get_tls_version_upgrade(self):
        environment = {'COMPOSE_TLS_VERSION': 'TLSv1_2'}
        assert get_tls_version(environment) == ssl.PROTOCOL_TLSv1_2

    def test_get_tls_version_unavailable(self):
        environment = {'COMPOSE_TLS_VERSION': 'TLSv5_5'}
        with mock.patch('compose.cli.docker_client.log') as mock_log:
            tls_version = get_tls_version(environment)

        mock_log.warning.assert_called_once_with(mock.ANY)
        assert tls_version is None


# ==== compose-1.29.2/tests/unit/cli/errors_test.py ====

import pytest
from docker.errors import APIError
from requests.exceptions import ConnectionError

from compose.cli import errors
from compose.cli.errors import handle_connection_errors
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock


@pytest.yield_fixture
def mock_logging():
    with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
        yield mock_log


def patch_find_executable(side_effect):
    return mock.patch(
        'compose.cli.errors.find_executable',
        autospec=True,
        side_effect=side_effect)


class TestHandleConnectionErrors:

    def test_generic_connection_error(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with patch_find_executable(['/bin/docker', None]):
                with handle_connection_errors(mock.Mock()):
                    raise ConnectionError()

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Couldn't connect to Docker daemon" in args[0]

    def test_api_error_version_mismatch(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.38')):
                raise APIError(None, None, b"client is newer than server")

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Docker Engine of version 18.06.0 or greater" in args[0]

    def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.38')):
                raise APIError(None, None, "client is newer than server")

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Docker Engine of version 18.06.0 or greater" in args[0]

    def test_api_error_version_other(self, mock_logging):
        msg = b"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)

        mock_logging.error.assert_called_once_with(msg.decode('utf-8'))

    def test_api_error_version_other_unicode_explanation(self, mock_logging):
        msg = "Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)

        mock_logging.error.assert_called_once_with(msg)

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_no_data(self, mock_logging):
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "The current Compose file version is not compatible with your engine version." in args[0]

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_misc(self, mock_logging):
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')

        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]

    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_encoding_issue(self, mock_logging):
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(9999, 'WriteFile', 'I use weird characters \xe9')

        _, args, _ = mock_logging.error.mock_calls[0]
        assert 'Windows named pipe error: I use weird characters \xe9 (code: 9999)' == args[0]


# ==== compose-1.29.2/tests/unit/cli/formatter_test.py ====

import logging

from compose.cli import colors
from compose.cli.formatter import ConsoleWarningFormatter
from tests import unittest

MESSAGE = 'this is the message'


def make_log_record(level, message=None):
    return logging.LogRecord('name', level, 'pathame', 0, message or MESSAGE, (), None)


class ConsoleWarningFormatterTestCase(unittest.TestCase):

    def setUp(self):
        self.formatter = ConsoleWarningFormatter()

    def test_format_warn(self):
        output = self.formatter.format(make_log_record(logging.WARN))
        expected = colors.yellow('WARNING') + ': '
        assert output == expected + MESSAGE

    def test_format_error(self):
        output = self.formatter.format(make_log_record(logging.ERROR))
        expected = colors.red('ERROR') + ': '
        assert output == expected + MESSAGE

    def test_format_info(self):
        output = self.formatter.format(make_log_record(logging.INFO))
        assert output == MESSAGE

    def test_format_unicode_info(self):
        message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
        output = self.formatter.format(make_log_record(logging.INFO, message))
        assert output == message.decode('utf-8')

    def test_format_unicode_warn(self):
        message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
        output = self.formatter.format(make_log_record(logging.WARN, message))
        expected = colors.yellow('WARNING') + ': '
        assert output == '{}{}'.format(expected, message.decode('utf-8'))

    def test_format_unicode_error(self):
        message = b'\xec\xa0\x95\xec\x88\x98\xec\xa0\x95'
        output = self.formatter.format(make_log_record(logging.ERROR, message))
        expected = colors.red('ERROR') + ': '
        assert output == '{}{}'.format(expected, message.decode('utf-8'))


# ==== compose-1.29.2/tests/unit/cli/log_printer_test.py ====

import itertools
from io import StringIO
from queue import Queue

import pytest
import requests
from docker.errors import APIError

from compose.cli.log_printer import build_log_generator
from compose.cli.log_printer import build_log_presenters
from compose.cli.log_printer import consume_queue
from compose.cli.log_printer import QueueItem
from compose.cli.log_printer import wait_on_exit
from compose.cli.log_printer import watch_events
from compose.container import Container
from tests import mock


@pytest.fixture
def output_stream():
    output = StringIO()
    output.flush = mock.Mock()
    return output


@pytest.fixture
def mock_container():
    return mock.Mock(spec=Container, name_without_project='web_1')


class TestLogPresenter:

    def test_monochrome(self, mock_container):
        presenters = build_log_presenters(['foo', 'bar'], True)
        presenter = next(presenters)
        actual = presenter.present(mock_container, "this line")
        assert actual == "web_1 | this line"

    def test_polychrome(self, mock_container):
        presenters = build_log_presenters(['foo', 'bar'], False)
        presenter = next(presenters)
        actual = presenter.present(mock_container, "this line")
        assert '\033[' in actual


def test_wait_on_exit():
    exit_status = 3
    mock_container = mock.Mock(
spec=Container, name='cname', wait=mock.Mock(return_value=exit_status)) expected = '{} exited with code {}\n'.format(mock_container.name, exit_status) assert expected == wait_on_exit(mock_container) def test_wait_on_exit_raises(): status_code = 500 def mock_wait(): resp = requests.Response() resp.status_code = status_code raise APIError('Bad server', resp) mock_container = mock.Mock( spec=Container, name='cname', wait=mock_wait ) expected = 'Unexpected API error for {} (HTTP code {})\n'.format( mock_container.name, status_code, ) assert expected in wait_on_exit(mock_container) class TestBuildLogGenerator: def test_no_log_stream(self, mock_container): mock_container.log_stream = None mock_container.logs.return_value = iter([b"hello\nworld"]) log_args = {'follow': True} generator = build_log_generator(mock_container, log_args) assert next(generator) == "hello\n" assert next(generator) == "world" mock_container.logs.assert_called_once_with( stdout=True, stderr=True, stream=True, **log_args) def test_with_log_stream(self, mock_container): mock_container.log_stream = iter([b"hello\nworld"]) log_args = {'follow': True} generator = build_log_generator(mock_container, log_args) assert next(generator) == "hello\n" assert next(generator) == "world" def test_unicode(self, output_stream): glyph = '\u2022\n' mock_container.log_stream = iter([glyph.encode('utf-8')]) generator = build_log_generator(mock_container, {}) assert next(generator) == glyph @pytest.fixture def thread_map(): return {'cid': mock.Mock()} @pytest.fixture def mock_presenters(): return itertools.cycle([mock.Mock()]) class TestWatchEvents: def test_stop_event(self, thread_map, mock_presenters): event_stream = [{'action': 'stop', 'id': 'cid'}] watch_events(thread_map, event_stream, mock_presenters, ()) assert not thread_map def test_start_event(self, thread_map, mock_presenters): container_id = 'abcd' event = {'action': 'start', 'id': container_id, 'container': mock.Mock()} event_stream = [event] thread_args = 'foo', 'bar' with mock.patch( 'compose.cli.log_printer.build_thread', autospec=True ) as mock_build_thread: watch_events(thread_map, event_stream, mock_presenters, thread_args) mock_build_thread.assert_called_once_with( event['container'], next(mock_presenters), *thread_args) assert container_id in thread_map def test_container_attach_event(self, thread_map, mock_presenters): container_id = 'abcd' mock_container = mock.Mock(is_restarting=False) mock_container.attach_log_stream.side_effect = APIError("race condition") event_die = {'action': 'die', 'id': container_id} event_start = {'action': 'start', 'id': container_id, 'container': mock_container} event_stream = [event_die, event_start] thread_args = 'foo', 'bar' watch_events(thread_map, event_stream, mock_presenters, thread_args) assert mock_container.attach_log_stream.called def test_other_event(self, thread_map, mock_presenters): container_id = 'abcd' event_stream = [{'action': 'create', 'id': container_id}] watch_events(thread_map, event_stream, mock_presenters, ()) assert container_id not in thread_map class TestConsumeQueue: def test_item_is_an_exception(self): class Problem(Exception): pass queue = Queue() error = Problem('oops') for item in QueueItem.new('a'), QueueItem.new('b'), QueueItem.exception(error): queue.put(item) generator = consume_queue(queue, False) assert next(generator) == 'a' assert next(generator) == 'b' with pytest.raises(Problem): next(generator) def test_item_is_stop_without_cascade_stop(self): queue = Queue() for item in QueueItem.stop(), 
QueueItem.new('a'), QueueItem.new('b'): queue.put(item) generator = consume_queue(queue, False) assert next(generator) == 'a' assert next(generator) == 'b' def test_item_is_stop_with_cascade_stop(self): """Return the name of the container that caused the cascade_stop""" queue = Queue() for item in QueueItem.stop('foobar-1'), QueueItem.new('a'), QueueItem.new('b'): queue.put(item) generator = consume_queue(queue, True) assert next(generator) == 'foobar-1' def test_item_is_none_when_timeout_is_hit(self): queue = Queue() generator = consume_queue(queue, False) assert next(generator) is None compose-1.29.2/tests/unit/cli/main_test.py000066400000000000000000000214761404620552300205120ustar00rootroot00000000000000import logging import docker import pytest from compose import container from compose.cli.errors import UserError from compose.cli.formatter import ConsoleWarningFormatter from compose.cli.main import build_one_off_container_options from compose.cli.main import call_docker from compose.cli.main import convergence_strategy_from_opts from compose.cli.main import filter_attached_containers from compose.cli.main import get_docker_start_call from compose.cli.main import setup_console_handler from compose.cli.main import warn_for_swarm_mode from compose.service import ConvergenceStrategy from tests import mock def mock_container(service, number): return mock.create_autospec( container.Container, service=service, number=number, name_without_project='{}_{}'.format(service, number)) @pytest.fixture def logging_handler(): stream = mock.Mock() stream.isatty.return_value = True return logging.StreamHandler(stream=stream) class TestCLIMainTestCase: def test_filter_attached_containers(self): containers = [ mock_container('web', 1), mock_container('web', 2), mock_container('db', 1), mock_container('other', 1), mock_container('another', 1), ] service_names = ['web', 'db'] actual = filter_attached_containers(containers, service_names) assert actual == containers[:3] def test_filter_attached_containers_with_dependencies(self): containers = [ mock_container('web', 1), mock_container('web', 2), mock_container('db', 1), mock_container('other', 1), mock_container('another', 1), ] service_names = ['web', 'db'] actual = filter_attached_containers(containers, service_names, attach_dependencies=True) assert actual == containers def test_filter_attached_containers_all(self): containers = [ mock_container('web', 1), mock_container('db', 1), mock_container('other', 1), ] service_names = [] actual = filter_attached_containers(containers, service_names) assert actual == containers def test_warning_in_swarm_mode(self): mock_client = mock.create_autospec(docker.APIClient) mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}} with mock.patch('compose.cli.main.log') as fake_log: warn_for_swarm_mode(mock_client) assert fake_log.warning.call_count == 1 def test_build_one_off_container_options(self): command = 'build myservice' detach = False options = { '-e': ['MYVAR=MYVALUE'], '-T': True, '--label': ['MYLABEL'], '--entrypoint': 'bash', '--user': 'MYUSER', '--service-ports': [], '--publish': '', '--name': 'MYNAME', '--workdir': '.', '--volume': [], 'stdin_open': False, } expected_container_options = { 'command': command, 'tty': False, 'stdin_open': False, 'detach': detach, 'entrypoint': 'bash', 'environment': {'MYVAR': 'MYVALUE'}, 'labels': {'MYLABEL': ''}, 'name': 'MYNAME', 'ports': [], 'restart': None, 'user': 'MYUSER', 'working_dir': '.', } container_options = build_one_off_container_options(options, 
detach, command) assert container_options == expected_container_options def test_get_docker_start_call(self): container_id = 'my_container_id' mock_container_options = {'detach': False, 'stdin_open': True} expected_docker_start_call = ['start', '--attach', '--interactive', container_id] docker_start_call = get_docker_start_call(mock_container_options, container_id) assert expected_docker_start_call == docker_start_call mock_container_options = {'detach': False, 'stdin_open': False} expected_docker_start_call = ['start', '--attach', container_id] docker_start_call = get_docker_start_call(mock_container_options, container_id) assert expected_docker_start_call == docker_start_call mock_container_options = {'detach': True, 'stdin_open': True} expected_docker_start_call = ['start', '--interactive', container_id] docker_start_call = get_docker_start_call(mock_container_options, container_id) assert expected_docker_start_call == docker_start_call mock_container_options = {'detach': True, 'stdin_open': False} expected_docker_start_call = ['start', container_id] docker_start_call = get_docker_start_call(mock_container_options, container_id) assert expected_docker_start_call == docker_start_call class TestSetupConsoleHandlerTestCase: def test_with_console_formatter_verbose(self, logging_handler): setup_console_handler(logging_handler, True) assert type(logging_handler.formatter) == ConsoleWarningFormatter assert '%(name)s' in logging_handler.formatter._fmt assert '%(funcName)s' in logging_handler.formatter._fmt def test_with_console_formatter_not_verbose(self, logging_handler): setup_console_handler(logging_handler, False) assert type(logging_handler.formatter) == ConsoleWarningFormatter assert '%(name)s' not in logging_handler.formatter._fmt assert '%(funcName)s' not in logging_handler.formatter._fmt def test_without_console_formatter(self, logging_handler): setup_console_handler(logging_handler, False, use_console_formatter=False) assert type(logging_handler.formatter) == logging.Formatter class TestConvergeStrategyFromOptsTestCase: def test_invalid_opts(self): options = {'--force-recreate': True, '--no-recreate': True} with pytest.raises(UserError): convergence_strategy_from_opts(options) def test_always(self): options = {'--force-recreate': True, '--no-recreate': False} assert ( convergence_strategy_from_opts(options) == ConvergenceStrategy.always ) def test_never(self): options = {'--force-recreate': False, '--no-recreate': True} assert ( convergence_strategy_from_opts(options) == ConvergenceStrategy.never ) def test_changed(self): options = {'--force-recreate': False, '--no-recreate': False} assert ( convergence_strategy_from_opts(options) == ConvergenceStrategy.changed ) def mock_find_executable(exe): return exe @mock.patch('compose.cli.main.find_executable', mock_find_executable) class TestCallDocker: def test_simple_no_options(self): with mock.patch('subprocess.call') as fake_call: call_docker(['ps'], {}, {}) assert fake_call.call_args[0][0] == ['docker', 'ps'] def test_simple_tls_option(self): with mock.patch('subprocess.call') as fake_call: call_docker(['ps'], {'--tls': True}, {}) assert fake_call.call_args[0][0] == ['docker', '--tls', 'ps'] def test_advanced_tls_options(self): with mock.patch('subprocess.call') as fake_call: call_docker(['ps'], { '--tls': True, '--tlscacert': './ca.pem', '--tlscert': './cert.pem', '--tlskey': './key.pem', }, {}) assert fake_call.call_args[0][0] == [ 'docker', '--tls', '--tlscacert', './ca.pem', '--tlscert', './cert.pem', '--tlskey', './key.pem', 'ps' ] 
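    # --- Illustrative sketch only, not part of the original suite ---
    # Every flag-forwarding check in TestCallDocker follows one pattern:
    # stub subprocess.call, invoke call_docker(), and compare the argv it
    # assembles. Under that assumption the same checks can be table-driven
    # with pytest.mark.parametrize; the test and parameter names below are
    # hypothetical, and the expected argv values mirror the passing tests
    # above rather than introducing new behavior.
    @pytest.mark.parametrize('opts, expected_argv', [
        ({}, ['docker', 'ps']),
        ({'--tls': True}, ['docker', '--tls', 'ps']),
    ])
    def test_call_docker_argv_sketch(self, opts, expected_argv):
        with mock.patch('subprocess.call') as fake_call:
            call_docker(['ps'], opts, {})
        assert fake_call.call_args[0][0] == expected_argv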
    def test_with_host_option(self):
        with mock.patch('subprocess.call') as fake_call:
            call_docker(['ps'], {'--host': 'tcp://mydocker.net:2333'}, {})

        assert fake_call.call_args[0][0] == [
            'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
        ]

    def test_with_http_host(self):
        with mock.patch('subprocess.call') as fake_call:
            call_docker(['ps'], {'--host': 'http://mydocker.net:2333'}, {})

        assert fake_call.call_args[0][0] == [
            'docker', '--host', 'tcp://mydocker.net:2333', 'ps',
        ]

    def test_with_host_option_shorthand_equal(self):
        with mock.patch('subprocess.call') as fake_call:
            call_docker(['ps'], {'--host': '=tcp://mydocker.net:2333'}, {})

        assert fake_call.call_args[0][0] == [
            'docker', '--host', 'tcp://mydocker.net:2333', 'ps'
        ]

    def test_with_env(self):
        with mock.patch('subprocess.call') as fake_call:
            call_docker(['ps'], {}, {'DOCKER_HOST': 'tcp://mydocker.net:2333'})

        assert fake_call.call_args[0][0] == [
            'docker', 'ps'
        ]
        assert fake_call.call_args[1]['env'] == {'DOCKER_HOST': 'tcp://mydocker.net:2333'}


# ==== compose-1.29.2/tests/unit/cli/utils_test.py ====

import unittest

from compose.cli.utils import human_readable_file_size
from compose.utils import unquote_path


class UnquotePathTest(unittest.TestCase):

    def test_no_quotes(self):
        assert unquote_path('hello') == 'hello'

    def test_simple_quotes(self):
        assert unquote_path('"hello"') == 'hello'

    def test_uneven_quotes(self):
        assert unquote_path('"hello') == '"hello'
        assert unquote_path('hello"') == 'hello"'

    def test_nested_quotes(self):
        assert unquote_path('""hello""') == '"hello"'
        assert unquote_path('"hel"lo"') == 'hel"lo'
        assert unquote_path('"hello""') == 'hello"'


class HumanReadableFileSizeTest(unittest.TestCase):

    def test_100b(self):
        assert human_readable_file_size(100) == '100 B'

    def test_1kb(self):
        assert human_readable_file_size(1000) == '1 kB'
        assert human_readable_file_size(1024) == '1.024 kB'

    def test_1023b(self):
        assert human_readable_file_size(1023) == '1.023 kB'

    def test_999b(self):
        assert human_readable_file_size(999) == '999 B'

    def test_units(self):
        assert human_readable_file_size((10 ** 3) ** 0) == '1 B'
        assert human_readable_file_size((10 ** 3) ** 1) == '1 kB'
        assert human_readable_file_size((10 ** 3) ** 2) == '1 MB'
        assert human_readable_file_size((10 ** 3) ** 3) == '1 GB'
        assert human_readable_file_size((10 ** 3) ** 4) == '1 TB'
        assert human_readable_file_size((10 ** 3) ** 5) == '1 PB'
        assert human_readable_file_size((10 ** 3) ** 6) == '1 EB'


# ==== compose-1.29.2/tests/unit/cli/verbose_proxy_test.py ====

from compose.cli import verbose_proxy
from tests import unittest


class VerboseProxyTestCase(unittest.TestCase):

    def test_format_call(self):
        prefix = ''
        expected = "(%(p)s'arg1', True, key=%(p)s'value')" % dict(p=prefix)
        actual = verbose_proxy.format_call(
            ("arg1", True), {'key': 'value'})

        assert expected == actual

    def test_format_return_sequence(self):
        expected = "(list with 10 items)"
        actual = verbose_proxy.format_return(list(range(10)), 2)
        assert expected == actual

    def test_format_return(self):
        expected = repr({'Id': 'ok'})
        actual = verbose_proxy.format_return({'Id': 'ok'}, 2)
        assert expected == actual

    def test_format_return_no_result(self):
        actual = verbose_proxy.format_return(None, 2)
        assert actual is None


# ==== compose-1.29.2/tests/unit/cli_test.py ====

import os
import shutil
import tempfile
from io import StringIO

import
docker import py import pytest from docker.constants import DEFAULT_DOCKER_API_VERSION from .. import mock from .. import unittest from ..helpers import build_config from compose.cli.command import get_project from compose.cli.command import get_project_name from compose.cli.docopt_command import NoSuchCommand from compose.cli.errors import UserError from compose.cli.main import TopLevelCommand from compose.config.environment import Environment from compose.const import IS_WINDOWS_PLATFORM from compose.const import LABEL_SERVICE from compose.container import Container from compose.project import Project class CLITestCase(unittest.TestCase): def test_default_project_name(self): test_dir = py._path.local.LocalPath('tests/fixtures/simple-composefile') with test_dir.as_cwd(): project_name = get_project_name('.') assert 'simple-composefile' == project_name def test_project_name_with_explicit_base_dir(self): base_dir = 'tests/fixtures/simple-composefile' project_name = get_project_name(base_dir) assert 'simple-composefile' == project_name def test_project_name_with_explicit_uppercase_base_dir(self): base_dir = 'tests/fixtures/UpperCaseDir' project_name = get_project_name(base_dir) assert 'uppercasedir' == project_name def test_project_name_with_explicit_project_name(self): name = 'explicit-project-name' project_name = get_project_name(None, project_name=name) assert 'explicit-project-name' == project_name @mock.patch.dict(os.environ) def test_project_name_from_environment_new_var(self): name = 'namefromenv' os.environ['COMPOSE_PROJECT_NAME'] = name project_name = get_project_name(None) assert project_name == name def test_project_name_with_empty_environment_var(self): base_dir = 'tests/fixtures/simple-composefile' with mock.patch.dict(os.environ): os.environ['COMPOSE_PROJECT_NAME'] = '' project_name = get_project_name(base_dir) assert 'simple-composefile' == project_name @mock.patch.dict(os.environ) def test_project_name_with_environment_file(self): base_dir = tempfile.mkdtemp() try: name = 'namefromenvfile' with open(os.path.join(base_dir, '.env'), 'w') as f: f.write('COMPOSE_PROJECT_NAME={}'.format(name)) project_name = get_project_name(base_dir) assert project_name == name # Environment has priority over .env file os.environ['COMPOSE_PROJECT_NAME'] = 'namefromenv' assert get_project_name(base_dir) == os.environ['COMPOSE_PROJECT_NAME'] finally: shutil.rmtree(base_dir) def test_get_project(self): base_dir = 'tests/fixtures/longer-filename-composefile' env = Environment.from_env_file(base_dir) env['COMPOSE_API_VERSION'] = DEFAULT_DOCKER_API_VERSION project = get_project(base_dir, environment=env) assert project.name == 'longer-filename-composefile' assert project.client assert project.services def test_command_help(self): with mock.patch('sys.stdout', new=StringIO()) as fake_stdout: TopLevelCommand.help({'COMMAND': 'up'}) assert "Usage: up" in fake_stdout.getvalue() def test_command_help_nonexistent(self): with pytest.raises(NoSuchCommand): TopLevelCommand.help({'COMMAND': 'nonexistent'}) @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason="requires dockerpty") @mock.patch('compose.cli.main.RunOperation', autospec=True) @mock.patch('compose.cli.main.PseudoTerminal', autospec=True) @mock.patch('compose.service.Container.create') @mock.patch.dict(os.environ) def test_run_interactive_passes_logs_false( self, mock_container_create, mock_pseudo_terminal, mock_run_operation, ): os.environ['COMPOSE_INTERACTIVE_NO_CLI'] = 'true' mock_client = mock.create_autospec(docker.APIClient) mock_client.api_version = 
DEFAULT_DOCKER_API_VERSION mock_client._general_configs = {} mock_container_create.return_value = Container(mock_client, { 'Id': '37b35e0ba80d91009d37e16f249b32b84f72bda269985578ed6c75a0a13fcaa8', 'Config': { 'Labels': { LABEL_SERVICE: 'service', } }, }, has_been_inspected=True) project = Project.from_config( name='composetest', client=mock_client, config_data=build_config({ 'service': {'image': 'busybox'} }), ) command = TopLevelCommand(project) with pytest.raises(SystemExit): command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], '--label': [], '--user': None, '--no-deps': None, '--detach': False, '-T': None, '--entrypoint': None, '--service-ports': None, '--use-aliases': None, '--publish': [], '--volume': [], '--rm': None, '--name': None, '--workdir': None, }) _, _, call_kwargs = mock_run_operation.mock_calls[0] assert call_kwargs['logs'] is False @mock.patch('compose.service.Container.create') def test_run_service_with_restart_always(self, mock_container_create): mock_client = mock.create_autospec(docker.APIClient) mock_client.api_version = DEFAULT_DOCKER_API_VERSION mock_client._general_configs = {} mock_container_create.return_value = Container(mock_client, { 'Id': '37b35e0ba80d91009d37e16f249b32b84f72bda269985578ed6c75a0a13fcaa8', 'Name': 'composetest_service_37b35', 'Config': { 'Labels': { LABEL_SERVICE: 'service', } }, }, has_been_inspected=True) project = Project.from_config( name='composetest', client=mock_client, config_data=build_config({ 'service': { 'image': 'busybox', 'restart': 'always', } }), ) command = TopLevelCommand(project) command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], '--label': [], '--user': None, '--no-deps': None, '--detach': True, '-T': None, '--entrypoint': None, '--service-ports': None, '--use-aliases': None, '--publish': [], '--volume': [], '--rm': None, '--name': None, '--workdir': None, }) # NOTE: The "run" command is supposed to be a one-off tool; therefore restart policy "no" # (the default) is enforced despite explicit wish for "always" in the project # configuration file assert not mock_client.create_host_config.call_args[1].get('restart_policy') command = TopLevelCommand(project) command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], '--label': [], '--user': None, '--no-deps': None, '--detach': True, '-T': None, '--entrypoint': None, '--service-ports': None, '--use-aliases': None, '--publish': [], '--volume': [], '--rm': True, '--name': None, '--workdir': None, }) assert not mock_client.create_host_config.call_args[1].get('restart_policy') @mock.patch('compose.project.Project.up') @mock.patch.dict(os.environ) def test_run_up_with_docker_cli_build(self, mock_project_up): os.environ['COMPOSE_DOCKER_CLI_BUILD'] = '1' mock_client = mock.create_autospec(docker.APIClient) mock_client.api_version = DEFAULT_DOCKER_API_VERSION mock_client._general_configs = {} container = Container(mock_client, { 'Id': '37b35e0ba80d91009d37e16f249b32b84f72bda269985578ed6c75a0a13fcaa8', 'Name': 'composetest_service_37b35', 'Config': { 'Labels': { LABEL_SERVICE: 'service', } }, }, has_been_inspected=True) mock_project_up.return_value = [container] project = Project.from_config( name='composetest', config_data=build_config({ 'service': {'image': 'busybox'} }), client=mock_client, ) command = TopLevelCommand(project) command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], '--label': [], '--user': None, '--no-deps': None, '--detach': True, '-T': None, '--entrypoint': None, '--service-ports': None, '--use-aliases': None, '--publish': [], 
'--volume': [], '--rm': None, '--name': None, '--workdir': None, }) _, _, call_kwargs = mock_project_up.mock_calls[0] assert call_kwargs.get('cli') def test_command_manual_and_service_ports_together(self): project = Project.from_config( name='composetest', client=None, config_data=build_config({ 'service': {'image': 'busybox'}, }), ) command = TopLevelCommand(project) with pytest.raises(UserError): command.run({ 'SERVICE': 'service', 'COMMAND': None, '-e': [], '--label': [], '--user': None, '--no-deps': None, '--detach': True, '-T': None, '--entrypoint': None, '--service-ports': True, '--use-aliases': None, '--publish': ['80:80'], '--rm': None, '--name': None, }) compose-1.29.2/tests/unit/config/000077500000000000000000000000001404620552300166415ustar00rootroot00000000000000compose-1.29.2/tests/unit/config/__init__.py000066400000000000000000000000001404620552300207400ustar00rootroot00000000000000compose-1.29.2/tests/unit/config/config_test.py000066400000000000000000005566351404620552300215430ustar00rootroot00000000000000import codecs import os import shutil import tempfile from operator import itemgetter from random import shuffle import pytest import yaml from ddt import data from ddt import ddt from ...helpers import build_config_details from ...helpers import BUSYBOX_IMAGE_WITH_TAG from ...helpers import cd from compose.config import config from compose.config import types from compose.config.config import ConfigFile from compose.config.config import resolve_build_args from compose.config.config import resolve_environment from compose.config.environment import Environment from compose.config.errors import ConfigurationError from compose.config.errors import VERSION_EXPLANATION from compose.config.serialize import denormalize_service_dict from compose.config.serialize import serialize_config from compose.config.serialize import serialize_ns_time_value from compose.config.types import VolumeSpec from compose.const import COMPOSE_SPEC as VERSION from compose.const import COMPOSEFILE_V1 as V1 from compose.const import IS_WINDOWS_PLATFORM from tests import mock from tests import unittest DEFAULT_VERSION = VERSION def make_service_dict(name, service_dict, working_dir='.', filename=None): """Test helper function to construct a ServiceExtendsResolver """ resolver = config.ServiceExtendsResolver( config.ServiceConfig( working_dir=working_dir, filename=filename, name=name, config=service_dict), config.ConfigFile(filename=filename, config={}), environment=Environment.from_env_file(working_dir) ) return config.process_service(resolver.run()) def service_sort(services): return sorted(services, key=itemgetter('name')) def secret_sort(secrets): return sorted(secrets, key=itemgetter('source')) @ddt class ConfigTest(unittest.TestCase): def test_load(self): service_dicts = config.load( build_config_details( { 'services': { 'foo': {'image': 'busybox'}, 'bar': {'image': 'busybox', 'environment': ['FOO=1']}, } }, 'tests/fixtures/extends', 'common.yml' ) ).services assert service_sort(service_dicts) == service_sort([ { 'name': 'bar', 'image': 'busybox', 'environment': {'FOO': '1'}, }, { 'name': 'foo', 'image': 'busybox', } ]) def test_load_v2(self): config_data = config.load( build_config_details({ 'version': '2', 'services': { 'foo': {'image': 'busybox'}, 'bar': {'image': 'busybox', 'environment': ['FOO=1']}, }, 'volumes': { 'hello': { 'driver': 'default', 'driver_opts': {'beep': 'boop'} } }, 'networks': { 'default': { 'driver': 'bridge', 'driver_opts': {'beep': 'boop'} }, 'with_ipam': { 'ipam': { 'driver': 
'default', 'config': [ {'subnet': '172.28.0.0/16'} ] } }, 'internal': { 'driver': 'bridge', 'internal': True } } }, 'working_dir', 'filename.yml') ) service_dicts = config_data.services volume_dict = config_data.volumes networks_dict = config_data.networks assert service_sort(service_dicts) == service_sort([ { 'name': 'bar', 'image': 'busybox', 'environment': {'FOO': '1'}, }, { 'name': 'foo', 'image': 'busybox', } ]) assert volume_dict == { 'hello': { 'driver': 'default', 'driver_opts': {'beep': 'boop'} } } assert networks_dict == { 'default': { 'driver': 'bridge', 'driver_opts': {'beep': 'boop'} }, 'with_ipam': { 'ipam': { 'driver': 'default', 'config': [ {'subnet': '172.28.0.0/16'} ] } }, 'internal': { 'driver': 'bridge', 'internal': True } } def test_valid_versions(self): cfg = config.load( build_config_details({ 'services': { 'foo': {'image': 'busybox'}, 'bar': {'image': 'busybox', 'environment': ['FOO=1']}, } }) ) assert cfg.config_version == VERSION assert cfg.version == VERSION for version in ['2', '2.0', '2.1', '2.2', '2.3', '3', '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8']: cfg = config.load(build_config_details({'version': version})) assert cfg.config_version == version assert cfg.version == VERSION def test_v1_file_version(self): cfg = config.load(build_config_details({'web': {'image': 'busybox'}})) assert cfg.version == V1 assert list(s['name'] for s in cfg.services) == ['web'] cfg = config.load(build_config_details({'version': {'image': 'busybox'}})) assert cfg.version == V1 assert list(s['name'] for s in cfg.services) == ['version'] def test_wrong_version_type(self): for version in [1, 2, 2.0]: with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( {'version': version}, filename='filename.yml', ) ) assert 'Version in "filename.yml" is invalid - it should be a string.' 
\ in excinfo.exconly() def test_unsupported_version(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( {'version': '1'}, filename='filename.yml', ) ) assert 'Version in "filename.yml" is invalid' in excinfo.exconly() assert VERSION_EXPLANATION in excinfo.exconly() def test_version_1_is_invalid(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '1', 'web': {'image': 'busybox'}, }, filename='filename.yml', ) ) assert 'Version in "filename.yml" is invalid' in excinfo.exconly() assert VERSION_EXPLANATION in excinfo.exconly() def test_v1_file_with_version_is_invalid(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '2', 'web': {'image': 'busybox'}, }, filename='filename.yml', ) ) assert "compose.config.errors.ConfigurationError: " \ "The Compose file 'filename.yml' is invalid because:\n" \ "'web' does not match any of the regexes: '^x-'" in excinfo.exconly() assert VERSION_EXPLANATION in excinfo.exconly() def test_named_volume_config_empty(self): config_details = build_config_details({ 'version': '2', 'services': { 'simple': {'image': 'busybox'} }, 'volumes': { 'simple': None, 'other': {}, } }) config_result = config.load(config_details) volumes = config_result.volumes assert 'simple' in volumes assert volumes['simple'] == {} assert volumes['other'] == {} def test_named_volume_numeric_driver_opt(self): config_details = build_config_details({ 'version': '2', 'services': { 'simple': {'image': 'busybox'} }, 'volumes': { 'simple': {'driver_opts': {'size': 42}}, } }) cfg = config.load(config_details) assert cfg.volumes['simple']['driver_opts']['size'] == '42' def test_volume_invalid_driver_opt(self): config_details = build_config_details({ 'version': '2', 'services': { 'simple': {'image': 'busybox'} }, 'volumes': { 'simple': {'driver_opts': {'size': True}}, } }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert 'driver_opts.size contains an invalid type' in exc.exconly() def test_named_volume_invalid_type_list(self): config_details = build_config_details({ 'version': '2', 'services': { 'simple': {'image': 'busybox'} }, 'volumes': [] }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert "volume must be a mapping, not an array" in exc.exconly() def test_networks_invalid_type_list(self): config_details = build_config_details({ 'version': '2', 'services': { 'simple': {'image': 'busybox'} }, 'networks': [] }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert "network must be a mapping, not an array" in exc.exconly() def test_load_service_with_name_version(self): with mock.patch('compose.config.config.log') as mock_logging: config_data = config.load( build_config_details({ 'version': { 'image': 'busybox' } }, 'working_dir', 'filename.yml') ) assert 'Unexpected type for "version" key in "filename.yml"' \ in mock_logging.warning.call_args[0][0] service_dicts = config_data.services assert service_sort(service_dicts) == service_sort([ { 'name': 'version', 'image': 'busybox', } ]) def test_load_throws_error_when_not_dict(self): with pytest.raises(ConfigurationError): config.load( build_config_details( {'web': BUSYBOX_IMAGE_WITH_TAG}, 'working_dir', 'filename.yml' ) ) def test_load_throws_error_when_not_dict_v2(self): with pytest.raises(ConfigurationError): config.load( build_config_details( {'version': '2', 'services': {'web': BUSYBOX_IMAGE_WITH_TAG}}, 'working_dir', 
'filename.yml' ) ) def test_load_throws_error_with_invalid_network_fields(self): with pytest.raises(ConfigurationError): config.load( build_config_details({ 'version': '2', 'services': {'web': BUSYBOX_IMAGE_WITH_TAG}, 'networks': { 'invalid': {'foo', 'bar'} } }, 'working_dir', 'filename.yml') ) def test_load_config_link_local_ips_network(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2', 'services': { 'web': { 'image': 'example/web', 'networks': { 'foobar': { 'aliases': ['foo', 'bar'], 'link_local_ips': ['169.254.8.8'] } } } }, 'networks': {'foobar': {}} } ) details = config.ConfigDetails('.', [base_file]) web_service = config.load(details).services[0] assert web_service['networks'] == { 'foobar': { 'aliases': ['foo', 'bar'], 'link_local_ips': ['169.254.8.8'] } } def test_load_config_service_labels(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2.1', 'services': { 'web': { 'image': 'example/web', 'labels': ['label_key=label_val'] }, 'db': { 'image': 'example/db', 'labels': { 'label_key': 'label_val' } } }, } ) details = config.ConfigDetails('.', [base_file]) service_dicts = config.load(details).services for service in service_dicts: assert service['labels'] == { 'label_key': 'label_val' } def test_load_config_custom_resource_names(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '3.5', 'volumes': { 'abc': { 'name': 'xyz' } }, 'networks': { 'abc': { 'name': 'xyz' } }, 'secrets': { 'abc': { 'name': 'xyz' } }, 'configs': { 'abc': { 'name': 'xyz' } } } ) details = config.ConfigDetails('.', [base_file]) loaded_config = config.load(details) assert loaded_config.networks['abc'] == {'name': 'xyz'} assert loaded_config.volumes['abc'] == {'name': 'xyz'} assert loaded_config.secrets['abc']['name'] == 'xyz' assert loaded_config.configs['abc']['name'] == 'xyz' def test_load_config_volume_and_network_labels(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2.1', 'services': { 'web': { 'image': 'example/web', }, }, 'networks': { 'with_label': { 'labels': { 'label_key': 'label_val' } } }, 'volumes': { 'with_label': { 'labels': { 'label_key': 'label_val' } } } } ) details = config.ConfigDetails('.', [base_file]) loaded_config = config.load(details) assert loaded_config.networks == { 'with_label': { 'labels': { 'label_key': 'label_val' } } } assert loaded_config.volumes == { 'with_label': { 'labels': { 'label_key': 'label_val' } } } def test_load_config_invalid_service_names(self): for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']: with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( { 'version': '2', 'services': { invalid_name: { 'image': 'busybox' } } })) assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly() def test_load_config_invalid_service_names_v2(self): for invalid_name in ['?not?allowed', ' ', '', '!', '/', '\xe2']: with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( { 'version': '2', 'services': {invalid_name: {'image': 'busybox'}}, })) assert 'Invalid service name \'%s\'' % invalid_name in exc.exconly() def test_load_with_invalid_field_name(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( { 'version': '2', 'services': { 'web': {'image': 'busybox', 'name': 'bogus'}, } }, 'working_dir', 'filename.yml', )) assert "Unsupported config option for services.web: 'name'" in exc.exconly() def test_load_with_invalid_field_name_v1(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( { 
'version': '2', 'services': { 'web': {'image': 'busybox', 'name': 'bogus'} } }, 'working_dir', 'filename.yml', )) assert "Unsupported config option for services.web: 'name'" in exc.exconly() def test_load_invalid_service_definition(self): config_details = build_config_details( { 'version': '2', 'services': { 'web': 'wrong' } }, 'working_dir', 'filename.yml') with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert "service 'web' must be a mapping not a string." in exc.exconly() def test_load_with_empty_build_args(self): config_details = build_config_details( { 'version': '2', 'services': { 'web': { 'build': { 'context': os.getcwd(), 'args': None, }, }, }, } ) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert ( "services.web.build.args contains an invalid type, it should be an " "object, or an array" in exc.exconly() ) def test_config_integer_service_name_raise_validation_error(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '2', 'services': {1: {'image': 'busybox'}} }, 'working_dir', 'filename.yml' ) ) assert ( "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'" in excinfo.exconly() ) def test_config_integer_service_name_raise_validation_error_v2(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '2', 'services': {1: {'image': 'busybox'}} }, 'working_dir', 'filename.yml' ) ) assert ( "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'." in excinfo.exconly() ) def test_config_integer_service_name_raise_validation_error_v2_when_no_interpolate(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '2', 'services': {1: {'image': 'busybox'}} }, 'working_dir', 'filename.yml' ), interpolate=False ) assert ( "In file 'filename.yml', the service name 1 must be a quoted string, i.e. '1'." 
in excinfo.exconly() ) def test_config_integer_service_property_raise_validation_error(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details({ 'version': '2.1', 'services': {'foobar': {'image': 'busybox', 1234: 'hah'}} }, 'working_dir', 'filename.yml') ) assert ( "Unsupported config option for services.foobar: '1234'" in excinfo.exconly() ) def test_config_invalid_service_name_raise_validation_error(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details({ 'version': '2', 'services': { 'test_app': {'build': '.'}, 'mong\\o': {'image': 'mongo'}, } }) ) assert 'Invalid service name \'mong\\o\'' in excinfo.exconly() def test_config_duplicate_cache_from_values_no_validation_error(self): with pytest.raises(ConfigurationError) as exc: config.load( build_config_details({ 'version': '2.3', 'services': { 'test': {'build': {'context': '.', 'cache_from': ['a', 'b', 'a']}} } }) ) assert 'build.cache_from contains non-unique items' not in exc.exconly() def test_load_with_multiple_files_v1(self): base_file = config.ConfigFile( 'base.yaml', { 'web': { 'image': 'example/web', 'links': ['db'], }, 'db': { 'image': 'example/db', }, }) override_file = config.ConfigFile( 'override.yaml', { 'web': { 'build': '/', 'volumes': ['/home/user/project:/code'], }, }) details = config.ConfigDetails('.', [base_file, override_file]) service_dicts = config.load(details).services expected = [ { 'name': 'web', 'build': {'context': os.path.abspath('/')}, 'volumes': [VolumeSpec.parse('/home/user/project:/code')], 'links': ['db'], }, { 'name': 'db', 'image': 'example/db', }, ] assert service_sort(service_dicts) == service_sort(expected) def test_load_with_multiple_files_and_empty_override(self): base_file = config.ConfigFile( 'base.yml', {'web': {'image': 'example/web'}}) override_file = config.ConfigFile('override.yml', None) details = config.ConfigDetails('.', [base_file, override_file]) with pytest.raises(ConfigurationError) as exc: config.load(details) error_msg = "Top level object in 'override.yml' needs to be an object" assert error_msg in exc.exconly() def test_load_with_multiple_files_and_empty_override_v2(self): base_file = config.ConfigFile( 'base.yml', {'version': '2', 'services': {'web': {'image': 'example/web'}}}) override_file = config.ConfigFile('override.yml', None) details = config.ConfigDetails('.', [base_file, override_file]) with pytest.raises(ConfigurationError) as exc: config.load(details) error_msg = "Top level object in 'override.yml' needs to be an object" assert error_msg in exc.exconly() def test_load_with_multiple_files_and_empty_base(self): base_file = config.ConfigFile('base.yml', None) override_file = config.ConfigFile( 'override.yml', {'web': {'image': 'example/web'}}) details = config.ConfigDetails('.', [base_file, override_file]) with pytest.raises(ConfigurationError) as exc: config.load(details) assert "Top level object in 'base.yml' needs to be an object" in exc.exconly() def test_load_with_multiple_files_and_empty_base_v2(self): base_file = config.ConfigFile('base.yml', None) override_file = config.ConfigFile( 'override.tml', {'version': '2', 'services': {'web': {'image': 'example/web'}}} ) details = config.ConfigDetails('.', [base_file, override_file]) with pytest.raises(ConfigurationError) as exc: config.load(details) assert "Top level object in 'base.yml' needs to be an object" in exc.exconly() def test_load_with_multiple_files_and_extends_in_override_file(self): base_file = config.ConfigFile( 'base.yaml', { 'web': 
{'image': 'example/web'}, }) override_file = config.ConfigFile( 'override.yaml', { 'web': { 'extends': { 'file': 'common.yml', 'service': 'base', }, 'volumes': ['/home/user/project:/code'], }, }) details = config.ConfigDetails('.', [base_file, override_file]) tmpdir = tempfile.mkdtemp('config_test') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'common.yml'), mode="w") as common_fh: common_fh.write(""" base: labels: ['label=one'] """) with cd(tmpdir): service_dicts = config.load(details).services expected = [ { 'name': 'web', 'image': 'example/web', 'volumes': [VolumeSpec.parse('/home/user/project:/code')], 'labels': {'label': 'one'}, }, ] assert service_sort(service_dicts) == service_sort(expected) def test_load_mixed_extends_resolution(self): main_file = config.ConfigFile( 'main.yml', { 'version': '2.2', 'services': { 'prodweb': { 'extends': { 'service': 'web', 'file': 'base.yml' }, 'environment': {'PROD': 'true'}, }, }, } ) tmpdir = tempfile.mkdtemp('config_test') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'base.yml'), mode="w") as base_fh: base_fh.write(""" version: '2.2' services: base: image: base web: extends: base """) details = config.ConfigDetails('.', [main_file]) with cd(tmpdir): service_dicts = config.load(details).services assert service_dicts[0] == { 'name': 'prodweb', 'image': 'base', 'environment': {'PROD': 'true'}, } def test_load_with_multiple_files_and_invalid_override(self): base_file = config.ConfigFile( 'base.yaml', {'version': '2', 'services': {'web': {'image': 'example/web'}}}) override_file = config.ConfigFile( 'override.yaml', {'version': '2', 'services': {'bogus': 'thing'}}) details = config.ConfigDetails('.', [base_file, override_file]) with pytest.raises(ConfigurationError) as exc: config.load(details) assert "service 'bogus' must be a mapping not a string." in exc.exconly() assert "In file 'override.yaml'" in exc.exconly() def test_load_sorts_in_dependency_order(self): config_details = build_config_details({ 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'links': ['db'], }, 'db': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes_from': ['volume:ro'] }, 'volume': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': ['/tmp'], } }) services = config.load(config_details).services assert services[0]['name'] == 'volume' assert services[1]['name'] == 'db' assert services[2]['name'] == 'web' def test_load_with_extensions(self): config_details = build_config_details({ 'version': '2.3', 'x-data': { 'lambda': 3, 'excess': [True, {}] } }) config_data = config.load(config_details) assert config_data.services == [] def test_config_build_configuration(self): service = config.load( build_config_details( {'web': { 'build': '.', 'dockerfile': 'Dockerfile-alt' }}, 'tests/fixtures/extends', 'filename.yml' ) ).services assert 'context' in service[0]['build'] assert service[0]['build']['dockerfile'] == 'Dockerfile-alt' def test_config_build_configuration_v2(self): # service.dockerfile is invalid in v2 with pytest.raises(ConfigurationError): config.load( build_config_details( { 'version': '2', 'services': { 'web': { 'build': '.', 'dockerfile': 'Dockerfile-alt' } } }, 'tests/fixtures/extends', 'filename.yml' ) ) service = config.load( build_config_details({ 'version': '2', 'services': { 'web': { 'build': '.' 
} } }, 'tests/fixtures/extends', 'filename.yml') ).services[0] assert 'context' in service['build'] service = config.load( build_config_details( { 'version': '2', 'services': { 'web': { 'build': { 'context': '.', 'dockerfile': 'Dockerfile-alt' } } } }, 'tests/fixtures/extends', 'filename.yml' ) ).services assert 'context' in service[0]['build'] assert service[0]['build']['dockerfile'] == 'Dockerfile-alt' def test_load_with_buildargs(self): service = config.load( build_config_details( { 'version': '2', 'services': { 'web': { 'build': { 'context': '.', 'dockerfile': 'Dockerfile-alt', 'args': { 'opt1': 42, 'opt2': 'foobar' } } } } }, 'tests/fixtures/extends', 'filename.yml' ) ).services[0] assert 'args' in service['build'] assert 'opt1' in service['build']['args'] assert isinstance(service['build']['args']['opt1'], str) assert service['build']['args']['opt1'] == '42' assert service['build']['args']['opt2'] == 'foobar' def test_load_build_labels_dict(self): service = config.load( build_config_details( { 'services': { 'web': { 'build': { 'context': '.', 'dockerfile': 'Dockerfile-alt', 'labels': { 'label1': 42, 'label2': 'foobar' } } } } }, 'tests/fixtures/extends', 'filename.yml' ) ).services[0] assert 'labels' in service['build'] assert 'label1' in service['build']['labels'] assert service['build']['labels']['label1'] == '42' assert service['build']['labels']['label2'] == 'foobar' def test_load_build_labels_list(self): base_file = config.ConfigFile( 'base.yml', { 'version': '2.3', 'services': { 'web': { 'build': { 'context': '.', 'labels': ['foo=bar', 'baz=true', 'foobar=1'] }, }, }, } ) details = config.ConfigDetails('.', [base_file]) service = config.load(details).services[0] assert service['build']['labels'] == { 'foo': 'bar', 'baz': 'true', 'foobar': '1' } def test_build_args_allow_empty_properties(self): service = config.load( build_config_details( { 'version': '2', 'services': { 'web': { 'build': { 'context': '.', 'dockerfile': 'Dockerfile-alt', 'args': { 'foo': None } } } } }, 'tests/fixtures/extends', 'filename.yml' ) ).services[0] assert 'args' in service['build'] assert 'foo' in service['build']['args'] assert service['build']['args']['foo'] == '' # If build argument is None then it will be converted to the empty # string. Make sure that int zero kept as it is, i.e. 
not converted to # the empty string def test_build_args_check_zero_preserved(self): service = config.load( build_config_details( { 'version': '2', 'services': { 'web': { 'build': { 'context': '.', 'dockerfile': 'Dockerfile-alt', 'args': { 'foo': 0 } } } } }, 'tests/fixtures/extends', 'filename.yml' ) ).services[0] assert 'args' in service['build'] assert 'foo' in service['build']['args'] assert service['build']['args']['foo'] == '0' def test_load_with_multiple_files_mismatched_networks_format(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2', 'services': { 'web': { 'image': 'example/web', 'networks': { 'foobar': {'aliases': ['foo', 'bar']} } } }, 'networks': {'foobar': {}, 'baz': {}} } ) override_file = config.ConfigFile( 'override.yaml', { 'version': '2', 'services': { 'web': { 'networks': ['baz'] } } } ) details = config.ConfigDetails('.', [base_file, override_file]) web_service = config.load(details).services[0] assert web_service['networks'] == { 'foobar': {'aliases': ['bar', 'foo']}, 'baz': {} } def test_load_with_multiple_files_mismatched_networks_format_inverse_order(self): base_file = config.ConfigFile( 'override.yaml', { 'version': '2', 'services': { 'web': { 'networks': ['baz'] } } } ) override_file = config.ConfigFile( 'base.yaml', { 'version': '2', 'services': { 'web': { 'image': 'example/web', 'networks': { 'foobar': {'aliases': ['foo', 'bar']} } } }, 'networks': {'foobar': {}, 'baz': {}} } ) details = config.ConfigDetails('.', [base_file, override_file]) web_service = config.load(details).services[0] assert web_service['networks'] == { 'foobar': {'aliases': ['bar', 'foo']}, 'baz': {} } def test_load_with_multiple_files_v2(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2', 'services': { 'web': { 'image': 'example/web', 'depends_on': ['db'], }, 'db': { 'image': 'example/db', } }, }) override_file = config.ConfigFile( 'override.yaml', { 'version': '2', 'services': { 'web': { 'build': '/', 'volumes': ['/home/user/project:/code'], 'depends_on': ['other'], }, 'other': { 'image': 'example/other', } } }) details = config.ConfigDetails('.', [base_file, override_file]) service_dicts = config.load(details).services expected = [ { 'name': 'web', 'build': {'context': os.path.abspath('/')}, 'image': 'example/web', 'volumes': [VolumeSpec.parse('/home/user/project:/code')], 'depends_on': { 'db': {'condition': 'service_started'}, 'other': {'condition': 'service_started'}, }, }, { 'name': 'db', 'image': 'example/db', }, { 'name': 'other', 'image': 'example/other', }, ] assert service_sort(service_dicts) == service_sort(expected) @mock.patch.dict(os.environ) def test_load_with_multiple_files_v3_2(self): os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true' base_file = config.ConfigFile( 'base.yaml', { 'version': '3.2', 'services': { 'web': { 'image': 'example/web', 'volumes': [ {'source': '/a', 'target': '/b', 'type': 'bind'}, {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True} ], 'stop_grace_period': '30s', } }, 'volumes': {'vol': {}} } ) override_file = config.ConfigFile( 'override.yaml', { 'version': '3.2', 'services': { 'web': { 'volumes': ['/c:/b', '/anonymous'] } } } ) details = config.ConfigDetails('.', [base_file, override_file]) service_dicts = config.load(details).services svc_volumes = map(lambda v: v.repr(), service_dicts[0]['volumes']) for vol in svc_volumes: assert vol in [ '/anonymous', '/c:/b:rw', {'source': 'vol', 'target': '/x', 'type': 'volume', 'read_only': True} ] assert service_dicts[0]['stop_grace_period'] == '30s' 
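    # --- Illustrative sketch only, not part of the original suite ---
    # The multi-file volume tests above compare service volumes via
    # VolumeSpec.repr(). This sketch documents the short-syntax round-trip
    # that comparison relies on, assuming the posix parsing rules already
    # used elsewhere in this file (VolumeSpec.parse behaves differently on
    # Windows); the test name and the paths used are hypothetical.
    def test_volume_spec_short_syntax_roundtrip_sketch(self):
        spec = VolumeSpec.parse('/host/path:/container/path:ro')
        assert spec.external == '/host/path'
        assert spec.internal == '/container/path'
        assert spec.mode == 'ro'
        assert spec.repr() == '/host/path:/container/path:ro'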
@mock.patch.dict(os.environ) def test_volume_mode_override(self): os.environ['COMPOSE_CONVERT_WINDOWS_PATHS'] = 'true' base_file = config.ConfigFile( 'base.yaml', { 'version': '2.3', 'services': { 'web': { 'image': 'example/web', 'volumes': ['/c:/b:rw'] } }, } ) override_file = config.ConfigFile( 'override.yaml', { 'version': '2.3', 'services': { 'web': { 'volumes': ['/c:/b:ro'] } } } ) details = config.ConfigDetails('.', [base_file, override_file]) service_dicts = config.load(details).services svc_volumes = list(map(lambda v: v.repr(), service_dicts[0]['volumes'])) assert svc_volumes == ['/c:/b:ro'] def test_undeclared_volume_v2(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2', 'services': { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': ['data0028:/data:ro'], }, }, } ) details = config.ConfigDetails('.', [base_file]) with pytest.raises(ConfigurationError): config.load(details) base_file = config.ConfigFile( 'base.yaml', { 'version': '2', 'services': { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': ['./data0028:/data:ro'], }, }, } ) details = config.ConfigDetails('.', [base_file]) config_data = config.load(details) volume = config_data.services[0].get('volumes')[0] assert not volume.is_named_volume def test_undeclared_volume_v1(self): base_file = config.ConfigFile( 'base.yaml', { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': ['data0028:/data:ro'], }, } ) details = config.ConfigDetails('.', [base_file]) config_data = config.load(details) volume = config_data.services[0].get('volumes')[0] assert volume.external == 'data0028' assert volume.is_named_volume def test_volumes_long_syntax(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '2.3', 'services': { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': [ { 'target': '/anonymous', 'type': 'volume' }, { 'source': '/abc', 'target': '/xyz', 'type': 'bind' }, { 'source': '\\\\.\\pipe\\abcd', 'target': '/named_pipe', 'type': 'npipe' }, { 'type': 'tmpfs', 'target': '/tmpfs' } ] }, }, }, ) details = config.ConfigDetails('.', [base_file]) config_data = config.load(details) volumes = config_data.services[0].get('volumes') anon_volume = [v for v in volumes if v.target == '/anonymous'][0] tmpfs_mount = [v for v in volumes if v.type == 'tmpfs'][0] host_mount = [v for v in volumes if v.type == 'bind'][0] npipe_mount = [v for v in volumes if v.type == 'npipe'][0] assert anon_volume.type == 'volume' assert not anon_volume.is_named_volume assert tmpfs_mount.target == '/tmpfs' assert not tmpfs_mount.is_named_volume assert host_mount.source == '/abc' assert host_mount.target == '/xyz' assert not host_mount.is_named_volume assert npipe_mount.source == '\\\\.\\pipe\\abcd' assert npipe_mount.target == '/named_pipe' assert not npipe_mount.is_named_volume def test_load_bind_mount_relative_path(self): expected_source = 'C:\\tmp\\web' if IS_WINDOWS_PLATFORM else '/tmp/web' base_file = config.ConfigFile( 'base.yaml', { 'version': '3.4', 'services': { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': [ {'type': 'bind', 'source': './web', 'target': '/web'}, ], }, }, }, ) details = config.ConfigDetails('/tmp', [base_file]) config_data = config.load(details) mount = config_data.services[0].get('volumes')[0] assert mount.target == '/web' assert mount.type == 'bind' assert mount.source == expected_source def test_load_bind_mount_relative_path_with_tilde(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '3.4', 'services': { 'web': { 'image': BUSYBOX_IMAGE_WITH_TAG, 'volumes': [ {'type': 'bind', 'source': 
'~/web', 'target': '/web'}, ], }, }, }, ) details = config.ConfigDetails('.', [base_file]) config_data = config.load(details) mount = config_data.services[0].get('volumes')[0] assert mount.target == '/web' assert mount.type == 'bind' assert ( not mount.source.startswith('~') and mount.source.endswith( '{}web'.format(os.path.sep) ) ) def test_config_invalid_ipam_config(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'networks': { 'foo': { 'driver': 'default', 'ipam': { 'driver': 'default', 'config': ['172.18.0.0/16'], } } } }, filename='filename.yml', ) ) assert ('networks.foo.ipam.config contains an invalid type,' ' it should be an object') in excinfo.exconly() def test_config_valid_ipam_config(self): ipam_config = { 'subnet': '172.28.0.0/16', 'ip_range': '172.28.5.0/24', 'gateway': '172.28.5.254', 'aux_addresses': { 'host1': '172.28.1.5', 'host2': '172.28.1.6', 'host3': '172.28.1.7', }, } networks = config.load( build_config_details( { 'networks': { 'foo': { 'driver': 'default', 'ipam': { 'driver': 'default', 'config': [ipam_config], } } } }, filename='filename.yml', ) ).networks assert 'foo' in networks assert networks['foo']['ipam']['config'] == [ipam_config] def test_config_valid_service_names(self): for valid_name in ['_', '-', '.__.', '_what-up.', 'what_.up----', 'whatup']: services = config.load( build_config_details( {valid_name: {'image': 'busybox'}}, 'tests/fixtures/extends', 'common.yml')).services assert services[0]['name'] == valid_name def test_config_hint(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'foo': {'image': 'busybox', 'privilige': 'something'}, } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "(did you mean 'privileged'?)" in excinfo.exconly() def test_load_errors_on_uppercase_with_no_image(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details({ 'Foo': {'build': '.'}, }, 'tests/fixtures/build-ctx')) assert "Service 'Foo' contains uppercase characters" in exc.exconly() def test_invalid_config_v1(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'foo': {'image': 1}, } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "foo.image contains an invalid type, it should be a string" \ in excinfo.exconly() def test_invalid_config_v2(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '2', 'services': { 'foo': {'image': 1}, }, }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "services.foo.image contains an invalid type, it should be a string" \ in excinfo.exconly() def test_invalid_config_build_and_image_specified_v1(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'foo': {'image': 'busybox', 'build': '.'}, }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "foo has both an image and build path specified." \
in excinfo.exconly() def test_invalid_config_type_should_be_an_array(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'foo': {'image': 'busybox', 'links': 'an_link'}, } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "foo.links contains an invalid type, it should be an array" \ in excinfo.exconly() def test_invalid_config_not_a_dictionary(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( ['foo', 'lol'], 'tests/fixtures/extends', 'filename.yml' ) ) assert "Top level object in 'filename.yml' needs to be an object" \ in excinfo.exconly() def test_invalid_config_not_unique_items(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': {'build': '.', 'devices': ['/dev/foo:/dev/foo', '/dev/foo:/dev/foo']} } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "has non-unique elements" in excinfo.exconly() def test_invalid_list_of_strings_format(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': {'build': '.', 'command': [1]} } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "web.command contains 1, which is an invalid type, it should be a string" \ in excinfo.exconly() def test_load_config_dockerfile_without_build_raises_error_v1(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details({ 'web': { 'image': 'busybox', 'dockerfile': 'Dockerfile.alt' } })) assert "web has both an image and alternate Dockerfile." in exc.exconly() def test_config_extra_hosts_string_raises_validation_error(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'extra_hosts': 'somehost:162.242.195.82'}} }, 'working_dir', 'filename.yml' ) ) assert "web.extra_hosts contains an invalid type" \ in excinfo.exconly() def test_config_extra_hosts_list_of_dicts_validation_error(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'extra_hosts': [ {'somehost': '162.242.195.82'}, {'otherhost': '50.31.209.229'} ]}} }, 'working_dir', 'filename.yml' ) ) assert "web.extra_hosts contains {\"somehost\": \"162.242.195.82\"}, " \ "which is an invalid type, it should be a string" \ in excinfo.exconly() def test_config_ulimits_invalid_keys_validation_error(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'ulimits': { 'nofile': { "not_soft_or_hard": 100, "soft": 10000, "hard": 20000, } } } } }, 'working_dir', 'filename.yml')) assert "web.ulimits.nofile contains unsupported option: 'not_soft_or_hard'" \ in exc.exconly() def test_config_ulimits_required_keys_validation_error(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'ulimits': {'nofile': {"soft": 10000}} } } }, 'working_dir', 'filename.yml')) assert "web.ulimits.nofile" in exc.exconly() assert "'hard' is a required property" in exc.exconly() def test_config_ulimits_soft_greater_than_hard_error(self): expected = "'soft' value can not be greater than 'hard' value" with pytest.raises(ConfigurationError) as exc: 
config.load(build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'ulimits': { 'nofile': {"soft": 10000, "hard": 1000} } } } }, 'working_dir', 'filename.yml')) assert expected in exc.exconly() def test_valid_config_which_allows_two_type_definitions(self): expose_values = [["8000"], [8000]] for expose in expose_values: service = config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'expose': expose}}}, 'working_dir', 'filename.yml' ) ).services assert service[0]['expose'] == expose def test_valid_config_oneof_string_or_list(self): entrypoint_values = [["sh"], "sh"] for entrypoint in entrypoint_values: service = config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'entrypoint': entrypoint}}}, 'working_dir', 'filename.yml' ) ).services assert service[0]['entrypoint'] == entrypoint def test_logs_warning_for_boolean_in_environment(self): config_details = build_config_details({ 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'environment': {'SHOW_STUFF': True} } } }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert "contains true, which is an invalid type" in exc.exconly() def test_config_valid_environment_dict_key_contains_dashes(self): services = config.load( build_config_details( { 'version': str(VERSION), 'services': { 'web': { 'image': 'busybox', 'environment': {'SPRING_JPA_HIBERNATE_DDL-AUTO': 'none'}}}}, 'working_dir', 'filename.yml' ) ).services assert services[0]['environment']['SPRING_JPA_HIBERNATE_DDL-AUTO'] == 'none' def test_load_yaml_with_yaml_error(self): tmpdir = tempfile.mkdtemp('invalid_yaml_test') self.addCleanup(shutil.rmtree, tmpdir) invalid_yaml_file = os.path.join(tmpdir, 'docker-compose.yml') with open(invalid_yaml_file, mode="w") as invalid_yaml_file_fh: invalid_yaml_file_fh.write(""" web: this is bogus: ok: what """) with pytest.raises(ConfigurationError) as exc: config.load_yaml(str(invalid_yaml_file)) assert 'line 3, column 22' in exc.exconly() def test_load_yaml_with_bom(self): tmpdir = tempfile.mkdtemp('bom_yaml') self.addCleanup(shutil.rmtree, tmpdir) bom_yaml = os.path.join(tmpdir, 'docker-compose.yml') with codecs.open(str(bom_yaml), 'w', encoding='utf-8') as f: f.write('''\ufeff version: '2.3' volumes: park_bom: ''') assert config.load_yaml(str(bom_yaml)) == { 'version': '2.3', 'volumes': {'park_bom': None} } def test_validate_extra_hosts_invalid(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details({ 'version': str(VERSION), 'services': { 'web': { 'image': 'alpine', 'extra_hosts': "www.example.com: 192.168.0.17", } } })) assert "web.extra_hosts contains an invalid type" in exc.exconly() def test_validate_extra_hosts_invalid_list(self): with pytest.raises(ConfigurationError) as exc: config.load(build_config_details({ 'version': str(VERSION), 'services': { 'web': { 'image': 'alpine', 'extra_hosts': [ {'www.example.com': '192.168.0.17'}, {'api.example.com': '192.168.0.18'} ], } } })) assert "which is an invalid type" in exc.exconly() def test_normalize_dns_options(self): actual = config.load(build_config_details({ 'version': str(VERSION), 'services': { 'web': { 'image': 'alpine', 'dns': '8.8.8.8', 'dns_search': 'domain.local', } } })) assert actual.services == [ { 'name': 'web', 'image': 'alpine', 'dns': ['8.8.8.8'], 'dns_search': ['domain.local'], } ] def test_tmpfs_option(self): actual = config.load(build_config_details({ 'version': '2', 
'services': { 'web': { 'image': 'alpine', 'tmpfs': '/run', } } })) assert actual.services == [ { 'name': 'web', 'image': 'alpine', 'tmpfs': ['/run'], } ] def test_oom_score_adj_option(self): actual = config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'alpine', 'oom_score_adj': 500 } } })) assert actual.services == [ { 'name': 'web', 'image': 'alpine', 'oom_score_adj': 500 } ] def test_swappiness_option(self): actual = config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'alpine', 'mem_swappiness': 10, } } })) assert actual.services == [ { 'name': 'web', 'image': 'alpine', 'mem_swappiness': 10, } ] @data( '2 ', '3.', '3.0.0', '3.0.a', '3.a', '3a') def test_invalid_version_formats(self, version): content = { 'version': version, 'services': { 'web': { 'image': 'alpine', } } } with pytest.raises(ConfigurationError) as exc: config.load(build_config_details(content)) assert 'Version "{}" in "filename.yml" is invalid.'.format(version) in exc.exconly() def test_group_add_option(self): actual = config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'alpine', 'group_add': ["docker", 777] } } })) assert actual.services == [ { 'name': 'web', 'image': 'alpine', 'group_add': ["docker", 777] } ] def test_dns_opt_option(self): actual = config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'alpine', 'dns_opt': ["use-vc", "no-tld-query"] } } })) assert actual.services == [ { 'name': 'web', 'image': 'alpine', 'dns_opt': ["use-vc", "no-tld-query"] } ] def test_isolation_option(self): actual = config.load(build_config_details({ 'services': { 'web': { 'image': 'win10', 'isolation': 'hyperv' } } })) assert actual.services == [ { 'name': 'web', 'image': 'win10', 'isolation': 'hyperv', } ] def test_runtime_option(self): actual = config.load(build_config_details({ 'services': { 'web': { 'image': 'nvidia/cuda', 'runtime': 'nvidia' } } })) assert actual.services == [ { 'name': 'web', 'image': 'nvidia/cuda', 'runtime': 'nvidia', } ] def test_merge_service_dicts_from_files_with_extends_in_base(self): base = { 'volumes': ['.:/app'], 'extends': {'service': 'app'} } override = { 'image': 'alpine:edge', } actual = config.merge_service_dicts_from_files( base, override, DEFAULT_VERSION) assert actual == { 'image': 'alpine:edge', 'volumes': ['.:/app'], 'extends': {'service': 'app'} } def test_merge_service_dicts_from_files_with_extends_in_override(self): base = { 'volumes': ['.:/app'], 'extends': {'service': 'app'} } override = { 'image': 'alpine:edge', 'extends': {'service': 'foo'} } actual = config.merge_service_dicts_from_files( base, override, DEFAULT_VERSION) assert actual == { 'image': 'alpine:edge', 'volumes': ['.:/app'], 'extends': {'service': 'foo'} } def test_merge_service_dicts_heterogeneous(self): base = { 'volumes': ['.:/app'], 'ports': ['5432'] } override = { 'image': 'alpine:edge', 'ports': [5432] } actual = config.merge_service_dicts_from_files( base, override, DEFAULT_VERSION) assert actual == { 'image': 'alpine:edge', 'volumes': ['.:/app'], 'ports': types.ServicePort.parse('5432') } def test_merge_service_dicts_heterogeneous_2(self): base = { 'volumes': ['.:/app'], 'ports': [5432] } override = { 'image': 'alpine:edge', 'ports': ['5432'] } actual = config.merge_service_dicts_from_files( base, override, DEFAULT_VERSION) assert actual == { 'image': 'alpine:edge', 'volumes': ['.:/app'], 'ports': types.ServicePort.parse('5432') } def test_merge_service_dicts_ports_sorting(self): base = { 'ports': 
[5432] } override = { 'image': 'alpine:edge', 'ports': ['5432/udp'] } actual = config.merge_service_dicts_from_files( base, override, DEFAULT_VERSION) assert len(actual['ports']) == 2 assert types.ServicePort.parse('5432')[0] in actual['ports'] assert types.ServicePort.parse('5432/udp')[0] in actual['ports'] def test_merge_service_dicts_heterogeneous_volumes(self): base = { 'volumes': ['/a:/b', '/x:/z'], } override = { 'image': 'alpine:edge', 'volumes': [ {'source': '/e', 'target': '/b', 'type': 'bind'}, {'source': '/c', 'target': '/d', 'type': 'bind'} ] } actual = config.merge_service_dicts_from_files( base, override, VERSION ) assert actual['volumes'] == [ {'source': '/e', 'target': '/b', 'type': 'bind'}, {'source': '/c', 'target': '/d', 'type': 'bind'}, '/x:/z' ] def test_merge_logging_v1(self): base = { 'image': 'alpine:edge', 'log_driver': 'something', 'log_opt': {'foo': 'three'}, } override = { 'image': 'alpine:edge', 'command': 'true', } actual = config.merge_service_dicts(base, override, V1) assert actual == { 'image': 'alpine:edge', 'log_driver': 'something', 'log_opt': {'foo': 'three'}, 'command': 'true', } def test_merge_logging_v2(self): base = { 'image': 'alpine:edge', 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000', 'timeout': '23' } } } override = { 'logging': { 'options': { 'timeout': '360', 'pretty-print': 'on' } } } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000', 'timeout': '360', 'pretty-print': 'on' } } } def test_merge_logging_v2_override_driver(self): base = { 'image': 'alpine:edge', 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000', 'timeout': '23' } } } override = { 'logging': { 'driver': 'syslog', 'options': { 'timeout': '360', 'pretty-print': 'on' } } } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'driver': 'syslog', 'options': { 'timeout': '360', 'pretty-print': 'on' } } } def test_merge_logging_v2_no_base_driver(self): base = { 'image': 'alpine:edge', 'logging': { 'options': { 'frequency': '2000', 'timeout': '23' } } } override = { 'logging': { 'driver': 'json-file', 'options': { 'timeout': '360', 'pretty-print': 'on' } } } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000', 'timeout': '360', 'pretty-print': 'on' } } } def test_merge_logging_v2_no_drivers(self): base = { 'image': 'alpine:edge', 'logging': { 'options': { 'frequency': '2000', 'timeout': '23' } } } override = { 'logging': { 'options': { 'timeout': '360', 'pretty-print': 'on' } } } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'options': { 'frequency': '2000', 'timeout': '360', 'pretty-print': 'on' } } } def test_merge_logging_v2_no_override_options(self): base = { 'image': 'alpine:edge', 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000', 'timeout': '23' } } } override = { 'logging': { 'driver': 'syslog' } } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'driver': 'syslog', } } def test_merge_logging_v2_no_base(self): base = { 'image': 'alpine:edge' } override = { 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000' } } } actual = config.merge_service_dicts(base, override, 
VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'driver': 'json-file', 'options': { 'frequency': '2000' } } } def test_merge_logging_v2_no_override(self): base = { 'image': 'alpine:edge', 'logging': { 'driver': 'syslog', 'options': { 'frequency': '2000' } } } override = {} actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'alpine:edge', 'logging': { 'driver': 'syslog', 'options': { 'frequency': '2000' } } } def test_merge_mixed_ports(self): base = { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'ports': [ { 'target': '1245', 'published': '1245', 'protocol': 'udp', } ] } override = { 'ports': ['1245:1245/udp'] } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': BUSYBOX_IMAGE_WITH_TAG, 'command': 'top', 'ports': [types.ServicePort('1245', '1245', 'udp', None, None)] } def test_merge_depends_on_no_override(self): base = { 'image': 'busybox', 'depends_on': { 'app1': {'condition': 'service_started'}, 'app2': {'condition': 'service_healthy'}, 'app3': {'condition': 'service_completed_successfully'} } } override = {} actual = config.merge_service_dicts(base, override, VERSION) assert actual == base def test_merge_depends_on_mixed_syntax(self): base = { 'image': 'busybox', 'depends_on': { 'app1': {'condition': 'service_started'}, 'app2': {'condition': 'service_healthy'}, 'app3': {'condition': 'service_completed_successfully'} } } override = { 'depends_on': ['app4'] } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'busybox', 'depends_on': { 'app1': {'condition': 'service_started'}, 'app2': {'condition': 'service_healthy'}, 'app3': {'condition': 'service_completed_successfully'}, 'app4': {'condition': 'service_started'}, } } def test_empty_environment_key_allowed(self): service_dict = config.load( build_config_details( { 'web': { 'build': '.', 'environment': { 'POSTGRES_PASSWORD': '' }, }, }, '.', None, ) ).services[0] assert service_dict['environment']['POSTGRES_PASSWORD'] == '' def test_merge_pid(self): # Regression: https://github.com/docker/compose/issues/4184 base = { 'image': 'busybox', 'pid': 'host' } override = { 'labels': {'com.docker.compose.test': 'yes'} } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'busybox', 'pid': 'host', 'labels': {'com.docker.compose.test': 'yes'} } def test_merge_different_secrets(self): base = { 'image': 'busybox', 'secrets': [ {'source': 'src.txt'} ] } override = {'secrets': ['other-src.txt']} actual = config.merge_service_dicts(base, override, VERSION) assert secret_sort(actual['secrets']) == secret_sort([ {'source': 'src.txt'}, {'source': 'other-src.txt'} ]) def test_merge_secrets_override(self): base = { 'image': 'busybox', 'secrets': ['src.txt'], } override = { 'secrets': [ { 'source': 'src.txt', 'target': 'data.txt', 'mode': 0o400 } ] } actual = config.merge_service_dicts(base, override, VERSION) assert actual['secrets'] == override['secrets'] def test_merge_different_configs(self): base = { 'image': 'busybox', 'configs': [ {'source': 'src.txt'} ] } override = {'configs': ['other-src.txt']} actual = config.merge_service_dicts(base, override, VERSION) assert secret_sort(actual['configs']) == secret_sort([ {'source': 'src.txt'}, {'source': 'other-src.txt'} ]) def test_merge_configs_override(self): base = { 'image': 'busybox', 'configs': ['src.txt'], } override = { 'configs': [ { 'source': 'src.txt', 'target': 'data.txt', 'mode': 0o400 } ] } actual = 
config.merge_service_dicts(base, override, VERSION) assert actual['configs'] == override['configs'] def test_merge_deploy(self): base = { 'image': 'busybox', } override = { 'deploy': { 'mode': 'global', 'restart_policy': { 'condition': 'on-failure' } } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['deploy'] == override['deploy'] def test_merge_deploy_override(self): base = { 'deploy': { 'endpoint_mode': 'vip', 'labels': ['com.docker.compose.a=1', 'com.docker.compose.b=2'], 'mode': 'replicated', 'placement': { 'max_replicas_per_node': 1, 'constraints': [ 'node.role == manager', 'engine.labels.aws == true' ], 'preferences': [ {'spread': 'node.labels.zone'}, {'spread': 'x.d.z'} ] }, 'replicas': 3, 'resources': { 'limits': {'cpus': '0.50', 'memory': '50m'}, 'reservations': { 'cpus': '0.1', 'generic_resources': [ {'discrete_resource_spec': {'kind': 'abc', 'value': 123}} ], 'memory': '15m' } }, 'restart_policy': {'condition': 'any', 'delay': '10s'}, 'update_config': {'delay': '10s', 'max_failure_ratio': 0.3} }, 'image': 'hello-world' } override = { 'deploy': { 'labels': { 'com.docker.compose.b': '21', 'com.docker.compose.c': '3' }, 'placement': { 'constraints': ['node.role == worker', 'engine.labels.dev == true'], 'preferences': [{'spread': 'node.labels.zone'}, {'spread': 'x.d.s'}] }, 'resources': { 'limits': {'memory': '200m'}, 'reservations': { 'cpus': '0.78', 'generic_resources': [ {'discrete_resource_spec': {'kind': 'abc', 'value': 134}}, {'discrete_resource_spec': {'kind': 'xyz', 'value': 0.1}} ] } }, 'restart_policy': {'condition': 'on-failure', 'max_attempts': 42}, 'update_config': {'max_failure_ratio': 0.712, 'parallelism': 4} } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['deploy'] == { 'mode': 'replicated', 'endpoint_mode': 'vip', 'labels': { 'com.docker.compose.a': '1', 'com.docker.compose.b': '21', 'com.docker.compose.c': '3' }, 'placement': { 'max_replicas_per_node': 1, 'constraints': [ 'engine.labels.aws == true', 'engine.labels.dev == true', 'node.role == manager', 'node.role == worker' ], 'preferences': [ {'spread': 'node.labels.zone'}, {'spread': 'x.d.s'}, {'spread': 'x.d.z'} ] }, 'replicas': 3, 'resources': { 'limits': {'cpus': '0.50', 'memory': '200m'}, 'reservations': { 'cpus': '0.78', 'memory': '15m', 'generic_resources': [ {'discrete_resource_spec': {'kind': 'abc', 'value': 134}}, {'discrete_resource_spec': {'kind': 'xyz', 'value': 0.1}}, ] } }, 'restart_policy': { 'condition': 'on-failure', 'delay': '10s', 'max_attempts': 42, }, 'update_config': { 'max_failure_ratio': 0.712, 'delay': '10s', 'parallelism': 4 } } def test_merge_credential_spec(self): base = { 'image': 'bb', 'credential_spec': { 'file': '/hello-world', } } override = { 'credential_spec': { 'registry': 'revolution.com', } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['credential_spec'] == override['credential_spec'] def test_merge_scale(self): base = { 'image': 'bar', 'scale': 2, } override = { 'scale': 4, } actual = config.merge_service_dicts(base, override, VERSION) assert actual == {'image': 'bar', 'scale': 4} def test_merge_blkio_config(self): base = { 'image': 'bar', 'blkio_config': { 'weight': 300, 'weight_device': [ {'path': '/dev/sda1', 'weight': 200} ], 'device_read_iops': [ {'path': '/dev/sda1', 'rate': 300} ], 'device_write_iops': [ {'path': '/dev/sda1', 'rate': 1000} ] } } override = { 'blkio_config': { 'weight': 450, 'weight_device': [ {'path': '/dev/sda2', 'weight': 400} ], 'device_read_iops': [ {'path': 
'/dev/sda1', 'rate': 2000} ], 'device_read_bps': [ {'path': '/dev/sda1', 'rate': 1024} ] } } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'bar', 'blkio_config': { 'weight': override['blkio_config']['weight'], 'weight_device': ( base['blkio_config']['weight_device'] + override['blkio_config']['weight_device'] ), 'device_read_iops': override['blkio_config']['device_read_iops'], 'device_read_bps': override['blkio_config']['device_read_bps'], 'device_write_iops': base['blkio_config']['device_write_iops'] } } def test_merge_extra_hosts(self): base = { 'image': 'bar', 'extra_hosts': { 'foo': '1.2.3.4', } } override = { 'extra_hosts': ['bar:5.6.7.8', 'foo:127.0.0.1'] } actual = config.merge_service_dicts(base, override, VERSION) assert actual['extra_hosts'] == { 'foo': '127.0.0.1', 'bar': '5.6.7.8', } def test_merge_healthcheck_config(self): base = { 'image': 'bar', 'healthcheck': { 'start_period': 1000, 'interval': 3000, 'test': ['true'] } } override = { 'healthcheck': { 'interval': 5000, 'timeout': 10000, 'test': ['echo', 'OK'], } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['healthcheck'] == { 'start_period': base['healthcheck']['start_period'], 'test': override['healthcheck']['test'], 'interval': override['healthcheck']['interval'], 'timeout': override['healthcheck']['timeout'], } def test_merge_healthcheck_override_disables(self): base = { 'image': 'bar', 'healthcheck': { 'start_period': 1000, 'interval': 3000, 'timeout': 2000, 'retries': 3, 'test': ['true'] } } override = { 'healthcheck': { 'disabled': True } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['healthcheck'] == {'disabled': True} def test_merge_healthcheck_override_enables(self): base = { 'image': 'bar', 'healthcheck': { 'disabled': True } } override = { 'healthcheck': { 'disabled': False, 'start_period': 1000, 'interval': 3000, 'timeout': 2000, 'retries': 3, 'test': ['true'] } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['healthcheck'] == override['healthcheck'] def test_merge_device_cgroup_rules(self): base = { 'image': 'bar', 'device_cgroup_rules': ['c 7:128 rwm', 'x 3:244 rw'] } override = { 'device_cgroup_rules': ['c 7:128 rwm', 'f 0:128 n'] } actual = config.merge_service_dicts(base, override, VERSION) assert sorted(actual['device_cgroup_rules']) == sorted( ['c 7:128 rwm', 'x 3:244 rw', 'f 0:128 n'] ) def test_merge_isolation(self): base = { 'image': 'bar', 'isolation': 'default', } override = { 'isolation': 'hyperv', } actual = config.merge_service_dicts(base, override, VERSION) assert actual == { 'image': 'bar', 'isolation': 'hyperv', } def test_merge_storage_opt(self): base = { 'image': 'bar', 'storage_opt': { 'size': '1G', 'readonly': 'false', } } override = { 'storage_opt': { 'size': '2G', 'encryption': 'aes', } } actual = config.merge_service_dicts(base, override, VERSION) assert actual['storage_opt'] == { 'size': '2G', 'readonly': 'false', 'encryption': 'aes', } def test_external_volume_config(self): config_details = build_config_details({ 'version': '2', 'services': { 'bogus': {'image': 'busybox'} }, 'volumes': { 'ext': {'external': True}, 'ext2': {'external': {'name': 'aliased'}} } }) config_result = config.load(config_details) volumes = config_result.volumes assert 'ext' in volumes assert volumes['ext']['external'] is True assert 'ext2' in volumes assert volumes['ext2']['external']['name'] == 'aliased' def test_external_volume_invalid_config(self): config_details = 
build_config_details({ 'version': '2', 'services': { 'bogus': {'image': 'busybox'} }, 'volumes': { 'ext': {'external': True, 'driver': 'foo'} } }) with pytest.raises(ConfigurationError): config.load(config_details) def test_depends_on_orders_services(self): config_details = build_config_details({ 'version': '2', 'services': { 'one': {'image': 'busybox', 'depends_on': ['three', 'two']}, 'two': {'image': 'busybox', 'depends_on': ['three']}, 'three': {'image': 'busybox'}, }, }) actual = config.load(config_details) assert ( [service['name'] for service in actual.services] == ['three', 'two', 'one'] ) def test_depends_on_unknown_service_errors(self): config_details = build_config_details({ 'version': '2', 'services': { 'one': {'image': 'busybox', 'depends_on': ['three']}, }, }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert "Service 'one' depends on service 'three'" in exc.exconly() def test_linked_service_is_undefined(self): with pytest.raises(ConfigurationError): config.load( build_config_details({ 'version': '2', 'services': { 'web': {'image': 'busybox', 'links': ['db:db']}, }, }) ) def test_load_dockerfile_without_context(self): config_details = build_config_details({ 'version': '2', 'services': { 'one': {'build': {'dockerfile': 'Dockerfile.foo'}}, }, }) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert 'has neither an image nor a build context' in exc.exconly() def test_load_secrets(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '3.1', 'services': { 'web': { 'image': 'example/web', 'secrets': [ 'one', { 'source': 'source', 'target': 'target', 'uid': '100', 'gid': '200', 'mode': 0o777, }, ], }, }, 'secrets': { 'one': {'file': 'secret.txt'}, }, }) details = config.ConfigDetails('.', [base_file]) service_dicts = config.load(details).services expected = [ { 'name': 'web', 'image': 'example/web', 'secrets': [ types.ServiceSecret('one', None, None, None, None, None), types.ServiceSecret('source', 'target', '100', '200', 0o777, None), ], }, ] assert service_sort(service_dicts) == service_sort(expected) def test_load_secrets_multi_file(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '3.1', 'services': { 'web': { 'image': 'example/web', 'secrets': ['one'], }, }, 'secrets': { 'one': {'file': 'secret.txt'}, }, }) override_file = config.ConfigFile( 'base.yaml', { 'version': '3.1', 'services': { 'web': { 'secrets': [ { 'source': 'source', 'target': 'target', 'uid': '100', 'gid': '200', 'mode': 0o777, }, ], }, }, }) details = config.ConfigDetails('.', [base_file, override_file]) service_dicts = config.load(details).services expected = [ { 'name': 'web', 'image': 'example/web', 'secrets': [ types.ServiceSecret('one', None, None, None, None, None), types.ServiceSecret('source', 'target', '100', '200', 0o777, None), ], }, ] assert service_sort(service_dicts) == service_sort(expected) def test_load_configs(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '3.3', 'services': { 'web': { 'image': 'example/web', 'configs': [ 'one', { 'source': 'source', 'target': 'target', 'uid': '100', 'gid': '200', 'mode': 0o777, }, ], }, }, 'configs': { 'one': {'file': 'secret.txt'}, }, }) details = config.ConfigDetails('.', [base_file]) service_dicts = config.load(details).services expected = [ { 'name': 'web', 'image': 'example/web', 'configs': [ types.ServiceConfig('one', None, None, None, None, None), types.ServiceConfig('source', 'target', '100', '200', 0o777, None), ], }, ] assert 
service_sort(service_dicts) == service_sort(expected) def test_load_configs_multi_file(self): base_file = config.ConfigFile( 'base.yaml', { 'version': '3.3', 'services': { 'web': { 'image': 'example/web', 'configs': ['one'], }, }, 'configs': { 'one': {'file': 'secret.txt'}, }, }) override_file = config.ConfigFile( 'base.yaml', { 'version': '3.3', 'services': { 'web': { 'configs': [ { 'source': 'source', 'target': 'target', 'uid': '100', 'gid': '200', 'mode': 0o777, }, ], }, }, }) details = config.ConfigDetails('.', [base_file, override_file]) service_dicts = config.load(details).services expected = [ { 'name': 'web', 'image': 'example/web', 'configs': [ types.ServiceConfig('one', None, None, None, None, None), types.ServiceConfig('source', 'target', '100', '200', 0o777, None), ], }, ] assert service_sort(service_dicts) == service_sort(expected) def test_config_convertible_label_types(self): config_details = build_config_details( { 'version': '3.5', 'services': { 'web': { 'build': { 'labels': {'testbuild': True}, 'context': os.getcwd() }, 'labels': { "key": 12345 } }, }, 'networks': { 'foo': { 'labels': {'network.ips.max': 1023} } }, 'volumes': { 'foo': { 'labels': {'volume.is_readonly': False} } }, 'secrets': { 'foo': { 'labels': {'secret.data.expires': 1546282120} } }, 'configs': { 'foo': { 'labels': {'config.data.correction.value': -0.1412} } } } ) loaded_config = config.load(config_details) assert loaded_config.services[0]['build']['labels'] == {'testbuild': 'True'} assert loaded_config.services[0]['labels'] == {'key': '12345'} assert loaded_config.networks['foo']['labels']['network.ips.max'] == '1023' assert loaded_config.volumes['foo']['labels']['volume.is_readonly'] == 'False' assert loaded_config.secrets['foo']['labels']['secret.data.expires'] == '1546282120' assert loaded_config.configs['foo']['labels']['config.data.correction.value'] == '-0.1412' def test_config_invalid_label_types(self): config_details = build_config_details({ 'version': '2.3', 'volumes': { 'foo': {'labels': [1, 2, 3]} } }) with pytest.raises(ConfigurationError): config.load(config_details) def test_service_volume_invalid_config(self): config_details = build_config_details( { 'version': '3.2', 'services': { 'web': { 'build': { 'context': '.', 'args': None, }, 'volumes': [ { "type": "volume", "source": "/data", "garbage": { "and": "error" } } ] } } } ) with pytest.raises(ConfigurationError) as exc: config.load(config_details) assert "services.web.volumes contains unsupported option: 'garbage'" in exc.exconly() def test_config_valid_service_label_validation(self): config_details = build_config_details( { 'version': '3.5', 'services': { 'web': { 'image': 'busybox', 'labels': { "key": "string" } }, }, } ) config.load(config_details) def test_config_duplicate_mount_points(self): config1 = build_config_details( { 'version': '3.5', 'services': { 'web': { 'image': 'busybox', 'volumes': ['/tmp/foo:/tmp/foo', '/tmp/foo:/tmp/foo:rw'] } } } ) config2 = build_config_details( { 'version': '3.5', 'services': { 'web': { 'image': 'busybox', 'volumes': ['/x:/y', '/z:/y'] } } } ) with self.assertRaises(ConfigurationError) as e: config.load(config1) self.assertEqual(str(e.exception), 'Duplicate mount points: [%s]' % ( ', '.join(['/tmp/foo:/tmp/foo:rw']*2))) with self.assertRaises(ConfigurationError) as e: config.load(config2) self.assertEqual(str(e.exception), 'Duplicate mount points: [%s]' % ( ', '.join(['/x:/y:rw', '/z:/y:rw']))) class NetworkModeTest(unittest.TestCase): def test_network_mode_standard(self): config_data = 
config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'busybox', 'command': "top", 'network_mode': 'bridge', }, }, })) assert config_data.services[0]['network_mode'] == 'bridge' def test_network_mode_standard_v1(self): config_data = config.load(build_config_details({ 'web': { 'image': 'busybox', 'command': "top", 'net': 'bridge', }, })) assert config_data.services[0]['network_mode'] == 'bridge' assert 'net' not in config_data.services[0] def test_network_mode_container(self): config_data = config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'busybox', 'command': "top", 'network_mode': 'container:foo', }, }, })) assert config_data.services[0]['network_mode'] == 'container:foo' def test_network_mode_container_v1(self): config_data = config.load(build_config_details({ 'web': { 'image': 'busybox', 'command': "top", 'net': 'container:foo', }, })) assert config_data.services[0]['network_mode'] == 'container:foo' def test_network_mode_service(self): config_data = config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'busybox', 'command': "top", 'network_mode': 'service:foo', }, 'foo': { 'image': 'busybox', 'command': "top", }, }, })) assert config_data.services[1]['network_mode'] == 'service:foo' def test_network_mode_service_v1(self): config_data = config.load(build_config_details({ 'web': { 'image': 'busybox', 'command': "top", 'net': 'container:foo', }, 'foo': { 'image': 'busybox', 'command': "top", }, })) assert config_data.services[1]['network_mode'] == 'service:foo' def test_network_mode_service_nonexistent(self): with pytest.raises(ConfigurationError) as excinfo: config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'busybox', 'command': "top", 'network_mode': 'service:foo', }, }, })) assert "service 'foo' which is undefined" in excinfo.exconly() def test_network_mode_plus_networks_is_invalid(self): with pytest.raises(ConfigurationError) as excinfo: config.load(build_config_details({ 'version': '2', 'services': { 'web': { 'image': 'busybox', 'command': "top", 'network_mode': 'bridge', 'networks': ['front'], }, }, 'networks': { 'front': None, } })) assert "'network_mode' and 'networks' cannot be combined" in excinfo.exconly() class PortsTest(unittest.TestCase): INVALID_PORTS_TYPES = [ {"1": "8000"}, False, "8000", 8000, ] NON_UNIQUE_SINGLE_PORTS = [ ["8000", "8000"], ] INVALID_PORT_MAPPINGS = [ ["8000-8004:8000-8002"], ["4242:4242-4244"], ] VALID_SINGLE_PORTS = [ ["8000"], ["8000/tcp"], ["8000", "9000"], [8000], [8000, 9000], ] VALID_PORT_MAPPINGS = [ ["8000:8050"], ["49153-49154:3002-3003"], ] def test_config_invalid_ports_type_validation(self): for invalid_ports in self.INVALID_PORTS_TYPES: with pytest.raises(ConfigurationError) as exc: self.check_config({'ports': invalid_ports}) assert "contains an invalid type" in exc.value.msg def test_config_non_unique_ports_validation(self): for invalid_ports in self.NON_UNIQUE_SINGLE_PORTS: with pytest.raises(ConfigurationError) as exc: self.check_config({'ports': invalid_ports}) assert "non-unique" in exc.value.msg @pytest.mark.skip(reason="Validator is one_off (generic error)") def test_config_invalid_ports_format_validation(self): for invalid_ports in self.INVALID_PORT_MAPPINGS: with pytest.raises(ConfigurationError) as exc: self.check_config({'ports': invalid_ports}) assert "Port ranges don't match in length" in exc.value.msg def test_config_valid_ports_format_validation(self): for valid_ports in self.VALID_SINGLE_PORTS + 
self.VALID_PORT_MAPPINGS: self.check_config({'ports': valid_ports}) def test_config_invalid_expose_type_validation(self): for invalid_expose in self.INVALID_PORTS_TYPES: with pytest.raises(ConfigurationError) as exc: self.check_config({'expose': invalid_expose}) assert "contains an invalid type" in exc.value.msg def test_config_non_unique_expose_validation(self): for invalid_expose in self.NON_UNIQUE_SINGLE_PORTS: with pytest.raises(ConfigurationError) as exc: self.check_config({'expose': invalid_expose}) assert "non-unique" in exc.value.msg def test_config_invalid_expose_format_validation(self): # Valid port mappings ARE NOT valid 'expose' entries for invalid_expose in self.INVALID_PORT_MAPPINGS + self.VALID_PORT_MAPPINGS: with pytest.raises(ConfigurationError) as exc: self.check_config({'expose': invalid_expose}) assert "should be of the format" in exc.value.msg def test_config_valid_expose_format_validation(self): # Valid single ports ARE valid 'expose' entries for valid_expose in self.VALID_SINGLE_PORTS: self.check_config({'expose': valid_expose}) def check_config(self, cfg): config.load( build_config_details({ 'version': '2.3', 'services': { 'web': dict(image='busybox', **cfg) }, }, 'working_dir', 'filename.yml') ) class SubnetTest(unittest.TestCase): INVALID_SUBNET_TYPES = [ None, False, 10, ] INVALID_SUBNET_MAPPINGS = [ "", "192.168.0.1/sdfsdfs", "192.168.0.1/", "192.168.0.1/33", "192.168.0.1/01", "192.168.0.1", "fe80:0000:0000:0000:0204:61ff:fe9d:f156/sdfsdfs", "fe80:0000:0000:0000:0204:61ff:fe9d:f156/", "fe80:0000:0000:0000:0204:61ff:fe9d:f156/129", "fe80:0000:0000:0000:0204:61ff:fe9d:f156/01", "fe80:0000:0000:0000:0204:61ff:fe9d:f156", "ge80:0000:0000:0000:0204:61ff:fe9d:f156/128", "192.168.0.1/31/31", ] VALID_SUBNET_MAPPINGS = [ "192.168.0.1/0", "192.168.0.1/32", "fe80:0000:0000:0000:0204:61ff:fe9d:f156/0", "fe80:0000:0000:0000:0204:61ff:fe9d:f156/128", "1:2:3:4:5:6:7:8/0", "1::/0", "1:2:3:4:5:6:7::/0", "1::8/0", "1:2:3:4:5:6::8/0", "::/0", "::8/0", "::2:3:4:5:6:7:8/0", "fe80::7:8%eth0/0", "fe80::7:8%1/0", "::255.255.255.255/0", "::ffff:255.255.255.255/0", "::ffff:0:255.255.255.255/0", "2001:db8:3:4::192.0.2.33/0", "64:ff9b::192.0.2.33/0", ] def test_config_invalid_subnet_type_validation(self): for invalid_subnet in self.INVALID_SUBNET_TYPES: with pytest.raises(ConfigurationError) as exc: self.check_config(invalid_subnet) assert "contains an invalid type" in exc.value.msg def test_config_invalid_subnet_format_validation(self): for invalid_subnet in self.INVALID_SUBNET_MAPPINGS: with pytest.raises(ConfigurationError) as exc: self.check_config(invalid_subnet) assert "should use the CIDR format" in exc.value.msg def test_config_valid_subnet_format_validation(self): for valid_subnet in self.VALID_SUBNET_MAPPINGS: self.check_config(valid_subnet) def check_config(self, subnet): config.load( build_config_details({ 'version': '3.5', 'services': { 'web': { 'image': 'busybox' } }, 'networks': { 'default': { 'ipam': { 'config': [ { 'subnet': subnet } ], 'driver': 'default' } } } }) ) class InterpolationTest(unittest.TestCase): @mock.patch.dict(os.environ) def test_config_file_with_environment_file(self): project_dir = 'tests/fixtures/default-env-file' service_dicts = config.load( config.find( project_dir, None, Environment.from_env_file(project_dir) ) ).services assert service_dicts[0] == { 'name': 'web', 'image': 'alpine:latest', 'ports': [ types.ServicePort.parse('5643')[0], types.ServicePort.parse('9999')[0] ], 'command': 'true' } @mock.patch.dict(os.environ) def 
test_config_file_with_options_environment_file(self): project_dir = 'tests/fixtures/default-env-file' # env-file is relative to current working dir env = Environment.from_env_file(project_dir, project_dir + '/.env2') service_dicts = config.load( config.find( project_dir, None, env ) ).services assert service_dicts[0] == { 'name': 'web', 'image': 'alpine:latest', 'ports': [ types.ServicePort.parse('5644')[0], types.ServicePort.parse('9998')[0] ], 'command': 'false' } @mock.patch.dict(os.environ) def test_config_file_with_environment_variable(self): project_dir = 'tests/fixtures/environment-interpolation' os.environ.update( IMAGE="busybox", HOST_PORT="80", LABEL_VALUE="myvalue", ) service_dicts = config.load( config.find( project_dir, None, Environment.from_env_file(project_dir) ) ).services assert service_dicts == [ { 'name': 'web', 'image': 'busybox', 'ports': types.ServicePort.parse('80:8000'), 'labels': {'mylabel': 'myvalue'}, 'hostname': 'host-', 'command': '${ESCAPED}', } ] @mock.patch.dict(os.environ) def test_config_file_with_environment_variable_with_defaults(self): project_dir = 'tests/fixtures/environment-interpolation-with-defaults' os.environ.update( IMAGE="busybox", ) service_dicts = config.load( config.find( project_dir, None, Environment.from_env_file(project_dir) ) ).services assert service_dicts == [ { 'name': 'web', 'image': 'busybox', 'ports': types.ServicePort.parse('80:8000'), 'hostname': 'host-', } ] @mock.patch.dict(os.environ) def test_unset_variable_produces_warning(self): os.environ.pop('FOO', None) os.environ.pop('BAR', None) config_details = build_config_details( { 'web': { 'image': '${FOO}', 'command': '${BAR}', 'container_name': '${BAR}', }, }, '.', None, ) with mock.patch('compose.config.environment.log') as log: config.load(config_details) assert 2 == log.warning.call_count warnings = sorted(args[0][0] for args in log.warning.call_args_list) assert 'BAR' in warnings[0] assert 'FOO' in warnings[1] @pytest.mark.skip(reason='compatibility mode was removed internally') def test_compatibility_mode_warnings(self): config_details = build_config_details({ 'version': '3.5', 'services': { 'web': { 'deploy': { 'labels': ['abc=def'], 'endpoint_mode': 'dnsrr', 'update_config': {'max_failure_ratio': 0.4}, 'placement': {'constraints': ['node.id==deadbeef']}, 'resources': { 'reservations': {'cpus': '0.2'} }, 'restart_policy': { 'delay': '2s', 'window': '12s' } }, 'image': 'busybox' } } }) with mock.patch('compose.config.config.log') as log: config.load(config_details, compatibility=True) assert log.warning.call_count == 1 warn_message = log.warning.call_args[0][0] assert warn_message.startswith( 'The following deploy sub-keys are not supported in compatibility mode' ) assert 'labels' in warn_message assert 'endpoint_mode' in warn_message assert 'update_config' in warn_message assert 'resources.reservations.cpus' in warn_message assert 'restart_policy.delay' in warn_message assert 'restart_policy.window' in warn_message @pytest.mark.skip(reason='compatibility mode was removed internally') def test_compatibility_mode_load(self): config_details = build_config_details({ 'version': '3.5', 'services': { 'foo': { 'image': 'alpine:3.10.1', 'deploy': { 'replicas': 3, 'restart_policy': { 'condition': 'any', 'max_attempts': 7, }, 'resources': { 'limits': {'memory': '300M', 'cpus': '0.7'}, 'reservations': {'memory': '100M'}, }, }, 'credential_spec': { 'file': 'spec.json' }, }, }, }) with mock.patch('compose.config.config.log') as log: cfg = config.load(config_details, 
compatibility=True) assert log.warning.call_count == 0 service_dict = cfg.services[0] assert service_dict == { 'image': 'alpine:3.10.1', 'scale': 3, 'restart': {'MaximumRetryCount': 7, 'Name': 'always'}, 'mem_limit': '300M', 'mem_reservation': '100M', 'cpus': 0.7, 'name': 'foo', 'security_opt': ['credentialspec=file://spec.json'], } @mock.patch.dict(os.environ) def test_invalid_interpolation(self): with pytest.raises(config.ConfigurationError) as cm: config.load( build_config_details( {'web': {'image': '${'}}, 'working_dir', 'filename.yml' ) ) assert 'Invalid' in cm.value.msg assert 'for "image" option' in cm.value.msg assert 'in service "web"' in cm.value.msg assert '"${"' in cm.value.msg @mock.patch.dict(os.environ) def test_interpolation_secrets_section(self): os.environ['FOO'] = 'baz.bar' config_dict = config.load(build_config_details({ 'version': '3.1', 'secrets': { 'secretdata': { 'external': {'name': '$FOO'} } } })) assert config_dict.secrets == { 'secretdata': { 'external': {'name': 'baz.bar'}, 'name': 'baz.bar' } } @mock.patch.dict(os.environ) def test_interpolation_configs_section(self): os.environ['FOO'] = 'baz.bar' config_dict = config.load(build_config_details({ 'version': '3.3', 'configs': { 'configdata': { 'external': {'name': '$FOO'} } } })) assert config_dict.configs == { 'configdata': { 'external': {'name': 'baz.bar'}, 'name': 'baz.bar' } } class VolumeConfigTest(unittest.TestCase): def test_no_binding(self): d = make_service_dict('foo', {'build': '.', 'volumes': ['/data']}, working_dir='.') assert d['volumes'] == ['/data'] @mock.patch.dict(os.environ) def test_volume_binding_with_environment_variable(self): os.environ['VOLUME_PATH'] = '/host/path' d = config.load( build_config_details( {'foo': {'build': '.', 'volumes': ['${VOLUME_PATH}:/container/path']}}, '.', None, ) ).services[0] assert d['volumes'] == [VolumeSpec.parse('/host/path:/container/path')] @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') def test_volumes_order_is_preserved(self): volumes = ['/{0}:/{0}'.format(i) for i in range(0, 6)] shuffle(volumes) cfg = make_service_dict('foo', {'build': '.', 'volumes': volumes}) assert cfg['volumes'] == volumes @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') @mock.patch.dict(os.environ) def test_volume_binding_with_home(self): os.environ['HOME'] = '/home/user' d = make_service_dict('foo', {'build': '.', 'volumes': ['~:/container/path']}, working_dir='.') assert d['volumes'] == ['/home/user:/container/path'] def test_name_does_not_expand(self): d = make_service_dict('foo', {'build': '.', 'volumes': ['mydatavolume:/data']}, working_dir='.') assert d['volumes'] == ['mydatavolume:/data'] def test_absolute_posix_path_does_not_expand(self): d = make_service_dict('foo', {'build': '.', 'volumes': ['/var/lib/data:/data']}, working_dir='.') assert d['volumes'] == ['/var/lib/data:/data'] def test_absolute_windows_path_does_not_expand(self): d = make_service_dict('foo', {'build': '.', 'volumes': ['c:\\data:/data']}, working_dir='.') assert d['volumes'] == ['c:\\data:/data'] @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='posix paths') def test_relative_path_does_expand_posix(self): d = make_service_dict( 'foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='/home/me/myproject') assert d['volumes'] == ['/home/me/myproject/data:/data'] d = make_service_dict( 'foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='/home/me/myproject') assert d['volumes'] == ['/home/me/myproject:/data'] d = make_service_dict( 'foo', {'build': '.', 'volumes': 
['../otherproject:/data']}, working_dir='/home/me/myproject') assert d['volumes'] == ['/home/me/otherproject:/data'] @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='windows paths') def test_relative_path_does_expand_windows(self): d = make_service_dict( 'foo', {'build': '.', 'volumes': ['./data:/data']}, working_dir='c:\\Users\\me\\myproject') assert d['volumes'] == ['c:\\Users\\me\\myproject\\data:/data'] d = make_service_dict( 'foo', {'build': '.', 'volumes': ['.:/data']}, working_dir='c:\\Users\\me\\myproject') assert d['volumes'] == ['c:\\Users\\me\\myproject:/data'] d = make_service_dict( 'foo', {'build': '.', 'volumes': ['../otherproject:/data']}, working_dir='c:\\Users\\me\\myproject') assert d['volumes'] == ['c:\\Users\\me\\otherproject:/data'] @mock.patch.dict(os.environ) def test_home_directory_with_driver_does_not_expand(self): os.environ['NAME'] = 'surprise!' d = make_service_dict('foo', { 'build': '.', 'volumes': ['~:/data'], 'volume_driver': 'foodriver', }, working_dir='.') assert d['volumes'] == ['~:/data'] def test_volume_path_with_non_ascii_directory(self): volume = '/Füü/data:/data' container_path = config.resolve_volume_path(".", volume) assert container_path == volume class MergePathMappingTest: config_name = "" def test_empty(self): service_dict = config.merge_service_dicts({}, {}, DEFAULT_VERSION) assert self.config_name not in service_dict def test_no_override(self): service_dict = config.merge_service_dicts( {self.config_name: ['/foo:/code', '/data']}, {}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == {'/foo:/code', '/data'} def test_no_base(self): service_dict = config.merge_service_dicts( {}, {self.config_name: ['/bar:/code']}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == {'/bar:/code'} def test_override_explicit_path(self): service_dict = config.merge_service_dicts( {self.config_name: ['/foo:/code', '/data']}, {self.config_name: ['/bar:/code']}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == {'/bar:/code', '/data'} def test_add_explicit_path(self): service_dict = config.merge_service_dicts( {self.config_name: ['/foo:/code', '/data']}, {self.config_name: ['/bar:/code', '/quux:/data']}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == {'/bar:/code', '/quux:/data'} def test_remove_explicit_path(self): service_dict = config.merge_service_dicts( {self.config_name: ['/foo:/code', '/quux:/data']}, {self.config_name: ['/bar:/code', '/data']}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == {'/bar:/code', '/data'} class MergeVolumesTest(unittest.TestCase, MergePathMappingTest): config_name = 'volumes' class MergeDevicesTest(unittest.TestCase, MergePathMappingTest): config_name = 'devices' class BuildOrImageMergeTest(unittest.TestCase): def test_merge_build_or_image_no_override(self): assert config.merge_service_dicts({'build': '.'}, {}, V1) == {'build': '.'} assert config.merge_service_dicts({'image': 'redis'}, {}, V1) == {'image': 'redis'} def test_merge_build_or_image_override_with_same(self): assert config.merge_service_dicts({'build': '.'}, {'build': './web'}, V1) == {'build': './web'} assert config.merge_service_dicts({'image': 'redis'}, {'image': 'postgres'}, V1) == { 'image': 'postgres' } def test_merge_build_or_image_override_with_other(self): assert config.merge_service_dicts({'build': '.'}, {'image': 'redis'}, V1) == { 'image': 'redis' } assert config.merge_service_dicts({'image': 'redis'}, {'build': '.'}, V1) == {'build': '.'} class MergeListsTest: config_name = "" base_config 
= [] override_config = [] def merged_config(self): return set(self.base_config) | set(self.override_config) def test_empty(self): assert self.config_name not in config.merge_service_dicts({}, {}, DEFAULT_VERSION) def test_no_override(self): service_dict = config.merge_service_dicts( {self.config_name: self.base_config}, {}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == set(self.base_config) def test_no_base(self): service_dict = config.merge_service_dicts( {}, {self.config_name: self.base_config}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == set(self.base_config) def test_add_item(self): service_dict = config.merge_service_dicts( {self.config_name: self.base_config}, {self.config_name: self.override_config}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == set(self.merged_config()) class MergePortsTest(unittest.TestCase, MergeListsTest): config_name = 'ports' base_config = ['10:8000', '9000'] override_config = ['20:8000'] def merged_config(self): return self.convert(self.base_config) | self.convert(self.override_config) def convert(self, port_config): return set(config.merge_service_dicts( {self.config_name: port_config}, {self.config_name: []}, DEFAULT_VERSION )[self.config_name]) def test_duplicate_port_mappings(self): service_dict = config.merge_service_dicts( {self.config_name: self.base_config}, {self.config_name: self.base_config}, DEFAULT_VERSION ) assert set(service_dict[self.config_name]) == self.convert(self.base_config) def test_no_override(self): service_dict = config.merge_service_dicts( {self.config_name: self.base_config}, {}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == self.convert(self.base_config) def test_no_base(self): service_dict = config.merge_service_dicts( {}, {self.config_name: self.base_config}, DEFAULT_VERSION) assert set(service_dict[self.config_name]) == self.convert(self.base_config) class MergeNetworksTest(unittest.TestCase, MergeListsTest): config_name = 'networks' base_config = {'default': {'aliases': ['foo.bar', 'foo.baz']}} override_config = {'default': {'ipv4_address': '123.234.123.234'}} def test_no_network_overrides(self): service_dict = config.merge_service_dicts( {self.config_name: self.base_config}, {self.config_name: self.override_config}, DEFAULT_VERSION) assert service_dict[self.config_name] == { 'default': { 'aliases': ['foo.bar', 'foo.baz'], 'ipv4_address': '123.234.123.234' } } def test_network_has_none_value(self): service_dict = config.merge_service_dicts( {self.config_name: { 'default': None }}, {self.config_name: { 'default': { 'aliases': [] } }}, DEFAULT_VERSION) assert service_dict[self.config_name] == { 'default': { 'aliases': [] } } def test_all_properties(self): service_dict = config.merge_service_dicts( {self.config_name: { 'default': { 'aliases': ['foo.bar', 'foo.baz'], 'link_local_ips': ['192.168.1.10', '192.168.1.11'], 'ipv4_address': '111.111.111.111', 'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-first' } }}, {self.config_name: { 'default': { 'aliases': ['foo.baz', 'foo.baz2'], 'link_local_ips': ['192.168.1.11', '192.168.1.12'], 'ipv4_address': '123.234.123.234', 'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second' } }}, DEFAULT_VERSION) assert service_dict[self.config_name] == { 'default': { 'aliases': ['foo.bar', 'foo.baz', 'foo.baz2'], 'link_local_ips': ['192.168.1.10', '192.168.1.11', '192.168.1.12'], 'ipv4_address': '123.234.123.234', 'ipv6_address': 'FE80:CD00:0000:0CDE:1257:0000:211E:729C-second' } } def 
test_no_network_name_overrides(self): service_dict = config.merge_service_dicts( { self.config_name: { 'default': { 'aliases': ['foo.bar', 'foo.baz'], 'ipv4_address': '123.234.123.234' } } }, { self.config_name: { 'another_network': { 'ipv4_address': '123.234.123.234' } } }, DEFAULT_VERSION) assert service_dict[self.config_name] == { 'default': { 'aliases': ['foo.bar', 'foo.baz'], 'ipv4_address': '123.234.123.234' }, 'another_network': { 'ipv4_address': '123.234.123.234' } } class MergeStringsOrListsTest(unittest.TestCase): def test_no_override(self): service_dict = config.merge_service_dicts( {'dns': '8.8.8.8'}, {}, DEFAULT_VERSION) assert set(service_dict['dns']) == {'8.8.8.8'} def test_no_base(self): service_dict = config.merge_service_dicts( {}, {'dns': '8.8.8.8'}, DEFAULT_VERSION) assert set(service_dict['dns']) == {'8.8.8.8'} def test_add_string(self): service_dict = config.merge_service_dicts( {'dns': ['8.8.8.8']}, {'dns': '9.9.9.9'}, DEFAULT_VERSION) assert set(service_dict['dns']) == {'8.8.8.8', '9.9.9.9'} def test_add_list(self): service_dict = config.merge_service_dicts( {'dns': '8.8.8.8'}, {'dns': ['9.9.9.9']}, DEFAULT_VERSION) assert set(service_dict['dns']) == {'8.8.8.8', '9.9.9.9'} class MergeLabelsTest(unittest.TestCase): def test_empty(self): assert 'labels' not in config.merge_service_dicts({}, {}, DEFAULT_VERSION) def test_no_override(self): service_dict = config.merge_service_dicts( make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'), make_service_dict('foo', {'build': '.'}, 'tests/'), DEFAULT_VERSION) assert service_dict['labels'] == {'foo': '1', 'bar': ''} def test_no_base(self): service_dict = config.merge_service_dicts( make_service_dict('foo', {'build': '.'}, 'tests/'), make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'), DEFAULT_VERSION) assert service_dict['labels'] == {'foo': '2'} def test_override_explicit_value(self): service_dict = config.merge_service_dicts( make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'), make_service_dict('foo', {'build': '.', 'labels': ['foo=2']}, 'tests/'), DEFAULT_VERSION) assert service_dict['labels'] == {'foo': '2', 'bar': ''} def test_add_explicit_value(self): service_dict = config.merge_service_dicts( make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar']}, 'tests/'), make_service_dict('foo', {'build': '.', 'labels': ['bar=2']}, 'tests/'), DEFAULT_VERSION) assert service_dict['labels'] == {'foo': '1', 'bar': '2'} def test_remove_explicit_value(self): service_dict = config.merge_service_dicts( make_service_dict('foo', {'build': '.', 'labels': ['foo=1', 'bar=2']}, 'tests/'), make_service_dict('foo', {'build': '.', 'labels': ['bar']}, 'tests/'), DEFAULT_VERSION) assert service_dict['labels'] == {'foo': '1', 'bar': ''} class MergeBuildTest(unittest.TestCase): def test_full(self): base = { 'context': '.', 'dockerfile': 'Dockerfile', 'args': { 'x': '1', 'y': '2', }, 'cache_from': ['ubuntu'], 'labels': ['com.docker.compose.test=true'] } override = { 'context': './prod', 'dockerfile': 'Dockerfile.prod', 'args': ['x=12'], 'cache_from': ['debian'], 'labels': { 'com.docker.compose.test': 'false', 'com.docker.compose.prod': 'true', } } result = config.merge_build(None, {'build': base}, {'build': override}) assert result['context'] == override['context'] assert result['dockerfile'] == override['dockerfile'] assert result['args'] == {'x': '12', 'y': '2'} assert set(result['cache_from']) == {'ubuntu', 'debian'} assert result['labels'] == override['labels'] 
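    # Illustrative aside (not part of the original test suite): the override
    # semantics exercised by test_full above, reduced to a minimal sketch
    # using the same config.merge_build call. Scalar keys such as 'context'
    # are replaced wholesale by the override, while mapping keys such as
    # 'args' are merged key by key. The _example_* names are local to this
    # sketch and do not exist upstream.
    _example_base = {'context': '.', 'args': {'x': '1', 'y': '2'}}
    _example_override = {'context': './prod', 'args': {'x': '12'}}
    _example_result = config.merge_build(
        None, {'build': _example_base}, {'build': _example_override})
    assert _example_result['context'] == './prod'
    assert _example_result['args'] == {'x': '12', 'y': '2'}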
def test_empty_override(self): base = { 'context': '.', 'dockerfile': 'Dockerfile', 'args': { 'x': '1', 'y': '2', }, 'cache_from': ['ubuntu'], 'labels': { 'com.docker.compose.test': 'true' } } override = {} result = config.merge_build(None, {'build': base}, {'build': override}) assert result == base def test_empty_base(self): base = {} override = { 'context': './prod', 'dockerfile': 'Dockerfile.prod', 'args': {'x': '12'}, 'cache_from': ['debian'], 'labels': { 'com.docker.compose.test': 'false', 'com.docker.compose.prod': 'true', } } result = config.merge_build(None, {'build': base}, {'build': override}) assert result == override class MemoryOptionsTest(unittest.TestCase): def test_validation_fails_with_just_memswap_limit(self): """ When you set a 'memswap_limit' it is invalid config unless you also set a mem_limit """ with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'foo': {'image': 'busybox', 'memswap_limit': 2000000}, }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "foo.memswap_limit is invalid: when defining " \ "'memswap_limit' you must set 'mem_limit' as well" \ in excinfo.exconly() def test_validation_with_correct_memswap_values(self): service_dict = config.load( build_config_details( {'foo': {'image': 'busybox', 'mem_limit': 1000000, 'memswap_limit': 2000000}}, 'tests/fixtures/extends', 'common.yml' ) ).services assert service_dict[0]['memswap_limit'] == 2000000 def test_memswap_can_be_a_string(self): service_dict = config.load( build_config_details( {'foo': {'image': 'busybox', 'mem_limit': "1G", 'memswap_limit': "512M"}}, 'tests/fixtures/extends', 'common.yml' ) ).services assert service_dict[0]['memswap_limit'] == "512M" class EnvTest(unittest.TestCase): def test_parse_environment_as_list(self): environment = [ 'NORMAL=F1', 'CONTAINS_EQUALS=F=2', 'TRAILING_EQUALS=', ] assert config.parse_environment(environment) == { 'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': '' } def test_parse_environment_as_dict(self): environment = { 'NORMAL': 'F1', 'CONTAINS_EQUALS': 'F=2', 'TRAILING_EQUALS': None, } assert config.parse_environment(environment) == environment def test_parse_environment_invalid(self): with pytest.raises(ConfigurationError): config.parse_environment('a=b') def test_parse_environment_empty(self): assert config.parse_environment(None) == {} @mock.patch.dict(os.environ) def test_resolve_environment(self): os.environ['FILE_DEF'] = 'E1' os.environ['FILE_DEF_EMPTY'] = 'E2' os.environ['ENV_DEF'] = 'E3' service_dict = { 'build': '.', 'environment': { 'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': None, 'NO_DEF': None }, } assert resolve_environment( service_dict, Environment.from_env_file(None) ) == {'FILE_DEF': 'F1', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None} def test_resolve_environment_from_env_file(self): assert resolve_environment({'env_file': ['tests/fixtures/env/one.env']}) == { 'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'bar' } def test_environment_overrides_env_file(self): assert resolve_environment({ 'environment': {'FOO': 'baz'}, 'env_file': ['tests/fixtures/env/one.env'], }) == {'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz'} def test_resolve_environment_with_multiple_env_files(self): service_dict = { 'env_file': [ 'tests/fixtures/env/one.env', 'tests/fixtures/env/two.env' ] } assert resolve_environment(service_dict) == { 'ONE': '2', 'TWO': '1', 'THREE': '3', 'FOO': 'baz', 'DOO': 'dah' } def test_resolve_environment_nonexistent_file(self): with pytest.raises(ConfigurationError) as exc: 
config.load(build_config_details( {'foo': {'image': 'example', 'env_file': 'nonexistent.env'}}, working_dir='tests/fixtures/env')) assert 'Couldn\'t find env file' in exc.exconly() assert 'nonexistent.env' in exc.exconly() @mock.patch.dict(os.environ) def test_resolve_environment_from_env_file_with_empty_values(self): os.environ['FILE_DEF'] = 'E1' os.environ['FILE_DEF_EMPTY'] = 'E2' os.environ['ENV_DEF'] = 'E3' assert resolve_environment( {'env_file': ['tests/fixtures/env/resolve.env']}, Environment.from_env_file(None) ) == { 'FILE_DEF': 'bär', 'FILE_DEF_EMPTY': '', 'ENV_DEF': 'E3', 'NO_DEF': None } @mock.patch.dict(os.environ) def test_resolve_build_args(self): os.environ['env_arg'] = 'value2' build = { 'context': '.', 'args': { 'arg1': 'value1', 'empty_arg': '', 'env_arg': None, 'no_env': None } } assert resolve_build_args(build['args'], Environment.from_env_file(build['context'])) == { 'arg1': 'value1', 'empty_arg': '', 'env_arg': 'value2', 'no_env': None } @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') @mock.patch.dict(os.environ) def test_resolve_path(self): os.environ['HOSTENV'] = '/tmp' os.environ['CONTAINERENV'] = '/host/tmp' service_dict = config.load( build_config_details( {'services': { 'foo': {'build': '.', 'volumes': ['$HOSTENV:$CONTAINERENV']}}}, "tests/fixtures/env", ) ).services[0] assert set(service_dict['volumes']) == {VolumeSpec.parse('/tmp:/host/tmp')} service_dict = config.load( build_config_details( {'services': { 'foo': {'build': '.', 'volumes': ['/opt${HOSTENV}:/opt${CONTAINERENV}']}}}, "tests/fixtures/env", ) ).services[0] assert set(service_dict['volumes']) == {VolumeSpec.parse('/opt/tmp:/opt/host/tmp')} def load_from_filename(filename, override_dir=None): return config.load( config.find('.', [filename], Environment.from_env_file('.'), override_dir=override_dir) ).services class ExtendsTest(unittest.TestCase): def test_extends(self): service_dicts = load_from_filename('tests/fixtures/extends/docker-compose.yml') assert service_sort(service_dicts) == service_sort([ { 'name': 'mydb', 'image': 'busybox', 'command': 'top', }, { 'name': 'myweb', 'image': 'busybox', 'command': 'top', 'network_mode': 'bridge', 'links': ['mydb:db'], 'environment': { "FOO": "1", "BAR": "2", "BAZ": "2", }, } ]) def test_merging_env_labels_ulimits(self): service_dicts = load_from_filename('tests/fixtures/extends/common-env-labels-ulimits.yml') assert service_sort(service_dicts) == service_sort([ { 'name': 'web', 'image': 'busybox', 'command': '/bin/true', 'network_mode': 'host', 'environment': { "FOO": "2", "BAR": "1", "BAZ": "3", }, 'labels': {'label': 'one'}, 'ulimits': {'nproc': 65535, 'memlock': {'soft': 1024, 'hard': 2048}} } ]) def test_nested(self): service_dicts = load_from_filename('tests/fixtures/extends/nested.yml') assert service_dicts == [ { 'name': 'myweb', 'image': 'busybox', 'command': '/bin/true', 'network_mode': 'host', 'environment': { "FOO": "2", "BAR": "2", }, }, ] def test_self_referencing_file(self): """ We specify a 'file' key that is the filename we're already in. 
""" service_dicts = load_from_filename('tests/fixtures/extends/specify-file-as-self.yml') assert service_sort(service_dicts) == service_sort([ { 'environment': { 'YEP': '1', 'BAR': '1', 'BAZ': '3' }, 'image': 'busybox', 'name': 'myweb' }, { 'environment': {'YEP': '1'}, 'image': 'busybox', 'name': 'otherweb' }, { 'environment': {'YEP': '1', 'BAZ': '3'}, 'image': 'busybox', 'name': 'web' } ]) def test_circular(self): with pytest.raises(config.CircularReference) as exc: load_from_filename('tests/fixtures/extends/circle-1.yml') path = [ (os.path.basename(filename), service_name) for (filename, service_name) in exc.value.trail ] expected = [ ('circle-1.yml', 'web'), ('circle-2.yml', 'other'), ('circle-1.yml', 'web'), ] assert path == expected def test_extends_validation_empty_dictionary(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '3', 'services': { 'web': {'image': 'busybox', 'extends': {}}, } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert 'service' in excinfo.exconly() def test_extends_validation_missing_service_key(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '3', 'services': { 'web': { 'image': 'busybox', 'extends': {'file': 'common.yml'} } } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "'service' is a required property" in excinfo.exconly() def test_extends_validation_invalid_key(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '3', 'services': { 'web': { 'image': 'busybox', 'extends': { 'file': 'common.yml', 'service': 'web', 'rogue_key': 'is not allowed' } }, } }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "web.extends contains unsupported option: 'rogue_key'" \ in excinfo.exconly() def test_extends_validation_sub_property_key(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details( { 'version': '3', 'services': { 'web': { 'image': 'busybox', 'extends': { 'file': 1, 'service': 'web', } } }, }, 'tests/fixtures/extends', 'filename.yml' ) ) assert "web.extends.file contains 1, which is an invalid type, it should be a string" \ in excinfo.exconly() def test_extends_validation_no_file_key_no_filename_set(self): dictionary = {'extends': {'service': 'web'}} with pytest.raises(ConfigurationError) as excinfo: make_service_dict('myweb', dictionary, working_dir='tests/fixtures/extends') assert 'file' in excinfo.exconly() def test_extends_validation_valid_config(self): service = config.load( build_config_details( { 'web': {'image': 'busybox', 'extends': {'service': 'web', 'file': 'common.yml'}}, }, 'tests/fixtures/extends', 'common.yml' ) ).services assert len(service) == 1 assert isinstance(service[0], dict) assert service[0]['command'] == "/bin/true" def test_extended_service_with_invalid_config(self): with pytest.raises(ConfigurationError) as exc: load_from_filename('tests/fixtures/extends/service-with-invalid-schema.yml') assert ( "myweb has neither an image nor a build context specified" in exc.exconly() ) def test_extended_service_with_valid_config(self): service = load_from_filename('tests/fixtures/extends/service-with-valid-composite-extends.yml') assert service[0]['command'] == "top" def test_extends_file_defaults_to_self(self): """ Test not specifying a file in our extends options that the config is valid and correctly extends from itself. 
""" service_dicts = load_from_filename('tests/fixtures/extends/no-file-specified.yml') assert service_sort(service_dicts) == service_sort([ { 'name': 'myweb', 'image': 'busybox', 'environment': { "BAR": "1", "BAZ": "3", } }, { 'name': 'web', 'image': 'busybox', 'environment': { "BAZ": "3", } } ]) def test_invalid_links_in_extended_service(self): with pytest.raises(ConfigurationError) as excinfo: load_from_filename('tests/fixtures/extends/invalid-links.yml') assert "services with 'links' cannot be extended" in excinfo.exconly() def test_invalid_volumes_from_in_extended_service(self): with pytest.raises(ConfigurationError) as excinfo: load_from_filename('tests/fixtures/extends/invalid-volumes.yml') assert "services with 'volumes_from' cannot be extended" in excinfo.exconly() def test_invalid_net_in_extended_service(self): with pytest.raises(ConfigurationError) as excinfo: load_from_filename('tests/fixtures/extends/invalid-net-v2.yml') assert 'network_mode: service' in excinfo.exconly() assert 'cannot be extended' in excinfo.exconly() with pytest.raises(ConfigurationError) as excinfo: load_from_filename('tests/fixtures/extends/invalid-net.yml') assert 'net: container' in excinfo.exconly() assert 'cannot be extended' in excinfo.exconly() @mock.patch.dict(os.environ) def test_load_config_runs_interpolation_in_extended_service(self): os.environ.update(HOSTNAME_VALUE="penguin") expected_interpolated_value = "host-penguin" service_dicts = load_from_filename( 'tests/fixtures/extends/valid-interpolation.yml') for service in service_dicts: assert service['hostname'] == expected_interpolated_value @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') def test_volume_path(self): dicts = load_from_filename('tests/fixtures/volume-path/docker-compose.yml') paths = [ VolumeSpec( os.path.abspath('tests/fixtures/volume-path/common/foo'), '/foo', 'rw'), VolumeSpec( os.path.abspath('tests/fixtures/volume-path/bar'), '/bar', 'rw') ] assert set(dicts[0]['volumes']) == set(paths) def test_parent_build_path_dne(self): child = load_from_filename('tests/fixtures/extends/nonexistent-path-child.yml') assert child == [ { 'name': 'dnechild', 'image': 'busybox', 'command': '/bin/true', 'environment': { "FOO": "1", "BAR": "2", }, }, ] def test_load_throws_error_when_base_service_does_not_exist(self): with pytest.raises(ConfigurationError) as excinfo: load_from_filename('tests/fixtures/extends/nonexistent-service.yml') assert "Cannot extend service 'foo'" in excinfo.exconly() assert "Service not found" in excinfo.exconly() def test_partial_service_config_in_extends_is_still_valid(self): dicts = load_from_filename('tests/fixtures/extends/valid-common-config.yml') assert dicts[0]['environment'] == {'FOO': '1'} def test_extended_service_with_verbose_and_shorthand_way(self): services = load_from_filename('tests/fixtures/extends/verbose-and-shorthand.yml') assert service_sort(services) == service_sort([ { 'name': 'base', 'image': 'busybox', 'environment': {'BAR': '1'}, }, { 'name': 'verbose', 'image': 'busybox', 'environment': {'BAR': '1', 'FOO': '1'}, }, { 'name': 'shorthand', 'image': 'busybox', 'environment': {'BAR': '1', 'FOO': '2'}, }, ]) @mock.patch.dict(os.environ) def test_extends_with_environment_and_env_files(self): tmpdir = tempfile.mkdtemp('test_extends_with_environment') self.addCleanup(shutil.rmtree, tmpdir) commondir = os.path.join(tmpdir, 'common') os.mkdir(commondir) with open(os.path.join(commondir, 'base.yml'), mode="w") as base_fh: base_fh.write(""" app: image: 'example/app' env_file: - 'envs' 
environment: - SECRET - TEST_ONE=common - TEST_TWO=common """) with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh: docker_compose_fh.write(""" ext: extends: file: common/base.yml service: app env_file: - 'envs' environment: - THING - TEST_ONE=top """) with open(os.path.join(commondir, 'envs'), mode="w") as envs_fh: envs_fh.write(""" COMMON_ENV_FILE TEST_ONE=common-env-file TEST_TWO=common-env-file TEST_THREE=common-env-file TEST_FOUR=common-env-file """) with open(os.path.join(tmpdir, 'envs'), mode="w") as envs_fh: envs_fh.write(""" TOP_ENV_FILE TEST_ONE=top-env-file TEST_TWO=top-env-file TEST_THREE=top-env-file """) expected = [ { 'name': 'ext', 'image': 'example/app', 'environment': { 'SECRET': 'secret', 'TOP_ENV_FILE': 'secret', 'COMMON_ENV_FILE': 'secret', 'THING': 'thing', 'TEST_ONE': 'top', 'TEST_TWO': 'common', 'TEST_THREE': 'top-env-file', 'TEST_FOUR': 'common-env-file', }, }, ] os.environ['SECRET'] = 'secret' os.environ['THING'] = 'thing' os.environ['COMMON_ENV_FILE'] = 'secret' os.environ['TOP_ENV_FILE'] = 'secret' config = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml'))) assert config == expected def test_extends_with_mixed_versions_is_error(self): tmpdir = tempfile.mkdtemp('test_extends_with_mixed_version') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh: docker_compose_fh.write(""" version: "2" services: web: extends: file: base.yml service: base image: busybox """) with open(os.path.join(tmpdir, 'base.yml'), mode="w") as base_fh: base_fh.write(""" base: volumes: ['/foo'] ports: ['3000:3000'] """) with pytest.raises(ConfigurationError) as exc: load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml'))) assert 'Version mismatch' in exc.exconly() def test_extends_with_defined_version_passes(self): tmpdir = tempfile.mkdtemp('test_extends_with_defined_version') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh: docker_compose_fh.write(""" version: "2" services: web: extends: file: base.yml service: base image: busybox """) with open(os.path.join(tmpdir, 'base.yml'), mode="w") as base_fh: base_fh.write(""" version: "2" services: base: volumes: ['/foo'] ports: ['3000:3000'] command: top """) service = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml'))) assert service[0]['command'] == "top" def test_extends_with_depends_on(self): tmpdir = tempfile.mkdtemp('test_extends_with_depends_on') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh: docker_compose_fh.write(""" version: "2" services: base: image: example web: extends: base image: busybox depends_on: ['other'] other: image: example """) services = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml'))) assert service_sort(services)[2]['depends_on'] == { 'other': {'condition': 'service_started'} } def test_extends_with_healthcheck(self): service_dicts = load_from_filename('tests/fixtures/extends/healthcheck-2.yml') assert service_sort(service_dicts) == [{ 'name': 'demo', 'image': 'foobar:latest', 'healthcheck': { 'test': ['CMD', '/health.sh'], 'interval': 10000000000, 'timeout': 5000000000, 'retries': 36, } }] def test_extends_with_ports(self): tmpdir = tempfile.mkdtemp('test_extends_with_ports') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh: 
docker_compose_fh.write(""" version: '2' services: a: image: nginx ports: - 80 b: extends: service: a """) services = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml'))) assert len(services) == 2 for svc in services: assert svc['ports'] == [types.ServicePort('80', None, None, None, None)] def test_extends_with_security_opt(self): tmpdir = tempfile.mkdtemp('test_extends_with_ports') self.addCleanup(shutil.rmtree, tmpdir) with open(os.path.join(tmpdir, 'docker-compose.yml'), mode="w") as docker_compose_fh: docker_compose_fh.write(""" version: '2' services: a: image: nginx security_opt: - apparmor:unconfined - seccomp:unconfined b: extends: service: a """) services = load_from_filename(str(os.path.join(tmpdir, 'docker-compose.yml'))) assert len(services) == 2 for svc in services: assert types.SecurityOpt.parse('apparmor:unconfined') in svc['security_opt'] assert types.SecurityOpt.parse('seccomp:unconfined') in svc['security_opt'] @mock.patch.object(ConfigFile, 'from_filename', wraps=ConfigFile.from_filename) def test_extends_same_file_optimization(self, from_filename_mock): load_from_filename('tests/fixtures/extends/no-file-specified.yml') from_filename_mock.assert_called_once() @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') class ExpandPathTest(unittest.TestCase): working_dir = '/home/user/somedir' def test_expand_path_normal(self): result = config.expand_path(self.working_dir, 'myfile') assert result == self.working_dir + '/' + 'myfile' def test_expand_path_absolute(self): abs_path = '/home/user/otherdir/somefile' result = config.expand_path(self.working_dir, abs_path) assert result == abs_path def test_expand_path_with_tilde(self): test_path = '~/otherdir/somefile' with mock.patch.dict(os.environ): os.environ['HOME'] = user_path = '/home/user/' result = config.expand_path(self.working_dir, test_path) assert result == user_path + 'otherdir/somefile' class VolumePathTest(unittest.TestCase): def test_split_path_mapping_with_windows_path(self): host_path = "c:\\Users\\msamblanet\\Documents\\anvil\\connect\\config" windows_volume_path = host_path + ":/opt/connect/config:ro" expected_mapping = ("/opt/connect/config", (host_path, 'ro')) mapping = config.split_path_mapping(windows_volume_path) assert mapping == expected_mapping def test_split_path_mapping_with_windows_path_in_container(self): host_path = 'c:\\Users\\remilia\\data' container_path = 'c:\\scarletdevil\\data' expected_mapping = (container_path, (host_path, None)) mapping = config.split_path_mapping('{}:{}'.format(host_path, container_path)) assert mapping == expected_mapping def test_split_path_mapping_with_root_mount(self): host_path = '/' container_path = '/var/hostroot' expected_mapping = (container_path, (host_path, None)) mapping = config.split_path_mapping('{}:{}'.format(host_path, container_path)) assert mapping == expected_mapping @pytest.mark.xfail(IS_WINDOWS_PLATFORM, reason='paths use slash') class BuildPathTest(unittest.TestCase): def setUp(self): self.abs_context_path = os.path.join(os.getcwd(), 'tests/fixtures/build-ctx') def test_nonexistent_path(self): with pytest.raises(ConfigurationError): config.load( build_config_details( { 'foo': {'build': 'nonexistent.path'}, }, 'working_dir', 'filename.yml' ) ) def test_relative_path(self): relative_build_path = '../build-ctx/' service_dict = make_service_dict( 'relpath', {'build': relative_build_path}, working_dir='tests/fixtures/build-path' ) assert service_dict['build'] == self.abs_context_path def test_absolute_path(self): service_dict = 
make_service_dict( 'abspath', {'build': self.abs_context_path}, working_dir='tests/fixtures/build-path' ) assert service_dict['build'] == self.abs_context_path def test_from_file(self): service_dict = load_from_filename('tests/fixtures/build-path/docker-compose.yml') assert service_dict == [{'name': 'foo', 'build': {'context': self.abs_context_path}}] def test_from_file_override_dir(self): override_dir = os.path.join(os.getcwd(), 'tests/fixtures/') service_dict = load_from_filename( 'tests/fixtures/build-path-override-dir/docker-compose.yml', override_dir=override_dir) assert service_dict == [{'name': 'foo', 'build': {'context': self.abs_context_path}}] def test_valid_url_in_build_path(self): valid_urls = [ 'git://github.com/docker/docker', 'git@github.com:docker/docker.git', 'git@bitbucket.org:atlassianlabs/atlassian-docker.git', 'https://github.com/docker/docker.git', 'http://github.com/docker/docker.git', 'github.com/docker/docker.git', ] for valid_url in valid_urls: service_dict = config.load(build_config_details({ 'validurl': {'build': valid_url}, }, '.', None)).services assert service_dict[0]['build'] == {'context': valid_url} def test_invalid_url_in_build_path(self): invalid_urls = [ 'example.com/bogus', 'ftp://example.com/', '/path/does/not/exist', ] for invalid_url in invalid_urls: with pytest.raises(ConfigurationError) as exc: config.load(build_config_details({ 'invalidurl': {'build': invalid_url}, }, '.', None)) assert 'build path' in exc.exconly() class HealthcheckTest(unittest.TestCase): def test_healthcheck(self): config_dict = config.load( build_config_details({ 'version': '2.3', 'services': { 'test': { 'image': 'busybox', 'healthcheck': { 'test': ['CMD', 'true'], 'interval': '1s', 'timeout': '1m', 'retries': 3, 'start_period': '10s', } } } }) ) serialized_config = yaml.safe_load(serialize_config(config_dict)) serialized_service = serialized_config['services']['test'] assert serialized_service['healthcheck'] == { 'test': ['CMD', 'true'], 'interval': '1s', 'timeout': '1m', 'retries': 3, 'start_period': '10s' } def test_disable(self): config_dict = config.load( build_config_details({ 'version': '2.3', 'services': { 'test': { 'image': 'busybox', 'healthcheck': { 'disable': True, } } } }) ) serialized_config = yaml.safe_load(serialize_config(config_dict)) serialized_service = serialized_config['services']['test'] assert serialized_service['healthcheck'] == { 'test': ['NONE'], } def test_disable_with_other_config_is_invalid(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details({ 'version': '2.3', 'services': { 'invalid-healthcheck': { 'image': 'busybox', 'healthcheck': { 'disable': True, 'interval': '1s', } } } }) ) assert 'invalid-healthcheck' in excinfo.exconly() assert '"disable: true" cannot be combined with other options' in excinfo.exconly() def test_healthcheck_with_invalid_test(self): with pytest.raises(ConfigurationError) as excinfo: config.load( build_config_details({ 'version': '2.3', 'services': { 'invalid-healthcheck': { 'image': 'busybox', 'healthcheck': { 'test': ['true'], 'interval': '1s', 'timeout': '1m', 'retries': 3, 'start_period': '10s', } } } }) ) assert 'invalid-healthcheck' in excinfo.exconly() assert 'the first item must be either NONE, CMD or CMD-SHELL' in excinfo.exconly() class GetDefaultConfigFilesTestCase(unittest.TestCase): files = [ 'docker-compose.yml', 'docker-compose.yaml', 'compose.yml', 'compose.yaml', ] def test_get_config_path_default_file_in_basedir(self): for index, filename in enumerate(self.files): 
assert filename == get_config_filename_for_files(self.files[index:]) with pytest.raises(config.ComposeFileNotFound): get_config_filename_for_files([]) def test_get_config_path_default_file_in_parent_dir(self): """Test with files placed in the subdir""" def get_config_in_subdir(files): return get_config_filename_for_files(files, subdir=True) for index, filename in enumerate(self.files): assert filename == get_config_in_subdir(self.files[index:]) with pytest.raises(config.ComposeFileNotFound): get_config_in_subdir([]) def get_config_filename_for_files(filenames, subdir=None): def make_files(dirname, filenames): for fname in filenames: with open(os.path.join(dirname, fname), 'w') as f: f.write('') project_dir = tempfile.mkdtemp() try: make_files(project_dir, filenames) if subdir: base_dir = tempfile.mkdtemp(dir=project_dir) else: base_dir = project_dir filenames = config.get_default_config_files(base_dir) if not filenames: raise config.ComposeFileNotFound(config.SUPPORTED_FILENAMES) return os.path.basename(filenames[0]) finally: shutil.rmtree(project_dir) class SerializeTest(unittest.TestCase): def test_denormalize_depends(self): service_dict = { 'image': 'busybox', 'command': 'true', 'depends_on': { 'service2': {'condition': 'service_started'}, 'service3': {'condition': 'service_started'}, } } assert denormalize_service_dict(service_dict, VERSION) == service_dict def test_serialize_time(self): data = { 9: '9ns', 9000: '9us', 9000000: '9ms', 90000000: '90ms', 900000000: '900ms', 999999999: '999999999ns', 1000000000: '1s', 60000000000: '1m', 60000000001: '60000000001ns', 9000000000000: '150m', 90000000000000: '25h', } for k, v in data.items(): assert serialize_ns_time_value(k) == v def test_denormalize_healthcheck(self): service_dict = { 'image': 'test', 'healthcheck': { 'test': 'exit 1', 'interval': '1m40s', 'timeout': '30s', 'retries': 5, 'start_period': '2s90ms' } } processed_service = config.process_service(config.ServiceConfig( '.', 'test', 'test', service_dict )) denormalized_service = denormalize_service_dict(processed_service, VERSION) assert denormalized_service['healthcheck']['interval'] == '100s' assert denormalized_service['healthcheck']['timeout'] == '30s' assert denormalized_service['healthcheck']['start_period'] == '2090ms' def test_denormalize_image_has_digest(self): service_dict = { 'image': 'busybox' } image_digest = 'busybox@sha256:abcde' assert denormalize_service_dict(service_dict, VERSION, image_digest) == { 'image': 'busybox@sha256:abcde' } def test_denormalize_image_no_digest(self): service_dict = { 'image': 'busybox' } assert denormalize_service_dict(service_dict, VERSION) == { 'image': 'busybox' } def test_serialize_secrets(self): service_dict = { 'image': 'example/web', 'secrets': [ {'source': 'one'}, { 'source': 'source', 'target': 'target', 'uid': '100', 'gid': '200', 'mode': 0o777, } ] } secrets_dict = { 'one': {'file': '/one.txt'}, 'source': {'file': '/source.pem'}, 'two': {'external': True}, } config_dict = config.load(build_config_details({ 'version': '3.1', 'services': {'web': service_dict}, 'secrets': secrets_dict })) serialized_config = yaml.safe_load(serialize_config(config_dict)) serialized_service = serialized_config['services']['web'] assert secret_sort(serialized_service['secrets']) == secret_sort(service_dict['secrets']) assert 'secrets' in serialized_config assert serialized_config['secrets']['two'] == {'external': True, 'name': 'two'} def test_serialize_ports(self): config_dict = config.Config(config_version=VERSION, version=VERSION, services=[ { 
'ports': [types.ServicePort('80', '8080', None, None, None)], 'image': 'alpine', 'name': 'web' } ], volumes={}, networks={}, secrets={}, configs={}) serialized_config = yaml.safe_load(serialize_config(config_dict)) assert [{'published': 8080, 'target': 80}] == serialized_config['services']['web']['ports'] def test_serialize_ports_v1(self): config_dict = config.Config(config_version=V1, version=V1, services=[ { 'ports': [types.ServicePort('80', '8080', None, None, None)], 'image': 'alpine', 'name': 'web' } ], volumes={}, networks={}, secrets={}, configs={}) serialized_config = yaml.safe_load(serialize_config(config_dict)) assert ['8080:80/tcp'] == serialized_config['services']['web']['ports'] def test_serialize_ports_with_ext_ip(self): config_dict = config.Config(config_version=VERSION, version=VERSION, services=[ { 'ports': [types.ServicePort('80', '8080', None, None, '127.0.0.1')], 'image': 'alpine', 'name': 'web' } ], volumes={}, networks={}, secrets={}, configs={}) serialized_config = yaml.safe_load(serialize_config(config_dict)) assert '127.0.0.1:8080:80/tcp' in serialized_config['services']['web']['ports'] def test_serialize_configs(self): service_dict = { 'image': 'example/web', 'configs': [ {'source': 'one'}, { 'source': 'source', 'target': 'target', 'uid': '100', 'gid': '200', 'mode': 0o777, } ] } configs_dict = { 'one': {'file': '/one.txt'}, 'source': {'file': '/source.pem'}, 'two': {'external': True}, } config_dict = config.load(build_config_details({ 'version': '3.3', 'services': {'web': service_dict}, 'configs': configs_dict })) serialized_config = yaml.safe_load(serialize_config(config_dict)) serialized_service = serialized_config['services']['web'] assert secret_sort(serialized_service['configs']) == secret_sort(service_dict['configs']) assert 'configs' in serialized_config assert serialized_config['configs']['two'] == {'external': True, 'name': 'two'} def test_serialize_bool_string(self): cfg = { 'version': '2.2', 'services': { 'web': { 'image': 'example/web', 'command': 'true', 'environment': {'FOO': 'Y', 'BAR': 'on'} } } } config_dict = config.load(build_config_details(cfg)) serialized_config = serialize_config(config_dict) assert 'command: "true"\n' in serialized_config assert 'FOO: "Y"\n' in serialized_config assert 'BAR: "on"\n' in serialized_config def test_serialize_escape_dollar_sign(self): cfg = { 'version': '2.2', 'services': { 'web': { 'image': 'busybox', 'command': 'echo $$FOO', 'environment': { 'CURRENCY': '$$' }, 'entrypoint': ['$$SHELL', '-c'], } } } config_dict = config.load(build_config_details(cfg)) serialized_config = yaml.safe_load(serialize_config(config_dict)) serialized_service = serialized_config['services']['web'] assert serialized_service['environment']['CURRENCY'] == '$$' assert serialized_service['command'] == 'echo $$FOO' assert serialized_service['entrypoint'][0] == '$$SHELL' def test_serialize_escape_dont_interpolate(self): cfg = { 'version': '2.2', 'services': { 'web': { 'image': 'busybox', 'command': 'echo $FOO', 'environment': { 'CURRENCY': '$' }, 'env_file': ['tests/fixtures/env/three.env'], 'entrypoint': ['$SHELL', '-c'], } } } config_dict = config.load(build_config_details(cfg, working_dir='.'), interpolate=False) serialized_config = yaml.safe_load(serialize_config(config_dict, escape_dollar=False)) serialized_service = serialized_config['services']['web'] assert serialized_service['environment']['CURRENCY'] == '$' # Values coming from env_files are not allowed to have variables assert serialized_service['environment']['FOO'] == 'NO $$ENV 
VAR'
        assert serialized_service['environment']['DOO'] == 'NO $${ENV} VAR'
        assert serialized_service['command'] == 'echo $FOO'
        assert serialized_service['entrypoint'][0] == '$SHELL'

    def test_serialize_unicode_values(self):
        cfg = {
            'version': '2.3',
            'services': {
                'web': {
                    'image': 'busybox',
                    'command': 'echo 十六夜 咲夜'
                }
            }
        }
        config_dict = config.load(build_config_details(cfg))
        serialized_config = yaml.safe_load(serialize_config(config_dict))
        serialized_service = serialized_config['services']['web']
        assert serialized_service['command'] == 'echo 十六夜 咲夜'

    def test_serialize_external_false(self):
        cfg = {
            'version': '3.4',
            'volumes': {
                'test': {
                    'name': 'test-false',
                    'external': False
                }
            }
        }
        config_dict = config.load(build_config_details(cfg))
        serialized_config = yaml.safe_load(serialize_config(config_dict))
        serialized_volume = serialized_config['volumes']['test']
        assert serialized_volume['external'] is False

# ---- compose-1.29.2/tests/unit/config/environment_test.py ----

import codecs
import os
import shutil
import tempfile

from ddt import data
from ddt import ddt
from ddt import unpack

from compose.config.environment import env_vars_from_file
from compose.config.environment import Environment
from tests import unittest


@ddt
class EnvironmentTest(unittest.TestCase):
    @classmethod
    def test_get_simple(self):
        env = Environment({
            'FOO': 'bar',
            'BAR': '1',
            'BAZ': ''
        })
        assert env.get('FOO') == 'bar'
        assert env.get('BAR') == '1'
        assert env.get('BAZ') == ''

    @classmethod
    def test_get_undefined(self):
        env = Environment({
            'FOO': 'bar'
        })
        assert env.get('FOOBAR') is None

    @classmethod
    def test_get_boolean(self):
        env = Environment({
            'FOO': '',
            'BAR': '0',
            'BAZ': 'FALSE',
            'FOOBAR': 'true',
        })
        assert env.get_boolean('FOO') is False
        assert env.get_boolean('BAR') is False
        assert env.get_boolean('BAZ') is False
        assert env.get_boolean('FOOBAR') is True
        assert env.get_boolean('UNDEFINED') is False

    @data(
        ('unicode exclude test', '\ufeffPARK_BOM=박봄\n', {'PARK_BOM': '박봄'}),
        ('export prefixed test', 'export PREFIXED_VARS=yes\n', {"PREFIXED_VARS": "yes"}),
        ('quoted vars test', "QUOTED_VARS='yes'\n", {"QUOTED_VARS": "yes"}),
        ('double quoted vars test', 'DOUBLE_QUOTED_VARS="yes"\n', {"DOUBLE_QUOTED_VARS": "yes"}),
        ('extra spaces test', 'SPACES_VARS = "yes"\n', {"SPACES_VARS": "yes"}),
    )
    @unpack
    def test_env_vars(self, test_name, content, expected):
        tmpdir = tempfile.mkdtemp('env_file')
        self.addCleanup(shutil.rmtree, tmpdir)
        file_abs_path = str(os.path.join(tmpdir, ".env"))
        with codecs.open(file_abs_path, 'w', encoding='utf-8') as f:
            f.write(content)
        assert env_vars_from_file(file_abs_path) == expected, '"{}" Failed'.format(test_name)

# ---- compose-1.29.2/tests/unit/config/interpolation_test.py ----

import pytest

from compose.config.environment import Environment
from compose.config.errors import ConfigurationError
from compose.config.interpolation import interpolate_environment_variables
from compose.config.interpolation import Interpolator
from compose.config.interpolation import InvalidInterpolation
from compose.config.interpolation import TemplateWithDefaults
from compose.config.interpolation import UnsetRequiredSubstitution
from compose.const import COMPOSE_SPEC as VERSION


@pytest.fixture
def mock_env():
    return Environment({
        'USER': 'jenny',
        'FOO': 'bar',
        'TRUE': 'True',
        'FALSE': 'OFF',
        'POSINT': '50',
        'NEGINT': '-200',
        'FLOAT': '0.145',
        'MODE': '0600',
        'BYTES': '512m',
    })


@pytest.fixture
def variable_mapping(): return Environment({'FOO': 'first', 'BAR': ''}) @pytest.fixture def defaults_interpolator(variable_mapping): return Interpolator(TemplateWithDefaults, variable_mapping).interpolate def test_interpolate_environment_variables_in_services(mock_env): services = { 'servicea': { 'image': 'example:${USER}', 'volumes': ['$FOO:/target'], 'logging': { 'driver': '${FOO}', 'options': { 'user': '$USER', } } } } expected = { 'servicea': { 'image': 'example:jenny', 'volumes': ['bar:/target'], 'logging': { 'driver': 'bar', 'options': { 'user': 'jenny', } } } } value = interpolate_environment_variables(VERSION, services, 'service', mock_env) assert value == expected def test_interpolate_environment_variables_in_volumes(mock_env): volumes = { 'data': { 'driver': '$FOO', 'driver_opts': { 'max': 2, 'user': '${USER}' } }, 'other': None, } expected = { 'data': { 'driver': 'bar', 'driver_opts': { 'max': 2, 'user': 'jenny' } }, 'other': {}, } value = interpolate_environment_variables(VERSION, volumes, 'volume', mock_env) assert value == expected def test_interpolate_environment_variables_in_secrets(mock_env): secrets = { 'secretservice': { 'file': '$FOO', 'labels': { 'max': 2, 'user': '${USER}' } }, 'other': None, } expected = { 'secretservice': { 'file': 'bar', 'labels': { 'max': '2', 'user': 'jenny' } }, 'other': {}, } value = interpolate_environment_variables(VERSION, secrets, 'secret', mock_env) assert value == expected def test_interpolate_environment_services_convert_types_v2(mock_env): entry = { 'service1': { 'blkio_config': { 'weight': '${POSINT}', 'weight_device': [{'file': '/dev/sda1', 'weight': '${POSINT}'}] }, 'cpus': '${FLOAT}', 'cpu_count': '$POSINT', 'healthcheck': { 'retries': '${POSINT:-3}', 'disable': '${FALSE}', 'command': 'true' }, 'mem_swappiness': '${DEFAULT:-127}', 'oom_score_adj': '${NEGINT}', 'scale': '${POSINT}', 'ulimits': { 'nproc': '${POSINT}', 'nofile': { 'soft': '${POSINT}', 'hard': '${DEFAULT:-40000}' }, }, 'privileged': '${TRUE}', 'read_only': '${DEFAULT:-no}', 'tty': '${DEFAULT:-N}', 'stdin_open': '${DEFAULT-on}', 'volumes': [ {'type': 'tmpfs', 'target': '/target', 'tmpfs': {'size': '$BYTES'}} ] } } expected = { 'service1': { 'blkio_config': { 'weight': 50, 'weight_device': [{'file': '/dev/sda1', 'weight': 50}] }, 'cpus': 0.145, 'cpu_count': 50, 'healthcheck': { 'retries': 50, 'disable': False, 'command': 'true' }, 'mem_swappiness': 127, 'oom_score_adj': -200, 'scale': 50, 'ulimits': { 'nproc': 50, 'nofile': { 'soft': 50, 'hard': 40000 }, }, 'privileged': True, 'read_only': False, 'tty': False, 'stdin_open': True, 'volumes': [ {'type': 'tmpfs', 'target': '/target', 'tmpfs': {'size': 536870912}} ] } } value = interpolate_environment_variables(VERSION, entry, 'service', mock_env) assert value == expected def test_interpolate_environment_services_convert_types_v3(mock_env): entry = { 'service1': { 'healthcheck': { 'retries': '${POSINT:-3}', 'disable': '${FALSE}', 'command': 'true' }, 'ulimits': { 'nproc': '${POSINT}', 'nofile': { 'soft': '${POSINT}', 'hard': '${DEFAULT:-40000}' }, }, 'privileged': '${TRUE}', 'read_only': '${DEFAULT:-no}', 'tty': '${DEFAULT:-N}', 'stdin_open': '${DEFAULT-on}', 'deploy': { 'update_config': { 'parallelism': '${DEFAULT:-2}', 'max_failure_ratio': '${FLOAT}', }, 'restart_policy': { 'max_attempts': '$POSINT', }, 'replicas': '${DEFAULT-3}' }, 'ports': [{'target': '${POSINT}', 'published': '${DEFAULT:-5000}'}], 'configs': [{'mode': '${MODE}', 'source': 'config1'}], 'secrets': [{'mode': '${MODE}', 'source': 'secret1'}], } } expected = 
{ 'service1': { 'healthcheck': { 'retries': 50, 'disable': False, 'command': 'true' }, 'ulimits': { 'nproc': 50, 'nofile': { 'soft': 50, 'hard': 40000 }, }, 'privileged': True, 'read_only': False, 'tty': False, 'stdin_open': True, 'deploy': { 'update_config': { 'parallelism': 2, 'max_failure_ratio': 0.145, }, 'restart_policy': { 'max_attempts': 50, }, 'replicas': 3 }, 'ports': [{'target': 50, 'published': 5000}], 'configs': [{'mode': 0o600, 'source': 'config1'}], 'secrets': [{'mode': 0o600, 'source': 'secret1'}], } } value = interpolate_environment_variables(VERSION, entry, 'service', mock_env) assert value == expected def test_interpolate_environment_services_convert_types_invalid(mock_env): entry = {'service1': {'privileged': '${POSINT}'}} with pytest.raises(ConfigurationError) as exc: interpolate_environment_variables(VERSION, entry, 'service', mock_env) assert 'Error while attempting to convert service.service1.privileged to '\ 'appropriate type: "50" is not a valid boolean value' in exc.exconly() entry = {'service1': {'cpus': '${TRUE}'}} with pytest.raises(ConfigurationError) as exc: interpolate_environment_variables(VERSION, entry, 'service', mock_env) assert 'Error while attempting to convert service.service1.cpus to '\ 'appropriate type: "True" is not a valid float' in exc.exconly() entry = {'service1': {'ulimits': {'nproc': '${FLOAT}'}}} with pytest.raises(ConfigurationError) as exc: interpolate_environment_variables(VERSION, entry, 'service', mock_env) assert 'Error while attempting to convert service.service1.ulimits.nproc to '\ 'appropriate type: "0.145" is not a valid integer' in exc.exconly() def test_interpolate_environment_network_convert_types(mock_env): entry = { 'network1': { 'external': '${FALSE}', 'attachable': '${TRUE}', 'internal': '${DEFAULT:-false}' } } expected = { 'network1': { 'external': False, 'attachable': True, 'internal': False, } } value = interpolate_environment_variables(VERSION, entry, 'network', mock_env) assert value == expected def test_interpolate_environment_external_resource_convert_types(mock_env): entry = { 'resource1': { 'external': '${TRUE}', } } expected = { 'resource1': { 'external': True, } } value = interpolate_environment_variables(VERSION, entry, 'network', mock_env) assert value == expected value = interpolate_environment_variables(VERSION, entry, 'volume', mock_env) assert value == expected value = interpolate_environment_variables(VERSION, entry, 'secret', mock_env) assert value == expected value = interpolate_environment_variables(VERSION, entry, 'config', mock_env) assert value == expected def test_interpolate_service_name_uses_dot(mock_env): entry = { 'service.1': { 'image': 'busybox', 'ulimits': { 'nproc': '${POSINT}', 'nofile': { 'soft': '${POSINT}', 'hard': '${DEFAULT:-40000}' }, }, } } expected = { 'service.1': { 'image': 'busybox', 'ulimits': { 'nproc': 50, 'nofile': { 'soft': 50, 'hard': 40000 }, }, } } value = interpolate_environment_variables(VERSION, entry, 'service', mock_env) assert value == expected def test_escaped_interpolation(defaults_interpolator): assert defaults_interpolator('$${foo}') == '${foo}' def test_invalid_interpolation(defaults_interpolator): with pytest.raises(InvalidInterpolation): defaults_interpolator('${') with pytest.raises(InvalidInterpolation): defaults_interpolator('$}') with pytest.raises(InvalidInterpolation): defaults_interpolator('${}') with pytest.raises(InvalidInterpolation): defaults_interpolator('${ }') with pytest.raises(InvalidInterpolation): defaults_interpolator('${ foo}') with 
pytest.raises(InvalidInterpolation): defaults_interpolator('${foo }') with pytest.raises(InvalidInterpolation): defaults_interpolator('${foo!}') def test_interpolate_missing_no_default(defaults_interpolator): assert defaults_interpolator("This ${missing} var") == "This var" assert defaults_interpolator("This ${BAR} var") == "This var" def test_interpolate_with_value(defaults_interpolator): assert defaults_interpolator("This $FOO var") == "This first var" assert defaults_interpolator("This ${FOO} var") == "This first var" def test_interpolate_missing_with_default(defaults_interpolator): assert defaults_interpolator("ok ${missing:-def}") == "ok def" assert defaults_interpolator("ok ${missing-def}") == "ok def" def test_interpolate_with_empty_and_default_value(defaults_interpolator): assert defaults_interpolator("ok ${BAR:-def}") == "ok def" assert defaults_interpolator("ok ${BAR-def}") == "ok " def test_interpolate_mandatory_values(defaults_interpolator): assert defaults_interpolator("ok ${FOO:?bar}") == "ok first" assert defaults_interpolator("ok ${FOO?bar}") == "ok first" assert defaults_interpolator("ok ${BAR?bar}") == "ok " with pytest.raises(UnsetRequiredSubstitution) as e: defaults_interpolator("not ok ${BAR:?high bar}") assert e.value.err == 'high bar' with pytest.raises(UnsetRequiredSubstitution) as e: defaults_interpolator("not ok ${BAZ?dropped the bazz}") assert e.value.err == 'dropped the bazz' def test_interpolate_mandatory_no_err_msg(defaults_interpolator): with pytest.raises(UnsetRequiredSubstitution) as e: defaults_interpolator("not ok ${BAZ?}") assert e.value.err == 'BAZ' def test_interpolate_mixed_separators(defaults_interpolator): assert defaults_interpolator("ok ${BAR:-/non:-alphanumeric}") == "ok /non:-alphanumeric" assert defaults_interpolator("ok ${BAR:-:?wwegegr??:?}") == "ok :?wwegegr??:?" 
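    # Illustrative aside (not part of the original test suite), using the same
    # defaults_interpolator fixture where FOO='first' and BAR='': the ':-'
    # form treats a set-but-empty variable as unset, while the bare '-' form
    # substitutes only when the variable is missing entirely.
    assert defaults_interpolator("${FOO:-def}") == "first"
    assert defaults_interpolator("${BAR:-def}") == "def"
    assert defaults_interpolator("${BAR-def}") == ""
    assert defaults_interpolator("${missing-def}") == "def"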
    assert defaults_interpolator("ok ${BAR-:-hello}") == 'ok '

    with pytest.raises(UnsetRequiredSubstitution) as e:
        defaults_interpolator("not ok ${BAR:?xazz:-redf}")
    assert e.value.err == 'xazz:-redf'

    assert defaults_interpolator("ok ${BAR?...:?bar}") == "ok "


def test_unbraced_separators(defaults_interpolator):
    assert defaults_interpolator("ok $FOO:-bar") == "ok first:-bar"
    assert defaults_interpolator("ok $BAZ?error") == "ok ?error"


def test_interpolate_unicode_values():
    variable_mapping = {
        'FOO': '十六夜 咲夜'.encode(),
        'BAR': '十六夜 咲夜'
    }
    interpol = Interpolator(TemplateWithDefaults, variable_mapping).interpolate

    # NOTE: these two comparisons are bare expressions rather than
    # assertions, so as written they do not actually verify anything.
    interpol("$FOO") == '十六夜 咲夜'
    interpol("${BAR}") == '十六夜 咲夜'


def test_interpolate_no_fallthrough():
    # Test regression on docker/compose#5829
    variable_mapping = {
        'TEST:-': 'hello',
        'TEST-': 'hello',
    }
    interpol = Interpolator(TemplateWithDefaults, variable_mapping).interpolate

    assert interpol('${TEST:-}') == ''
    assert interpol('${TEST-}') == ''

# ---- compose-1.29.2/tests/unit/config/sort_services_test.py ----

import pytest

from compose.config.errors import DependencyError
from compose.config.sort_services import sort_service_dicts
from compose.config.types import VolumeFromSpec


class TestSortService:
    def test_sort_service_dicts_1(self):
        services = [
            {'links': ['redis'], 'name': 'web'},
            {'name': 'grunt'},
            {'name': 'redis'}
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'grunt'
        assert sorted_services[1]['name'] == 'redis'
        assert sorted_services[2]['name'] == 'web'

    def test_sort_service_dicts_2(self):
        services = [
            {'links': ['redis', 'postgres'], 'name': 'web'},
            {'name': 'postgres', 'links': ['redis']},
            {'name': 'redis'}
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'redis'
        assert sorted_services[1]['name'] == 'postgres'
        assert sorted_services[2]['name'] == 'web'

    def test_sort_service_dicts_3(self):
        services = [
            {'name': 'child'},
            {'name': 'parent', 'links': ['child']},
            {'links': ['parent'], 'name': 'grandparent'},
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'

    def test_sort_service_dicts_4(self):
        services = [
            {'name': 'child'},
            {'name': 'parent', 'volumes_from': [VolumeFromSpec('child', 'rw', 'service')]},
            {'links': ['parent'], 'name': 'grandparent'},
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'

    def test_sort_service_dicts_5(self):
        services = [
            {'links': ['parent'], 'name': 'grandparent'},
            {'name': 'parent', 'network_mode': 'service:child'},
            {'name': 'child'}
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'

    def test_sort_service_dicts_6(self):
        services = [
            {'links': ['parent'], 'name': 'grandparent'},
            {'name': 'parent', 'volumes_from': [VolumeFromSpec('child', 'ro', 'service')]},
            {'name': 'child'}
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 3
        assert sorted_services[0]['name'] == 'child'
        assert sorted_services[1]['name'] == 'parent'
        assert sorted_services[2]['name'] == 'grandparent'

    def test_sort_service_dicts_7(self):
        services = [
            {'network_mode': 'service:three', 'name': 'four'},
            {'links': ['two'], 'name': 'three'},
            {'name': 'two', 'volumes_from': [VolumeFromSpec('one', 'rw', 'service')]},
            {'name': 'one'}
        ]
        sorted_services = sort_service_dicts(services)
        assert len(sorted_services) == 4
        assert sorted_services[0]['name'] == 'one'
        assert sorted_services[1]['name'] == 'two'
        assert sorted_services[2]['name'] == 'three'
        assert sorted_services[3]['name'] == 'four'

    def test_sort_service_dicts_circular_imports(self):
        services = [
            {'links': ['redis'], 'name': 'web'},
            {'name': 'redis', 'links': ['web']},
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'redis' in exc.exconly()
        assert 'web' in exc.exconly()

    def test_sort_service_dicts_circular_imports_2(self):
        services = [
            {'links': ['postgres', 'redis'], 'name': 'web'},
            {'name': 'redis', 'links': ['web']},
            {'name': 'postgres'}
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'redis' in exc.exconly()
        assert 'web' in exc.exconly()

    def test_sort_service_dicts_circular_imports_3(self):
        services = [
            {'links': ['b'], 'name': 'a'},
            {'name': 'b', 'links': ['c']},
            {'name': 'c', 'links': ['a']}
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'a' in exc.exconly()
        assert 'b' in exc.exconly()

    def test_sort_service_dicts_self_imports(self):
        services = [
            {'links': ['web'], 'name': 'web'},
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'web' in exc.exconly()

    def test_sort_service_dicts_depends_on_self(self):
        services = [
            {'depends_on': ['web'], 'name': 'web'},
        ]
        with pytest.raises(DependencyError) as exc:
            sort_service_dicts(services)
        assert 'A service can not depend on itself: web' in exc.exconly()

# ---- compose-1.29.2/tests/unit/config/types_test.py ----

import pytest

from compose.config.errors import ConfigurationError
from compose.config.types import parse_extra_hosts
from compose.config.types import ServicePort
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import COMPOSE_SPEC as VERSION
from compose.const import COMPOSEFILE_V1 as V1


def test_parse_extra_hosts_list():
    expected = {'www.example.com': '192.168.0.17'}
    assert parse_extra_hosts(["www.example.com:192.168.0.17"]) == expected

    expected = {'www.example.com': '192.168.0.17'}
    assert parse_extra_hosts(["www.example.com: 192.168.0.17"]) == expected

    assert parse_extra_hosts([
        "www.example.com: 192.168.0.17",
        "static.example.com:192.168.0.19",
        "api.example.com: 192.168.0.18",
        "v6.example.com: ::1"
    ]) == {
        'www.example.com': '192.168.0.17',
        'static.example.com': '192.168.0.19',
        'api.example.com': '192.168.0.18',
        'v6.example.com': '::1'
    }


def test_parse_extra_hosts_dict():
    assert parse_extra_hosts({
        'www.example.com': '192.168.0.17',
        'api.example.com': '192.168.0.18'
    }) == {
        'www.example.com': '192.168.0.17',
        'api.example.com': '192.168.0.18'
    }


class TestServicePort:
    def test_parse_dict(self):
        data = {
            'target': 8000,
            'published': 8000,
            'protocol': 'udp',
            'mode': 'global',
        }
        ports = ServicePort.parse(data)
        assert len(ports) == 1
        assert ports[0].repr() == data

    def test_parse_simple_target_port(self):
        ports = ServicePort.parse(8000)
        assert len(ports) == 1
        assert ports[0].target == 8000

    def
test_parse_complete_port_definition(self): port_def = '1.1.1.1:3000:3000/udp' ports = ServicePort.parse(port_def) assert len(ports) == 1 assert ports[0].repr() == { 'target': 3000, 'published': 3000, 'external_ip': '1.1.1.1', 'protocol': 'udp', } assert ports[0].legacy_repr() == port_def def test_parse_ext_ip_no_published_port(self): port_def = '1.1.1.1::3000' ports = ServicePort.parse(port_def) assert len(ports) == 1 assert ports[0].legacy_repr() == port_def + '/tcp' assert ports[0].repr() == { 'target': 3000, 'external_ip': '1.1.1.1', } def test_repr_published_port_0(self): port_def = '0:4000' ports = ServicePort.parse(port_def) assert len(ports) == 1 assert ports[0].legacy_repr() == port_def + '/tcp' def test_parse_port_range(self): ports = ServicePort.parse('25000-25001:4000-4001') assert len(ports) == 2 reprs = [p.repr() for p in ports] assert { 'target': 4000, 'published': 25000 } in reprs assert { 'target': 4001, 'published': 25001 } in reprs def test_parse_port_publish_range(self): ports = ServicePort.parse('4440-4450:4000') assert len(ports) == 1 reprs = [p.repr() for p in ports] assert { 'target': 4000, 'published': '4440-4450' } in reprs def test_parse_invalid_port(self): port_def = '4000p' with pytest.raises(ConfigurationError): ServicePort.parse(port_def) def test_parse_invalid_publish_range(self): port_def = '-4000:4000' with pytest.raises(ConfigurationError): ServicePort.parse(port_def) port_def = 'asdf:4000' with pytest.raises(ConfigurationError): ServicePort.parse(port_def) port_def = '1234-12f:4000' with pytest.raises(ConfigurationError): ServicePort.parse(port_def) port_def = '1234-1235-1239:4000' with pytest.raises(ConfigurationError): ServicePort.parse(port_def) class TestVolumeSpec: def test_parse_volume_spec_only_one_path(self): spec = VolumeSpec.parse('/the/volume') assert spec == (None, '/the/volume', 'rw') def test_parse_volume_spec_internal_and_external(self): spec = VolumeSpec.parse('external:interval') assert spec == ('external', 'interval', 'rw') def test_parse_volume_spec_with_mode(self): spec = VolumeSpec.parse('external:interval:ro') assert spec == ('external', 'interval', 'ro') spec = VolumeSpec.parse('external:interval:z') assert spec == ('external', 'interval', 'z') def test_parse_volume_spec_too_many_parts(self): with pytest.raises(ConfigurationError) as exc: VolumeSpec.parse('one:two:three:four') assert 'has incorrect format' in exc.exconly() def test_parse_volume_windows_absolute_path_normalized(self): windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro" assert VolumeSpec._parse_win32(windows_path, True) == ( "/c/Users/me/Documents/shiny/config", "/opt/shiny/config", "ro" ) def test_parse_volume_windows_absolute_path_native(self): windows_path = "c:\\Users\\me\\Documents\\shiny\\config:/opt/shiny/config:ro" assert VolumeSpec._parse_win32(windows_path, False) == ( "c:\\Users\\me\\Documents\\shiny\\config", "/opt/shiny/config", "ro" ) def test_parse_volume_windows_internal_path_normalized(self): windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro' assert VolumeSpec._parse_win32(windows_path, True) == ( '/c/Users/reimu/scarlet', 'C:\\scarlet\\app', 'ro' ) def test_parse_volume_windows_internal_path_native(self): windows_path = 'C:\\Users\\reimu\\scarlet:C:\\scarlet\\app:ro' assert VolumeSpec._parse_win32(windows_path, False) == ( 'C:\\Users\\reimu\\scarlet', 'C:\\scarlet\\app', 'ro' ) def test_parse_volume_windows_just_drives_normalized(self): windows_path = 'E:\\:C:\\:ro' assert VolumeSpec._parse_win32(windows_path, 
True) == ( '/e/', 'C:\\', 'ro' ) def test_parse_volume_windows_just_drives_native(self): windows_path = 'E:\\:C:\\:ro' assert VolumeSpec._parse_win32(windows_path, False) == ( 'E:\\', 'C:\\', 'ro' ) def test_parse_volume_windows_mixed_notations_normalized(self): windows_path = 'C:\\Foo:/root/foo' assert VolumeSpec._parse_win32(windows_path, True) == ( '/c/Foo', '/root/foo', 'rw' ) def test_parse_volume_windows_mixed_notations_native(self): windows_path = 'C:\\Foo:/root/foo' assert VolumeSpec._parse_win32(windows_path, False) == ( 'C:\\Foo', '/root/foo', 'rw' ) class TestVolumesFromSpec: services = ['servicea', 'serviceb'] def test_parse_v1_from_service(self): volume_from = VolumeFromSpec.parse('servicea', self.services, V1) assert volume_from == VolumeFromSpec('servicea', 'rw', 'service') def test_parse_v1_from_container(self): volume_from = VolumeFromSpec.parse('foo:ro', self.services, V1) assert volume_from == VolumeFromSpec('foo', 'ro', 'container') def test_parse_v1_invalid(self): with pytest.raises(ConfigurationError): VolumeFromSpec.parse('unknown:format:ro', self.services, V1) def test_parse_v2_from_service(self): volume_from = VolumeFromSpec.parse('servicea', self.services, VERSION) assert volume_from == VolumeFromSpec('servicea', 'rw', 'service') def test_parse_v2_from_service_with_mode(self): volume_from = VolumeFromSpec.parse('servicea:ro', self.services, VERSION) assert volume_from == VolumeFromSpec('servicea', 'ro', 'service') def test_parse_v2_from_container(self): volume_from = VolumeFromSpec.parse('container:foo', self.services, VERSION) assert volume_from == VolumeFromSpec('foo', 'rw', 'container') def test_parse_v2_from_container_with_mode(self): volume_from = VolumeFromSpec.parse('container:foo:ro', self.services, VERSION) assert volume_from == VolumeFromSpec('foo', 'ro', 'container') def test_parse_v2_invalid_type(self): with pytest.raises(ConfigurationError) as exc: VolumeFromSpec.parse('bogus:foo:ro', self.services, VERSION) assert "Unknown volumes_from type 'bogus'" in exc.exconly() def test_parse_v2_invalid(self): with pytest.raises(ConfigurationError): VolumeFromSpec.parse('unknown:format:ro', self.services, VERSION) compose-1.29.2/tests/unit/container_test.py000066400000000000000000000207331404620552300207740ustar00rootroot00000000000000import docker from .. import mock from .. 
import unittest from ..helpers import BUSYBOX_IMAGE_WITH_TAG from compose.const import LABEL_ONE_OFF from compose.const import LABEL_SLUG from compose.container import Container from compose.container import get_container_name class ContainerTest(unittest.TestCase): def setUp(self): self.container_id = "abcabcabcbabc12345" self.container_dict = { "Id": self.container_id, "Image": BUSYBOX_IMAGE_WITH_TAG, "Command": "top", "Created": 1387384730, "Status": "Up 8 seconds", "Ports": None, "SizeRw": 0, "SizeRootFs": 0, "Names": ["/composetest_db_1", "/composetest_web_1/db"], "NetworkSettings": { "Ports": {}, }, "Config": { "Labels": { "com.docker.compose.project": "composetest", "com.docker.compose.service": "web", "com.docker.compose.container-number": "7", }, } } def test_from_ps(self): container = Container.from_ps(None, self.container_dict, has_been_inspected=True) assert container.dictionary == { "Id": self.container_id, "Image": BUSYBOX_IMAGE_WITH_TAG, "Name": "/composetest_db_1", } def test_from_ps_prefixed(self): self.container_dict['Names'] = [ '/swarm-host-1' + n for n in self.container_dict['Names'] ] container = Container.from_ps( None, self.container_dict, has_been_inspected=True) assert container.dictionary == { "Id": self.container_id, "Image": BUSYBOX_IMAGE_WITH_TAG, "Name": "/composetest_db_1", } def test_environment(self): container = Container(None, { 'Id': 'abc', 'Config': { 'Env': [ 'FOO=BAR', 'BAZ=DOGE', ] } }, has_been_inspected=True) assert container.environment == { 'FOO': 'BAR', 'BAZ': 'DOGE', } def test_number(self): container = Container(None, self.container_dict, has_been_inspected=True) assert container.number == 7 def test_name(self): container = Container.from_ps(None, self.container_dict, has_been_inspected=True) assert container.name == "composetest_db_1" def test_name_without_project(self): self.container_dict['Name'] = "/composetest_web_7" container = Container(None, self.container_dict, has_been_inspected=True) assert container.name_without_project == "web_7" def test_name_without_project_custom_container_name(self): self.container_dict['Name'] = "/custom_name_of_container" container = Container(None, self.container_dict, has_been_inspected=True) assert container.name_without_project == "custom_name_of_container" def test_name_without_project_one_off(self): self.container_dict['Name'] = "/composetest_web_092cd63296f" self.container_dict['Config']['Labels'][LABEL_SLUG] = ( "092cd63296fdc446ad432d3905dd1fcbe12a2ba6b52" ) self.container_dict['Config']['Labels'][LABEL_ONE_OFF] = 'True' container = Container(None, self.container_dict, has_been_inspected=True) assert container.name_without_project == 'web_092cd63296fd' def test_inspect_if_not_inspected(self): mock_client = mock.create_autospec(docker.APIClient) container = Container(mock_client, dict(Id="the_id")) container.inspect_if_not_inspected() mock_client.inspect_container.assert_called_once_with("the_id") assert container.dictionary == mock_client.inspect_container.return_value assert container.has_been_inspected container.inspect_if_not_inspected() assert mock_client.inspect_container.call_count == 1 def test_human_readable_ports_none(self): container = Container(None, self.container_dict, has_been_inspected=True) assert container.human_readable_ports == '' def test_human_readable_ports_public_and_private(self): self.container_dict['NetworkSettings']['Ports'].update({ "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}], "45453/tcp": [], }) container = Container(None, self.container_dict, 
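        # "45453/tcp" has no host binding above, so it renders without a mapping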
        expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
        assert container.human_readable_ports == expected

    def test_get_local_port(self):
        self.container_dict['NetworkSettings']['Ports'].update({
            "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
        })
        container = Container(None, self.container_dict, has_been_inspected=True)

        assert container.get_local_port(45454, protocol='tcp') == '0.0.0.0:49197'

    def test_human_readable_states_no_health(self):
        container = Container(None, {
            "State": {
                "Status": "running",
                "Running": True,
                "Paused": False,
                "Restarting": False,
                "OOMKilled": False,
                "Dead": False,
                "Pid": 7623,
                "ExitCode": 0,
                "Error": "",
                "StartedAt": "2018-01-29T00:34:25.2052414Z",
                "FinishedAt": "0001-01-01T00:00:00Z"
            },
        }, has_been_inspected=True)
        expected = "Up"
        assert container.human_readable_state == expected

    def test_human_readable_states_starting(self):
        container = Container(None, {
            "State": {
                "Status": "running",
                "Running": True,
                "Paused": False,
                "Restarting": False,
                "OOMKilled": False,
                "Dead": False,
                "Pid": 11744,
                "ExitCode": 0,
                "Error": "",
                "StartedAt": "2018-02-03T07:56:20.3591233Z",
                "FinishedAt": "2018-01-31T08:56:11.0505228Z",
                "Health": {
                    "Status": "starting",
                    "FailingStreak": 0,
                    "Log": []
                }
            }
        }, has_been_inspected=True)
        expected = "Up (health: starting)"
        assert container.human_readable_state == expected

    def test_human_readable_states_healthy(self):
        container = Container(None, {
            "State": {
                "Status": "running",
                "Running": True,
                "Paused": False,
                "Restarting": False,
                "OOMKilled": False,
                "Dead": False,
                "Pid": 5674,
                "ExitCode": 0,
                "Error": "",
                "StartedAt": "2018-02-03T08:32:05.3281831Z",
                "FinishedAt": "2018-02-03T08:11:35.7872706Z",
                "Health": {
                    "Status": "healthy",
                    "FailingStreak": 0,
                    "Log": []
                }
            }
        }, has_been_inspected=True)
        expected = "Up (healthy)"
        assert container.human_readable_state == expected

    def test_get(self):
        container = Container(None, {
            "Status": "Up 8 seconds",
            "HostConfig": {
                "VolumesFrom": ["volume_id"]
            },
        }, has_been_inspected=True)

        assert container.get('Status') == "Up 8 seconds"
        assert container.get('HostConfig.VolumesFrom') == ["volume_id"]
        assert container.get('Foo.Bar.DoesNotExist') is None

    def test_short_id(self):
        container = Container(None, self.container_dict, has_been_inspected=True)
        assert container.short_id == self.container_id[:12]


class GetContainerNameTestCase(unittest.TestCase):

    def test_get_container_name(self):
        assert get_container_name({}) is None
        assert get_container_name({'Name': 'myproject_db_1'}) == 'myproject_db_1'
        assert get_container_name(
            {'Names': ['/myproject_db_1', '/myproject_web_1/db']}
        ) == 'myproject_db_1'
        assert get_container_name({
            'Names': [
                '/swarm-host-1/myproject_db_1',
                '/swarm-host-1/myproject_web_1/db'
            ]
        }) == 'myproject_db_1'

compose-1.29.2/tests/unit/metrics/000077500000000000000000000000001404620552300170425ustar00rootroot00000000000000
compose-1.29.2/tests/unit/metrics/__init__.py000066400000000000000000000000001404620552300211410ustar00rootroot00000000000000
compose-1.29.2/tests/unit/metrics/metrics_test.py000066400000000000000000000020401404620552300221150ustar00rootroot00000000000000

import unittest

from compose.metrics.client import MetricsCommand
from compose.metrics.client import Status


class MetricsTest(unittest.TestCase):
    @classmethod
    def test_metrics(cls):
        assert MetricsCommand('up', 'moby').to_map() == {
            'command': 'compose up',
            'context': 'moby',
            'status': 'success',
            'source': 'docker-compose',
        }
        assert MetricsCommand('down', 'local').to_map() == {
            'command': 'compose down',
            'context': 'local',
            'status': 'success',
            'source': 'docker-compose',
        }
        assert MetricsCommand('help', 'aci', Status.FAILURE).to_map() == {
            'command': 'compose help',
            'context': 'aci',
            'status': 'failure',
            'source': 'docker-compose',
        }
        assert MetricsCommand('run', 'ecs').to_map() == {
            'command': 'compose run',
            'context': 'ecs',
            'status': 'success',
            'source': 'docker-compose',
        }

compose-1.29.2/tests/unit/network_test.py000066400000000000000000000142171404620552300205030ustar00rootroot00000000000000

import pytest

from .. import mock
from .. import unittest
from compose.network import check_remote_network_config
from compose.network import Network
from compose.network import NetworkConfigChangedError


class NetworkTest(unittest.TestCase):
    def test_check_remote_network_config_success(self):
        options = {'com.docker.network.driver.foo': 'bar'}
        ipam_config = {
            'driver': 'default',
            'config': [
                {'subnet': '172.0.0.1/16', },
                {
                    'subnet': '156.0.0.1/25',
                    'gateway': '156.0.0.1',
                    'aux_addresses': ['11.0.0.1', '24.25.26.27'],
                    'ip_range': '156.0.0.1-254'
                }
            ],
            'options': {
                'iface': 'eth0',
            }
        }
        labels = {
            'com.project.tests.istest': 'true',
            'com.project.sound.track': 'way out of here',
        }
        remote_labels = labels.copy()
        remote_labels.update({
            'com.docker.compose.project': 'compose_test',
            'com.docker.compose.network': 'net1',
        })
        net = Network(
            None, 'compose_test', 'net1', 'bridge',
            options, enable_ipv6=True, ipam=ipam_config, labels=labels
        )
        check_remote_network_config(
            {
                'Driver': 'bridge',
                'Options': options,
                'EnableIPv6': True,
                'Internal': False,
                'Attachable': True,
                'IPAM': {
                    'Driver': 'default',
                    'Config': [{
                        'Subnet': '156.0.0.1/25',
                        'Gateway': '156.0.0.1',
                        'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
                        'IPRange': '156.0.0.1-254'
                    }, {
                        'Subnet': '172.0.0.1/16',
                        'Gateway': '172.0.0.1'
                    }],
                    'Options': {
                        'iface': 'eth0',
                    },
                },
                'Labels': remote_labels
            },
            net
        )

    def test_check_remote_network_config_whitelist(self):
        options = {'com.docker.network.driver.foo': 'bar'}
        remote_options = {
            'com.docker.network.driver.overlay.vxlanid_list': '257',
            'com.docker.network.driver.foo': 'bar',
            'com.docker.network.windowsshim.hnsid': 'aac3fd4887daaec1e3b',
        }
        net = Network(
            None, 'compose_test', 'net1', 'overlay',
            options
        )
        check_remote_network_config(
            {'Driver': 'overlay', 'Options': remote_options}, net
        )

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_check_remote_network_config_driver_mismatch(self):
        net = Network(None, 'compose_test', 'net1', 'overlay')
        with pytest.raises(NetworkConfigChangedError) as e:
            check_remote_network_config(
                {'Driver': 'bridge', 'Options': {}}, net
            )

        assert 'driver has changed' in str(e.value)

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_check_remote_network_config_options_mismatch(self):
        net = Network(None, 'compose_test', 'net1', 'overlay')
        with pytest.raises(NetworkConfigChangedError) as e:
            check_remote_network_config({'Driver': 'overlay', 'Options': {
                'com.docker.network.driver.foo': 'baz'
            }}, net)

        assert 'option "com.docker.network.driver.foo" has changed' in str(e.value)

    def test_check_remote_network_config_null_remote(self):
        net = Network(None, 'compose_test', 'net1', 'overlay')
        check_remote_network_config(
            {'Driver': 'overlay', 'Options': None}, net
        )

    def test_check_remote_network_config_null_remote_ipam_options(self):
        ipam_config = {
            'driver': 'default',
            'config': [
                {'subnet': '172.0.0.1/16', },
                {
                    'subnet': '156.0.0.1/25',
                    'gateway': '156.0.0.1',
                    'aux_addresses': ['11.0.0.1', '24.25.26.27'],
                    'ip_range': '156.0.0.1-254'
                }
            ]
        }
        net = Network(
            None, 'compose_test', 'net1', 'bridge',
            ipam=ipam_config,
        )
        check_remote_network_config(
            {
                'Driver': 'bridge',
                'Attachable': True,
                'IPAM': {
                    'Driver': 'default',
                    'Config': [{
                        'Subnet': '156.0.0.1/25',
                        'Gateway': '156.0.0.1',
                        'AuxiliaryAddresses': ['24.25.26.27', '11.0.0.1'],
                        'IPRange': '156.0.0.1-254'
                    }, {
                        'Subnet': '172.0.0.1/16',
                        'Gateway': '172.0.0.1'
                    }],
                    'Options': None
                },
            },
            net
        )

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_check_remote_network_labels_mismatch(self):
        net = Network(None, 'compose_test', 'net1', 'overlay', labels={
            'com.project.touhou.character': 'sakuya.izayoi'
        })
        remote = {
            'Driver': 'overlay',
            'Options': None,
            'Labels': {
                'com.docker.compose.network': 'net1',
                'com.docker.compose.project': 'compose_test',
                'com.project.touhou.character': 'marisa.kirisame',
            }
        }
        with mock.patch('compose.network.log') as mock_log:
            check_remote_network_config(remote, net)

        mock_log.warning.assert_called_once_with(mock.ANY)
        _, args, kwargs = mock_log.warning.mock_calls[0]
        assert 'label "com.project.touhou.character" has changed' in args[0]

    def test_remote_config_labels_none(self):
        remote = {'Labels': None}
        local = Network(None, 'test_project', 'test_network')
        check_remote_network_config(remote, local)

compose-1.29.2/tests/unit/parallel_test.py000066400000000000000000000112301404620552300205760ustar00rootroot00000000000000

import unittest
from threading import Lock

from docker.errors import APIError

from compose.cli.colors import AnsiMode
from compose.parallel import GlobalLimit
from compose.parallel import parallel_execute
from compose.parallel import parallel_execute_iter
from compose.parallel import ParallelStreamWriter
from compose.parallel import UpstreamError


web = 'web'
db = 'db'
data_volume = 'data_volume'
cache = 'cache'

objects = [web, db, data_volume, cache]

deps = {
    web: [db, cache],
    db: [data_volume],
    data_volume: [],
    cache: [],
}


def get_deps(obj):
    return [(dep, None) for dep in deps[obj]]


class ParallelTest(unittest.TestCase):

    def test_parallel_execute(self):
        results, errors = parallel_execute(
            objects=[1, 2, 3, 4, 5],
            func=lambda x: x * 2,
            get_name=str,
            msg="Doubling",
        )

        assert sorted(results) == [2, 4, 6, 8, 10]
        assert errors == {}

    def test_parallel_execute_with_limit(self):
        limit = 1
        tasks = 20
        lock = Lock()

        def f(obj):
            locked = lock.acquire(False)
            # we should always get the lock because we're the only thread running
            assert locked
            lock.release()
            return None

        results, errors = parallel_execute(
            objects=list(range(tasks)),
            func=f,
            get_name=str,
            msg="Testing",
            limit=limit,
        )

        assert results == tasks * [None]
        assert errors == {}

    def test_parallel_execute_with_global_limit(self):
        GlobalLimit.set_global_limit(1)
        self.addCleanup(GlobalLimit.set_global_limit, None)
        tasks = 20
        lock = Lock()

        def f(obj):
            locked = lock.acquire(False)
            # we should always get the lock because we're the only thread running
            assert locked
            lock.release()
            return None

        results, errors = parallel_execute(
            objects=list(range(tasks)),
            func=f,
            get_name=str,
            msg="Testing",
        )

        assert results == tasks * [None]
        assert errors == {}

    def test_parallel_execute_with_deps(self):
        log = []

        def process(x):
            log.append(x)

        parallel_execute(
            objects=objects,
            func=process,
            get_name=lambda obj: obj,
            msg="Processing",
            get_deps=get_deps,
        )

        assert sorted(log) == sorted(objects)

        assert log.index(data_volume) < log.index(db)
        assert log.index(db) < log.index(web)
        assert log.index(cache) < log.index(web)

    def test_parallel_execute_with_upstream_errors(self):
        log = []

        def process(x):
            if x is data_volume:
                raise APIError(None, None, "Something went wrong")
            log.append(x)

        parallel_execute(
            objects=objects,
            func=process,
            get_name=lambda obj: obj,
            msg="Processing",
            get_deps=get_deps,
        )

        assert log == [cache]

        events = [
            (obj, result, type(exception))
            for obj, result, exception
            in parallel_execute_iter(objects, process, get_deps, None)
        ]

        assert (cache, None, type(None)) in events
        assert (data_volume, None, APIError) in events
        assert (db, None, UpstreamError) in events
        assert (web, None, UpstreamError) in events


def test_parallel_execute_alignment(capsys):
    ParallelStreamWriter.instance = None
    results, errors = parallel_execute(
        objects=["short", "a very long name"],
        func=lambda x: x,
        get_name=str,
        msg="Aligning",
    )

    assert errors == {}

    _, err = capsys.readouterr()
    a, b = err.split('\n')[:2]
    assert a.index('...') == b.index('...')


def test_parallel_execute_ansi(capsys):
    ParallelStreamWriter.instance = None
    ParallelStreamWriter.set_default_ansi_mode(AnsiMode.ALWAYS)
    results, errors = parallel_execute(
        objects=["something", "something more"],
        func=lambda x: x,
        get_name=str,
        msg="Control characters",
    )

    assert errors == {}

    _, err = capsys.readouterr()
    assert "\x1b" in err


def test_parallel_execute_noansi(capsys):
    ParallelStreamWriter.instance = None
    ParallelStreamWriter.set_default_ansi_mode(AnsiMode.NEVER)
    results, errors = parallel_execute(
        objects=["something", "something more"],
        func=lambda x: x,
        get_name=str,
        msg="Control characters",
    )

    assert errors == {}

    _, err = capsys.readouterr()
    assert "\x1b" not in err

compose-1.29.2/tests/unit/progress_stream_test.py000066400000000000000000000073021404620552300222260ustar00rootroot00000000000000

import os
import random
import shutil
import tempfile
from io import StringIO

from compose import progress_stream
from tests import unittest


class ProgressStreamTestCase(unittest.TestCase):
    def test_stream_output(self):
        output = [
            b'{"status": "Downloading", "progressDetail": {"current": '
            b'31019763, "start": 1413653874, "total": 62763875}, '
            b'"progress": "..."}',
        ]
        events = list(progress_stream.stream_output(output, StringIO()))
        assert len(events) == 1

    def test_stream_output_div_zero(self):
        output = [
            b'{"status": "Downloading", "progressDetail": {"current": '
            b'0, "start": 1413653874, "total": 0}, '
            b'"progress": "..."}',
        ]
        events = list(progress_stream.stream_output(output, StringIO()))
        assert len(events) == 1

    def test_stream_output_null_total(self):
        output = [
            b'{"status": "Downloading", "progressDetail": {"current": '
            b'0, "start": 1413653874, "total": null}, '
            b'"progress": "..."}',
        ]
        events = list(progress_stream.stream_output(output, StringIO()))
        assert len(events) == 1

    def test_stream_output_progress_event_tty(self):
        events = [
            b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
        ]

        class TTYStringIO(StringIO):
            def isatty(self):
                return True

        output = TTYStringIO()
        events = list(progress_stream.stream_output(events, output))
        assert len(output.getvalue()) > 0

    def test_stream_output_progress_event_no_tty(self):
        events = [
            b'{"status": "Already exists", "progressDetail": {}, "id": "8d05e3af52b0"}'
        ]
        output = StringIO()

        events = list(progress_stream.stream_output(events, output))
        assert len(output.getvalue()) == 0

    def test_stream_output_no_progress_event_no_tty(self):
        events = [
            b'{"status": "Pulling from library/xy", "id": "latest"}'
        ]
        output = StringIO()

        events = list(progress_stream.stream_output(events, output))
        assert len(output.getvalue()) > 0

    def test_mismatched_encoding_stream_write(self):
        tmpdir = tempfile.mkdtemp()
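        # remove the temp dir at teardown; the trailing True is shutil.rmtree's ignore_errors flag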
        self.addCleanup(shutil.rmtree, tmpdir, True)

        def mktempfile(encoding):
            fname = os.path.join(tmpdir, hex(random.getrandbits(128))[2:-1])
            return open(fname, mode='w+', encoding=encoding)

        text = '就吃饭'
        with mktempfile(encoding='utf-8') as tf:
            progress_stream.write_to_stream(text, tf)
            tf.seek(0)
            assert tf.read() == text

        with mktempfile(encoding='utf-32') as tf:
            progress_stream.write_to_stream(text, tf)
            tf.seek(0)
            assert tf.read() == text

        with mktempfile(encoding='ascii') as tf:
            progress_stream.write_to_stream(text, tf)
            tf.seek(0)
            assert tf.read() == '???'

    def test_get_digest_from_push(self):
        digest = "sha256:abcd"
        events = [
            {"status": "..."},
            {"status": "..."},
            {"progressDetail": {}, "aux": {"Digest": digest}},
        ]
        assert progress_stream.get_digest_from_push(events) == digest

    def test_get_digest_from_pull(self):
        events = list()
        assert progress_stream.get_digest_from_pull(events) is None

        digest = "sha256:abcd"
        events = [
            {"status": "..."},
            {"status": "..."},
            {"status": "Digest: %s" % digest},
            {"status": "..."},
        ]
        assert progress_stream.get_digest_from_pull(events) == digest

compose-1.29.2/tests/unit/project_test.py000066400000000000000000000773431404620552300204670ustar00rootroot00000000000000

import datetime
import os
import tempfile

import docker
import pytest
from docker.errors import NotFound

from .. import mock
from .. import unittest
from ..helpers import BUSYBOX_IMAGE_WITH_TAG
from compose.config import ConfigurationError
from compose.config.config import Config
from compose.config.types import VolumeFromSpec
from compose.const import COMPOSE_SPEC as VERSION
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import DEFAULT_TIMEOUT
from compose.const import LABEL_SERVICE
from compose.container import Container
from compose.errors import OperationFailedError
from compose.project import get_secrets
from compose.project import NoSuchService
from compose.project import Project
from compose.project import ProjectError
from compose.service import ImageType
from compose.service import Service


def build_config(**kwargs):
    return Config(
        config_version=kwargs.get('config_version', VERSION),
        version=kwargs.get('version', VERSION),
        services=kwargs.get('services'),
        volumes=kwargs.get('volumes'),
        networks=kwargs.get('networks'),
        secrets=kwargs.get('secrets'),
        configs=kwargs.get('configs'),
    )


class ProjectTest(unittest.TestCase):
    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)
        self.mock_client._general_configs = {}
        self.mock_client.api_version = docker.constants.DEFAULT_DOCKER_API_VERSION

    def test_from_config_v1(self):
        config = build_config(
            version=V1,
            services=[
                {
                    'name': 'web',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                },
                {
                    'name': 'db',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                },
            ],
            networks=None,
            volumes=None,
            secrets=None,
            configs=None,
        )
        project = Project.from_config(
            name='composetest',
            config_data=config,
            client=None,
        )
        assert len(project.services) == 2
        assert project.get_service('web').name == 'web'
        assert project.get_service('web').options['image'] == BUSYBOX_IMAGE_WITH_TAG
        assert project.get_service('db').name == 'db'
        assert project.get_service('db').options['image'] == BUSYBOX_IMAGE_WITH_TAG
        assert not project.networks.use_networking

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_from_config_v2(self):
        config = build_config(
            services=[
                {
                    'name': 'web',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                },
                {
                    'name': 'db',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                },
            ],
            networks=None,
            volumes=None,
            secrets=None,
            configs=None,
        )
        project = Project.from_config('composetest', config, None)
        assert len(project.services) == 2
        assert project.networks.use_networking

    def test_get_service(self):
        web = Service(
            project='composetest',
            name='web',
            client=None,
            image=BUSYBOX_IMAGE_WITH_TAG,
        )
        project = Project('test', [web], None)
        assert project.get_service('web') == web

    def test_get_services_returns_all_services_without_args(self):
        web = Service(
            project='composetest',
            name='web',
            image='foo',
        )
        console = Service(
            project='composetest',
            name='console',
            image='foo',
        )
        project = Project('test', [web, console], None)
        assert project.get_services() == [web, console]

    def test_get_services_returns_listed_services_with_args(self):
        web = Service(
            project='composetest',
            name='web',
            image='foo',
        )
        console = Service(
            project='composetest',
            name='console',
            image='foo',
        )
        project = Project('test', [web, console], None)
        assert project.get_services(['console']) == [console]

    def test_get_services_with_include_links(self):
        db = Service(
            project='composetest',
            name='db',
            image='foo',
        )
        web = Service(
            project='composetest',
            name='web',
            image='foo',
            links=[(db, 'database')]
        )
        cache = Service(
            project='composetest',
            name='cache',
            image='foo'
        )
        console = Service(
            project='composetest',
            name='console',
            image='foo',
            links=[(web, 'web')]
        )
        project = Project('test', [web, db, cache, console], None)
        assert project.get_services(['console'], include_deps=True) == [db, web, console]

    def test_get_services_removes_duplicates_following_links(self):
        db = Service(
            project='composetest',
            name='db',
            image='foo',
        )
        web = Service(
            project='composetest',
            name='web',
            image='foo',
            links=[(db, 'database')]
        )
        project = Project('test', [web, db], None)
        assert project.get_services(['web', 'db'], include_deps=True) == [db, web]

    def test_use_volumes_from_container(self):
        container_id = 'aabbccddee'
        container_dict = dict(Name='aaa', Id=container_id)
        self.mock_client.inspect_container.return_value = container_dict
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[{
                    'name': 'test',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                    'volumes_from': [VolumeFromSpec('aaa', 'rw', 'container')]
                }],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )
        assert project.get_service('test')._get_volumes_from() == [container_id + ":rw"]

    def test_use_volumes_from_service_no_container(self):
        container_name = 'test_vol_1'
        self.mock_client.containers.return_value = [
            {
                "Name": container_name,
                "Names": [container_name],
                "Id": container_name,
                "Image": BUSYBOX_IMAGE_WITH_TAG
            }
        ]
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[
                    {
                        'name': 'vol',
                        'image': BUSYBOX_IMAGE_WITH_TAG
                    },
                    {
                        'name': 'test',
                        'image': BUSYBOX_IMAGE_WITH_TAG,
                        'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
                    }
                ],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )
        assert project.get_service('test')._get_volumes_from() == [container_name + ":rw"]

    @mock.patch('compose.network.Network.true_name', lambda n: n.full_name)
    def test_use_volumes_from_service_container(self):
        container_ids = ['aabbccddee', '12345']

        project = Project.from_config(
            name='test',
            client=None,
            config_data=build_config(
                services=[
                    {
                        'name': 'vol',
                        'image': BUSYBOX_IMAGE_WITH_TAG
                    },
                    {
                        'name': 'test',
                        'image': BUSYBOX_IMAGE_WITH_TAG,
                        'volumes_from': [VolumeFromSpec('vol', 'rw', 'service')]
                    }
                ],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )
        with mock.patch.object(Service, 'containers') as mock_return:
            mock_return.return_value = [
                mock.Mock(id=container_id, spec=Container)
                for container_id in container_ids]
            assert (
                project.get_service('test')._get_volumes_from() ==
                [container_ids[0] + ':rw']
            )

    def test_events_legacy(self):
        services = [Service(name='web'), Service(name='db')]
        project = Project('test', services, self.mock_client)
        self.mock_client.api_version = '1.21'
        self.mock_client.events.return_value = iter([
            {
                'status': 'create',
                'from': 'example/image',
                'id': 'abcde',
                'time': 1420092061,
                'timeNano': 14200920610000002000,
            },
            {
                'status': 'attach',
                'from': 'example/image',
                'id': 'abcde',
                'time': 1420092061,
                'timeNano': 14200920610000003000,
            },
            {
                'status': 'create',
                'from': 'example/other',
                'id': 'bdbdbd',
                'time': 1420092061,
                'timeNano': 14200920610000005000,
            },
            {
                'status': 'create',
                'from': 'example/db',
                'id': 'ababa',
                'time': 1420092061,
                'timeNano': 14200920610000004000,
            },
            {
                'status': 'destroy',
                'from': 'example/db',
                'id': 'eeeee',
                'time': 1420092061,
                'timeNano': 14200920610000004000,
            },
        ])

        def dt_with_microseconds(dt, us):
            return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)

        def get_container(cid):
            if cid == 'eeeee':
                raise NotFound(None, None, "oops")
            if cid == 'abcde':
                name = 'web'
                labels = {LABEL_SERVICE: name}
            elif cid == 'ababa':
                name = 'db'
                labels = {LABEL_SERVICE: name}
            else:
                labels = {}
                name = ''
            return {
                'Id': cid,
                'Config': {'Labels': labels},
                'Name': '/project_%s_1' % name,
            }

        self.mock_client.inspect_container.side_effect = get_container

        events = project.events()

        events_list = list(events)
        # Assert the return value is a generator
        assert not list(events)
        assert events_list == [
            {
                'type': 'container',
                'service': 'web',
                'action': 'create',
                'id': 'abcde',
                'attributes': {
                    'name': 'project_web_1',
                    'image': 'example/image',
                },
                'time': dt_with_microseconds(1420092061, 2),
                'container': Container(None, {'Id': 'abcde'}),
            },
            {
                'type': 'container',
                'service': 'web',
                'action': 'attach',
                'id': 'abcde',
                'attributes': {
                    'name': 'project_web_1',
                    'image': 'example/image',
                },
                'time': dt_with_microseconds(1420092061, 3),
                'container': Container(None, {'Id': 'abcde'}),
            },
            {
                'type': 'container',
                'service': 'db',
                'action': 'create',
                'id': 'ababa',
                'attributes': {
                    'name': 'project_db_1',
                    'image': 'example/db',
                },
                'time': dt_with_microseconds(1420092061, 4),
                'container': Container(None, {'Id': 'ababa'}),
            },
        ]

    def test_events(self):
        services = [Service(name='web'), Service(name='db')]
        project = Project('test', services, self.mock_client)
        self.mock_client.api_version = '1.35'
        self.mock_client.events.return_value = iter([
            {
                'status': 'create',
                'from': 'example/image',
                'Type': 'container',
                'Actor': {
                    'ID': 'abcde',
                    'Attributes': {
                        'com.docker.compose.project': 'test',
                        'com.docker.compose.service': 'web',
                        'image': 'example/image',
                        'name': 'test_web_1',
                    }
                },
                'id': 'abcde',
                'time': 1420092061,
                'timeNano': 14200920610000002000,
            },
            {
                'status': 'attach',
                'from': 'example/image',
                'Type': 'container',
                'Actor': {
                    'ID': 'abcde',
                    'Attributes': {
                        'com.docker.compose.project': 'test',
                        'com.docker.compose.service': 'web',
                        'image': 'example/image',
                        'name': 'test_web_1',
                    }
                },
                'id': 'abcde',
                'time': 1420092061,
                'timeNano': 14200920610000003000,
            },
            {
                'status': 'create',
                'from': 'example/other',
                'Type': 'container',
                'Actor': {
                    'ID': 'bdbdbd',
                    'Attributes': {
                        'image': 'example/other',
                        'name': 'shrewd_einstein',
                    }
                },
                'id': 'bdbdbd',
                'time': 1420092061,
                'timeNano': 14200920610000005000,
            },
            {
                'status': 'create',
                'from': 'example/db',
                'Type': 'container',
                'Actor': {
                    'ID': 'ababa',
                    'Attributes': {
                        'com.docker.compose.project': 'test',
                        'com.docker.compose.service': 'db',
                        'image': 'example/db',
                        'name': 'test_db_1',
                    }
                },
                'id': 'ababa',
                'time': 1420092061,
                'timeNano': 14200920610000004000,
            },
            {
                'status': 'destroy',
                'from': 'example/db',
                'Type': 'container',
                'Actor': {
                    'ID': 'eeeee',
                    'Attributes': {
                        'com.docker.compose.project': 'test',
                        'com.docker.compose.service': 'db',
                        'image': 'example/db',
                        'name': 'test_db_1',
                    }
                },
                'id': 'eeeee',
                'time': 1420092061,
                'timeNano': 14200920610000004000,
            },
        ])

        def dt_with_microseconds(dt, us):
            return datetime.datetime.fromtimestamp(dt).replace(microsecond=us)

        def get_container(cid):
            if cid == 'eeeee':
                raise NotFound(None, None, "oops")
            if cid == 'abcde':
                name = 'web'
                labels = {LABEL_SERVICE: name}
            elif cid == 'ababa':
                name = 'db'
                labels = {LABEL_SERVICE: name}
            else:
                labels = {}
                name = ''
            return {
                'Id': cid,
                'Config': {'Labels': labels},
                'Name': '/project_%s_1' % name,
            }

        self.mock_client.inspect_container.side_effect = get_container

        events = project.events()

        events_list = list(events)
        # Assert the return value is a generator
        assert not list(events)
        assert events_list == [
            {
                'type': 'container',
                'service': 'web',
                'action': 'create',
                'id': 'abcde',
                'attributes': {
                    'name': 'test_web_1',
                    'image': 'example/image',
                },
                'time': dt_with_microseconds(1420092061, 2),
                'container': Container(None, get_container('abcde')),
            },
            {
                'type': 'container',
                'service': 'web',
                'action': 'attach',
                'id': 'abcde',
                'attributes': {
                    'name': 'test_web_1',
                    'image': 'example/image',
                },
                'time': dt_with_microseconds(1420092061, 3),
                'container': Container(None, get_container('abcde')),
            },
            {
                'type': 'container',
                'service': 'db',
                'action': 'create',
                'id': 'ababa',
                'attributes': {
                    'name': 'test_db_1',
                    'image': 'example/db',
                },
                'time': dt_with_microseconds(1420092061, 4),
                'container': Container(None, get_container('ababa')),
            },
            {
                'type': 'container',
                'service': 'db',
                'action': 'destroy',
                'id': 'eeeee',
                'attributes': {
                    'name': 'test_db_1',
                    'image': 'example/db',
                },
                'time': dt_with_microseconds(1420092061, 4),
                'container': None,
            },
        ]

    def test_net_unset(self):
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                version=V1,
                services=[
                    {
                        'name': 'test',
                        'image': BUSYBOX_IMAGE_WITH_TAG,
                    }
                ],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )
        service = project.get_service('test')
        assert service.network_mode.id is None
        assert 'NetworkMode' not in service._get_container_host_config({})

    def test_use_net_from_container(self):
        container_id = 'aabbccddee'
        container_dict = dict(Name='aaa', Id=container_id)
        self.mock_client.inspect_container.return_value = container_dict
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[
                    {
                        'name': 'test',
                        'image': BUSYBOX_IMAGE_WITH_TAG,
                        'network_mode': 'container:aaa'
                    },
                ],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )
        service = project.get_service('test')
        assert service.network_mode.mode == 'container:' + container_id

    def test_use_net_from_service(self):
        container_name = 'test_aaa_1'
        self.mock_client.containers.return_value = [
            {
                "Name": container_name,
                "Names": [container_name],
                "Id": container_name,
                "Image": BUSYBOX_IMAGE_WITH_TAG
            }
        ]
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[
                    {
                        'name': 'aaa',
                        'image': BUSYBOX_IMAGE_WITH_TAG
                    },
                    {
                        'name': 'test',
                        'image': BUSYBOX_IMAGE_WITH_TAG,
                        'network_mode': 'service:aaa'
                    },
                ],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )

        service = project.get_service('test')
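        # network_mode "service:aaa" should resolve to the backing service's container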
        assert service.network_mode.mode == 'container:' + container_name

    def test_uses_default_network_true(self):
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[
                    {
                        'name': 'foo',
                        'image': BUSYBOX_IMAGE_WITH_TAG
                    },
                ],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )

        assert 'default' in project.networks.networks

    def test_uses_default_network_false(self):
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[
                    {
                        'name': 'foo',
                        'image': BUSYBOX_IMAGE_WITH_TAG,
                        'networks': {'custom': None}
                    },
                ],
                networks={'custom': {}},
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )

        assert 'default' not in project.networks.networks

    def test_container_without_name(self):
        self.mock_client.containers.return_value = [
            {'Image': BUSYBOX_IMAGE_WITH_TAG, 'Id': '1', 'Name': '1'},
            {'Image': BUSYBOX_IMAGE_WITH_TAG, 'Id': '2', 'Name': None},
            {'Image': BUSYBOX_IMAGE_WITH_TAG, 'Id': '3'},
        ]
        self.mock_client.inspect_container.return_value = {
            'Id': '1',
            'Config': {
                'Labels': {
                    LABEL_SERVICE: 'web',
                },
            },
        }
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[{
                    'name': 'web',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                }],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )
        assert [c.id for c in project.containers()] == ['1']

    def test_down_with_no_resources(self):
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[{
                    'name': 'web',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                }],
                networks={'default': {}},
                volumes={'data': {}},
                secrets=None,
                configs=None,
            ),
        )
        self.mock_client.remove_network.side_effect = NotFound(None, None, 'oops')
        self.mock_client.remove_volume.side_effect = NotFound(None, None, 'oops')

        project.down(ImageType.all, True)
        self.mock_client.remove_image.assert_called_once_with(BUSYBOX_IMAGE_WITH_TAG)

    def test_no_warning_on_stop(self):
        self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'active'}}
        project = Project('composetest', [], self.mock_client)

        with mock.patch('compose.project.log') as fake_log:
            project.stop()
            assert fake_log.warn.call_count == 0

    def test_no_warning_in_normal_mode(self):
        self.mock_client.info.return_value = {'Swarm': {'LocalNodeState': 'inactive'}}
        project = Project('composetest', [], self.mock_client)

        with mock.patch('compose.project.log') as fake_log:
            project.up()
            assert fake_log.warn.call_count == 0

    def test_no_warning_with_no_swarm_info(self):
        self.mock_client.info.return_value = {}
        project = Project('composetest', [], self.mock_client)

        with mock.patch('compose.project.log') as fake_log:
            project.up()
            assert fake_log.warn.call_count == 0

    def test_no_such_service_unicode(self):
        assert NoSuchService('十六夜 咲夜'.encode()).msg == 'No such service: 十六夜 咲夜'
        assert NoSuchService('十六夜 咲夜').msg == 'No such service: 十六夜 咲夜'

    def test_project_platform_value(self):
        service_config = {
            'name': 'web',
            'image': BUSYBOX_IMAGE_WITH_TAG,
        }
        config_data = build_config(
            services=[service_config], networks={}, volumes={}, secrets=None, configs=None
        )

        project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
        assert project.get_service('web').platform is None

        project = Project.from_config(
            name='test', client=self.mock_client, config_data=config_data, default_platform='windows'
        )
        assert project.get_service('web').platform == 'windows'

        service_config['platform'] = 'linux/s390x'
        project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)
        assert project.get_service('web').platform == 'linux/s390x'

        project = Project.from_config(
            name='test', client=self.mock_client, config_data=config_data, default_platform='windows'
        )
        assert project.get_service('web').platform == 'linux/s390x'

    def test_build_container_operation_with_timeout_func_does_not_mutate_options_with_timeout(self):
        config_data = build_config(
            services=[
                {'name': 'web', 'image': BUSYBOX_IMAGE_WITH_TAG},
                {'name': 'db', 'image': BUSYBOX_IMAGE_WITH_TAG, 'stop_grace_period': '1s'},
            ],
            networks={}, volumes={}, secrets=None, configs=None,
        )

        project = Project.from_config(name='test', client=self.mock_client, config_data=config_data)

        stop_op = project.build_container_operation_with_timeout_func('stop', options={})

        web_container = mock.create_autospec(Container, service='web')
        db_container = mock.create_autospec(Container, service='db')

        # `stop_grace_period` is not set to 'web' service,
        # then it is stopped with the default timeout.
        stop_op(web_container)
        web_container.stop.assert_called_once_with(timeout=DEFAULT_TIMEOUT)

        # `stop_grace_period` is set to 'db' service,
        # then it is stopped with the specified timeout and
        # the value is not overridden by the previous function call.
        stop_op(db_container)
        db_container.stop.assert_called_once_with(timeout=1)

    @mock.patch('compose.parallel.ParallelStreamWriter._write_noansi')
    def test_error_parallel_pull(self, mock_write):
        project = Project.from_config(
            name='test',
            client=self.mock_client,
            config_data=build_config(
                services=[{
                    'name': 'web',
                    'image': BUSYBOX_IMAGE_WITH_TAG,
                }],
                networks=None,
                volumes=None,
                secrets=None,
                configs=None,
            ),
        )

        self.mock_client.pull.side_effect = OperationFailedError('pull error')
        with pytest.raises(ProjectError):
            project.pull(parallel_pull=True)

        self.mock_client.pull.side_effect = OperationFailedError(b'pull error')
        with pytest.raises(ProjectError):
            project.pull(parallel_pull=True)

    def test_avoid_multiple_push(self):
        service_config_latest = {'image': 'busybox:latest', 'build': '.'}
        service_config_default = {'image': 'busybox', 'build': '.'}
        service_config_sha = {
            'image': 'busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d',
            'build': '.'
        }
        svc1 = Service('busy1', **service_config_latest)
        svc1_1 = Service('busy11', **service_config_latest)
        svc2 = Service('busy2', **service_config_default)
        svc2_1 = Service('busy21', **service_config_default)
        svc3 = Service('busy3', **service_config_sha)
        svc3_1 = Service('busy31', **service_config_sha)
        project = Project(
            'composetest', [svc1, svc1_1, svc2, svc2_1, svc3, svc3_1], self.mock_client
        )
        with mock.patch('compose.service.Service.push') as fake_push:
            project.push()
            assert fake_push.call_count == 2

    def test_get_secrets_no_secret_def(self):
        service = 'foo'
        secret_source = 'bar'

        secret_defs = mock.Mock()
        secret_defs.get.return_value = None
        secret = mock.Mock(source=secret_source)

        with self.assertRaises(ConfigurationError):
            get_secrets(service, [secret], secret_defs)

    def test_get_secrets_external_warning(self):
        service = 'foo'
        secret_source = 'bar'

        secret_def = mock.Mock()
        secret_def.get.return_value = True

        secret_defs = mock.Mock()
        secret_defs.get.side_effect = secret_def
        secret = mock.Mock(source=secret_source)

        with mock.patch('compose.project.log') as mock_log:
            get_secrets(service, [secret], secret_defs)

        mock_log.warning.assert_called_with("Service \"{service}\" uses secret \"{secret}\" "
                                            "which is external. External secrets are not available"
                                            " to containers created by docker-compose."
                                            .format(service=service, secret=secret_source))

    def test_get_secrets_uid_gid_mode_warning(self):
        service = 'foo'
        secret_source = 'bar'

        fd, filename_path = tempfile.mkstemp()
        os.close(fd)
        self.addCleanup(os.remove, filename_path)

        def mock_get(key):
            return {'external': False, 'file': filename_path}[key]

        secret_def = mock.MagicMock()
        secret_def.get = mock.MagicMock(side_effect=mock_get)

        secret_defs = mock.Mock()
        secret_defs.get.return_value = secret_def

        secret = mock.Mock(uid=True, gid=True, mode=True, source=secret_source)

        with mock.patch('compose.project.log') as mock_log:
            get_secrets(service, [secret], secret_defs)

        mock_log.warning.assert_called_with("Service \"{service}\" uses secret \"{secret}\" with uid, "
                                            "gid, or mode. These fields are not supported by this "
                                            "implementation of the Compose file"
                                            .format(service=service, secret=secret_source))

    def test_get_secrets_secret_file_warning(self):
        service = 'foo'
        secret_source = 'bar'
        not_a_path = 'NOT_A_PATH'

        def mock_get(key):
            return {'external': False, 'file': not_a_path}[key]

        secret_def = mock.MagicMock()
        secret_def.get = mock.MagicMock(side_effect=mock_get)

        secret_defs = mock.Mock()
        secret_defs.get.return_value = secret_def

        secret = mock.Mock(uid=False, gid=False, mode=False, source=secret_source)

        with mock.patch('compose.project.log') as mock_log:
            get_secrets(service, [secret], secret_defs)

        mock_log.warning.assert_called_with("Service \"{service}\" uses an undefined secret file "
                                            "\"{secret_file}\", the following file should be created "
                                            "\"{secret_file}\""
                                            .format(service=service, secret_file=not_a_path))

compose-1.29.2/tests/unit/service_test.py000066400000000000000000001633641404620552300204540ustar00rootroot00000000000000

import docker
import pytest
from docker.constants import DEFAULT_DOCKER_API_VERSION
from docker.errors import APIError
from docker.errors import ImageNotFound
from docker.errors import NotFound

from .. import mock
from .. import unittest
from compose.config.errors import DependencyError
from compose.config.types import MountSpec
from compose.config.types import ServicePort
from compose.config.types import ServiceSecret
from compose.config.types import VolumeFromSpec
from compose.config.types import VolumeSpec
from compose.const import API_VERSIONS
from compose.const import LABEL_CONFIG_HASH
from compose.const import LABEL_ONE_OFF
from compose.const import LABEL_PROJECT
from compose.const import LABEL_SERVICE
from compose.const import SECRETS_PATH
from compose.const import WINDOWS_LONGPATH_PREFIX
from compose.container import Container
from compose.errors import OperationFailedError
from compose.parallel import ParallelStreamWriter
from compose.project import OneOffFilter
from compose.service import build_ulimits
from compose.service import build_volume_binding
from compose.service import BuildAction
from compose.service import ContainerNetworkMode
from compose.service import format_environment
from compose.service import formatted_ports
from compose.service import get_container_data_volumes
from compose.service import ImageType
from compose.service import merge_volume_bindings
from compose.service import NeedsBuildError
from compose.service import NetworkMode
from compose.service import NoSuchImageError
from compose.service import parse_repository_tag
from compose.service import rewrite_build_path
from compose.service import Service
from compose.service import ServiceNetworkMode
from compose.service import warn_on_masked_volume


class ServiceTest(unittest.TestCase):
    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)
        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
        self.mock_client._general_configs = {}

    def test_containers(self):
        service = Service('db', self.mock_client, 'myproject', image='foo')
        self.mock_client.containers.return_value = []
        assert list(service.containers()) == []

    def test_containers_with_containers(self):
        self.mock_client.containers.return_value = [
            dict(Name=str(i), Image='foo', Id=i) for i in range(3)
        ]
        service = Service('db', self.mock_client, 'myproject', image='foo')
        assert [c.id for c in service.containers()] == list(range(3))

        expected_labels = [
            '{}=myproject'.format(LABEL_PROJECT),
            '{}=db'.format(LABEL_SERVICE),
            '{}=False'.format(LABEL_ONE_OFF),
        ]

        self.mock_client.containers.assert_called_once_with(
            all=False,
            filters={'label': expected_labels})

    def test_container_without_name(self):
        self.mock_client.containers.return_value = [
            {'Image': 'foo', 'Id': '1', 'Name': '1'},
            {'Image': 'foo', 'Id': '2', 'Name': None},
            {'Image': 'foo', 'Id': '3'},
        ]
        service = Service('db', self.mock_client, 'myproject', image='foo')

        assert [c.id for c in service.containers()] == ['1']
        assert service._next_container_number() == 2
        assert service.get_container(1).id == '1'

    def test_get_volumes_from_container(self):
        container_id = 'aabbccddee'
        service = Service(
            'test',
            image='foo',
            volumes_from=[
                VolumeFromSpec(
                    mock.Mock(id=container_id, spec=Container),
                    'rw',
                    'container')])

        assert service._get_volumes_from() == [container_id + ':rw']

    def test_get_volumes_from_container_read_only(self):
        container_id = 'aabbccddee'
        service = Service(
            'test',
            image='foo',
            volumes_from=[
                VolumeFromSpec(
                    mock.Mock(id=container_id, spec=Container),
                    'ro',
                    'container')])

        assert service._get_volumes_from() == [container_id + ':ro']

    def test_get_volumes_from_service_container_exists(self):
        container_ids = ['aabbccddee', '12345']
        from_service = mock.create_autospec(Service)
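        # the autospec'd Service stands in for the volumes_from source service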
from_service.containers.return_value = [ mock.Mock(id=container_id, spec=Container) for container_id in container_ids ] service = Service( 'test', volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')], image='foo') assert service._get_volumes_from() == [container_ids[0] + ":rw"] def test_get_volumes_from_service_container_exists_with_flags(self): for mode in ['ro', 'rw', 'z', 'rw,z', 'z,rw']: container_ids = ['aabbccddee:' + mode, '12345:' + mode] from_service = mock.create_autospec(Service) from_service.containers.return_value = [ mock.Mock(id=container_id.split(':')[0], spec=Container) for container_id in container_ids ] service = Service( 'test', volumes_from=[VolumeFromSpec(from_service, mode, 'service')], image='foo') assert service._get_volumes_from() == [container_ids[0]] def test_get_volumes_from_service_no_container(self): container_id = 'abababab' from_service = mock.create_autospec(Service) from_service.containers.return_value = [] from_service.create_container.return_value = mock.Mock( id=container_id, spec=Container) service = Service( 'test', image='foo', volumes_from=[VolumeFromSpec(from_service, 'rw', 'service')]) assert service._get_volumes_from() == [container_id + ':rw'] from_service.create_container.assert_called_once_with() def test_memory_swap_limit(self): self.mock_client.create_host_config.return_value = {} service = Service( name='foo', image='foo', hostname='name', client=self.mock_client, mem_limit=1000000000, memswap_limit=2000000000) service._get_container_create_options({'some': 'overrides'}, 1) assert self.mock_client.create_host_config.called assert self.mock_client.create_host_config.call_args[1]['mem_limit'] == 1000000000 assert self.mock_client.create_host_config.call_args[1]['memswap_limit'] == 2000000000 def test_self_reference_external_link(self): service = Service( name='foo', external_links=['default_foo_1'] ) with pytest.raises(DependencyError): service.get_container_name('foo', 1) def test_mem_reservation(self): self.mock_client.create_host_config.return_value = {} service = Service( name='foo', image='foo', hostname='name', client=self.mock_client, mem_reservation='512m' ) service._get_container_create_options({'some': 'overrides'}, 1) assert self.mock_client.create_host_config.called is True assert self.mock_client.create_host_config.call_args[1]['mem_reservation'] == '512m' def test_cgroup_parent(self): self.mock_client.create_host_config.return_value = {} service = Service( name='foo', image='foo', hostname='name', client=self.mock_client, cgroup_parent='test') service._get_container_create_options({'some': 'overrides'}, 1) assert self.mock_client.create_host_config.called assert self.mock_client.create_host_config.call_args[1]['cgroup_parent'] == 'test' def test_log_opt(self): self.mock_client.create_host_config.return_value = {} log_opt = {'syslog-address': 'tcp://192.168.0.42:123'} logging = {'driver': 'syslog', 'options': log_opt} service = Service( name='foo', image='foo', hostname='name', client=self.mock_client, log_driver='syslog', logging=logging) service._get_container_create_options({'some': 'overrides'}, 1) assert self.mock_client.create_host_config.called assert self.mock_client.create_host_config.call_args[1]['log_config'] == { 'Type': 'syslog', 'Config': {'syslog-address': 'tcp://192.168.0.42:123'} } def test_stop_grace_period(self): self.mock_client.api_version = '1.25' self.mock_client.create_host_config.return_value = {} service = Service( 'foo', image='foo', client=self.mock_client, stop_grace_period="1m35s") opts = 
service._get_container_create_options({'image': 'foo'}, 1) assert opts['stop_timeout'] == 95 def test_split_domainname_none(self): service = Service( 'foo', image='foo', hostname='name.domain.tld', client=self.mock_client) opts = service._get_container_create_options({'image': 'foo'}, 1) assert opts['hostname'] == 'name.domain.tld', 'hostname' assert not ('domainname' in opts), 'domainname' def test_split_domainname_fqdn(self): self.mock_client.api_version = '1.22' service = Service( 'foo', hostname='name.domain.tld', image='foo', client=self.mock_client) opts = service._get_container_create_options({'image': 'foo'}, 1) assert opts['hostname'] == 'name', 'hostname' assert opts['domainname'] == 'domain.tld', 'domainname' def test_split_domainname_both(self): self.mock_client.api_version = '1.22' service = Service( 'foo', hostname='name', image='foo', domainname='domain.tld', client=self.mock_client) opts = service._get_container_create_options({'image': 'foo'}, 1) assert opts['hostname'] == 'name', 'hostname' assert opts['domainname'] == 'domain.tld', 'domainname' def test_split_domainname_weird(self): self.mock_client.api_version = '1.22' service = Service( 'foo', hostname='name.sub', domainname='domain.tld', image='foo', client=self.mock_client) opts = service._get_container_create_options({'image': 'foo'}, 1) assert opts['hostname'] == 'name.sub', 'hostname' assert opts['domainname'] == 'domain.tld', 'domainname' def test_no_default_hostname_when_not_using_networking(self): service = Service( 'foo', image='foo', use_networking=False, client=self.mock_client, ) opts = service._get_container_create_options({'image': 'foo'}, 1) assert opts.get('hostname') is None def test_get_container_create_options_with_name_option(self): service = Service( 'foo', image='foo', client=self.mock_client, container_name='foo1') name = 'the_new_name' opts = service._get_container_create_options( {'name': name}, 1, one_off=OneOffFilter.only) assert opts['name'] == name def test_get_container_create_options_does_not_mutate_options(self): labels = {'thing': 'real'} environment = {'also': 'real'} service = Service( 'foo', image='foo', labels=dict(labels), client=self.mock_client, environment=dict(environment), ) self.mock_client.inspect_image.return_value = {'Id': 'abcd'} prev_container = mock.Mock( id='ababab', image_config={'ContainerConfig': {}} ) prev_container.full_slug = 'abcdefff1234' prev_container.get.return_value = None opts = service._get_container_create_options( {}, 1, previous_container=prev_container ) assert service.options['labels'] == labels assert service.options['environment'] == environment assert opts['labels'][LABEL_CONFIG_HASH] == \ '6da0f3ec0d5adf901de304bdc7e0ee44ec5dd7adb08aebc20fe0dd791d4ee5a8' assert opts['environment'] == ['also=real'] def test_get_container_create_options_sets_affinity_with_binds(self): service = Service( 'foo', image='foo', client=self.mock_client, ) self.mock_client.inspect_image.return_value = {'Id': 'abcd'} prev_container = mock.Mock( id='ababab', image_config={'ContainerConfig': {'Volumes': ['/data']}}) def container_get(key): return { 'Mounts': [ { 'Destination': '/data', 'Source': '/some/path', 'Name': 'abab1234', }, ] }.get(key, None) prev_container.get.side_effect = container_get prev_container.full_slug = 'abcdefff1234' opts = service._get_container_create_options( {}, 1, previous_container=prev_container ) assert opts['environment'] == ['affinity:container==ababab'] def test_get_container_create_options_no_affinity_without_binds(self): service = 
Service('foo', image='foo', client=self.mock_client) self.mock_client.inspect_image.return_value = {'Id': 'abcd'} prev_container = mock.Mock( id='ababab', image_config={'ContainerConfig': {}}) prev_container.get.return_value = None prev_container.full_slug = 'abcdefff1234' opts = service._get_container_create_options( {}, 1, previous_container=prev_container) assert opts['environment'] == [] def test_get_container_not_found(self): self.mock_client.containers.return_value = [] service = Service('foo', client=self.mock_client, image='foo') with pytest.raises(ValueError): service.get_container() @mock.patch('compose.service.Container', autospec=True) def test_get_container(self, mock_container_class): container_dict = dict(Name='default_foo_2_bdfa3ed91e2c') self.mock_client.containers.return_value = [container_dict] service = Service('foo', image='foo', client=self.mock_client) container = service.get_container(number=2) assert container == mock_container_class.from_ps.return_value mock_container_class.from_ps.assert_called_once_with( self.mock_client, container_dict) @mock.patch('compose.service.log', autospec=True) def test_pull_image(self, mock_log): service = Service('foo', client=self.mock_client, image='someimage:sometag') service.pull() self.mock_client.pull.assert_called_once_with( 'someimage', tag='sometag', stream=True, platform=None) mock_log.info.assert_called_once_with('Pulling foo (someimage:sometag)...') def test_pull_image_no_tag(self): service = Service('foo', client=self.mock_client, image='ababab') service.pull() self.mock_client.pull.assert_called_once_with( 'ababab', tag='latest', stream=True, platform=None) @mock.patch('compose.service.log', autospec=True) def test_pull_image_digest(self, mock_log): service = Service('foo', client=self.mock_client, image='someimage@sha256:1234') service.pull() self.mock_client.pull.assert_called_once_with( 'someimage', tag='sha256:1234', stream=True, platform=None) mock_log.info.assert_called_once_with('Pulling foo (someimage@sha256:1234)...') @mock.patch('compose.service.log', autospec=True) def test_pull_image_with_platform(self, mock_log): self.mock_client.api_version = '1.35' service = Service( 'foo', client=self.mock_client, image='someimage:sometag', platform='windows/x86_64' ) service.pull() assert self.mock_client.pull.call_count == 1 call_args = self.mock_client.pull.call_args assert call_args[1]['platform'] == 'windows/x86_64' @mock.patch('compose.service.log', autospec=True) def test_pull_image_with_platform_unsupported_api(self, mock_log): self.mock_client.api_version = '1.33' service = Service( 'foo', client=self.mock_client, image='someimage:sometag', platform='linux/arm' ) with pytest.raises(OperationFailedError): service.pull() def test_pull_image_with_default_platform(self): self.mock_client.api_version = '1.35' service = Service( 'foo', client=self.mock_client, image='someimage:sometag', default_platform='linux' ) assert service.platform == 'linux' service.pull() assert self.mock_client.pull.call_count == 1 call_args = self.mock_client.pull.call_args assert call_args[1]['platform'] == 'linux' @mock.patch('compose.service.Container', autospec=True) def test_recreate_container(self, _): mock_container = mock.create_autospec(Container) mock_container.full_slug = 'abcdefff1234' service = Service('foo', client=self.mock_client, image='someimage') service.image = lambda: {'Id': 'abc123'} new_container = service.recreate_container(mock_container) mock_container.stop.assert_called_once_with(timeout=10) 
    @mock.patch('compose.service.Container', autospec=True)
    def test_recreate_container(self, _):
        mock_container = mock.create_autospec(Container)
        mock_container.full_slug = 'abcdefff1234'
        service = Service('foo', client=self.mock_client, image='someimage')
        service.image = lambda: {'Id': 'abc123'}
        new_container = service.recreate_container(mock_container)

        mock_container.stop.assert_called_once_with(timeout=10)
        mock_container.rename_to_tmp_name.assert_called_once_with()
        new_container.start.assert_called_once_with()
        mock_container.remove.assert_called_once_with()

    @mock.patch('compose.service.Container', autospec=True)
    def test_recreate_container_with_timeout(self, _):
        mock_container = mock.create_autospec(Container)
        mock_container.full_slug = 'abcdefff1234'
        self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
        service = Service('foo', client=self.mock_client, image='someimage')
        service.recreate_container(mock_container, timeout=1)

        mock_container.stop.assert_called_once_with(timeout=1)

    def test_parse_repository_tag(self):
        assert parse_repository_tag("root") == ("root", "", ":")
        assert parse_repository_tag("root:tag") == ("root", "tag", ":")
        assert parse_repository_tag("user/repo") == ("user/repo", "", ":")
        assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag", ":")
        assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", "", ":")
        assert parse_repository_tag("url:5000/repo:tag") == ("url:5000/repo", "tag", ":")
        assert parse_repository_tag("root@sha256:digest") == ("root", "sha256:digest", "@")
        assert parse_repository_tag("user/repo@sha256:digest") == ("user/repo", "sha256:digest", "@")
        assert parse_repository_tag("url:5000/repo@sha256:digest") == (
            "url:5000/repo", "sha256:digest", "@")

    def test_create_container(self):
        service = Service('foo', client=self.mock_client, build={'context': '.'})
        self.mock_client.inspect_image.side_effect = [
            NoSuchImageError,
            {'Id': 'abc123'},
        ]
        self.mock_client.build.return_value = [
            '{"stream": "Successfully built abcd"}',
        ]

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            service.create_container()
            assert mock_log.warning.called
            _, args, _ = mock_log.warning.mock_calls[0]
            assert 'was built because it did not already exist' in args[0]

        assert self.mock_client.build.call_count == 1
        assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'

    def test_create_container_binary_string_error(self):
        service = Service('foo', client=self.mock_client, build={'context': '.'})
        service.image = lambda: {'Id': 'abc123'}
        self.mock_client.create_container.side_effect = APIError(
            None, None, b"Test binary string explanation")
        with pytest.raises(OperationFailedError) as ex:
            service.create_container()
        assert ex.value.msg == "Cannot create container for service foo: Test binary string explanation"

    def test_start_binary_string_error(self):
        service = Service('foo', client=self.mock_client)
        container = Container(self.mock_client, {'Id': 'abc123'})
        self.mock_client.start.side_effect = APIError(
            None, None,
            b"Test binary string explanation with "
            b"driver failed programming external "
            b"connectivity")
        with mock.patch('compose.service.log', autospec=True) as mock_log:
            with pytest.raises(OperationFailedError) as ex:
                service.start_container(container)
        assert ex.value.msg == "Cannot start service foo: " \
                               "Test binary string explanation " \
                               "with driver failed programming external connectivity"
        mock_log.warn.assert_called_once_with("Host is already in use by another container")

    def test_ensure_image_exists_no_build(self):
        service = Service('foo', client=self.mock_client, build={'context': '.'})
        self.mock_client.inspect_image.return_value = {'Id': 'abc123'}

        service.ensure_image_exists(do_build=BuildAction.skip)
        assert not self.mock_client.build.called
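
    # BuildAction semantics exercised here: `skip` must never trigger a
    # build -- if the image is missing it raises NeedsBuildError instead --
    # while `force` rebuilds even when inspect_image already finds an image.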
    def test_ensure_image_exists_no_build_but_needs_build(self):
        service = Service('foo', client=self.mock_client, build={'context': '.'})
        self.mock_client.inspect_image.side_effect = NoSuchImageError
        with pytest.raises(NeedsBuildError):
            service.ensure_image_exists(do_build=BuildAction.skip)

    def test_ensure_image_exists_force_build(self):
        service = Service('foo', client=self.mock_client, build={'context': '.'})
        self.mock_client.inspect_image.return_value = {'Id': 'abc123'}
        self.mock_client.build.return_value = [
            '{"stream": "Successfully built abcd"}',
        ]

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            service.ensure_image_exists(do_build=BuildAction.force)

        assert not mock_log.warning.called
        assert self.mock_client.build.call_count == 1
        # The original comparison was missing its `assert` and therefore a
        # no-op; asserted here so the tag is actually checked.
        assert self.mock_client.build.call_args[1]['tag'] == 'default_foo'

    def test_build_does_not_pull(self):
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service('foo', client=self.mock_client, build={'context': '.'})
        service.build()

        assert self.mock_client.build.call_count == 1
        assert not self.mock_client.build.call_args[1]['pull']

    def test_build_with_platform(self):
        self.mock_client.api_version = '1.35'
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service('foo', client=self.mock_client, build={'context': '.'}, platform='linux')
        service.build()

        assert self.mock_client.build.call_count == 1
        call_args = self.mock_client.build.call_args
        assert call_args[1]['platform'] == 'linux'

    def test_build_with_default_platform(self):
        self.mock_client.api_version = '1.35'
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service(
            'foo', client=self.mock_client, build={'context': '.'},
            default_platform='linux')
        assert service.platform == 'linux'
        service.build()

        assert self.mock_client.build.call_count == 1
        call_args = self.mock_client.build.call_args
        assert call_args[1]['platform'] == 'linux'

    def test_service_platform_precedence(self):
        self.mock_client.api_version = '1.35'

        service = Service(
            'foo', client=self.mock_client,
            platform='linux/arm', default_platform='osx')
        assert service.platform == 'linux/arm'

    def test_service_ignore_default_platform_with_unsupported_api(self):
        self.mock_client.api_version = '1.32'
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service(
            'foo', client=self.mock_client,
            default_platform='windows', build={'context': '.'})
        assert service.platform is None
        service.build()
        assert self.mock_client.build.call_count == 1
        call_args = self.mock_client.build.call_args
        assert call_args[1]['platform'] is None

    def test_build_with_override_build_args(self):
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        build_args = {
            'arg1': 'arg1_new_value',
        }
        service = Service(
            'foo', client=self.mock_client,
            build={'context': '.', 'args': {'arg1': 'arg1', 'arg2': 'arg2'}})
        service.build(build_args_override=build_args)

        called_build_args = self.mock_client.build.call_args[1]['buildargs']
        assert called_build_args['arg1'] == build_args['arg1']
        assert called_build_args['arg2'] == 'arg2'

    def test_build_with_isolation_from_service_config(self):
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service('foo', client=self.mock_client, build={'context': '.'}, isolation='hyperv')
        service.build()

        assert self.mock_client.build.call_count == 1
        called_build_args = self.mock_client.build.call_args[1]
        assert called_build_args['isolation'] == 'hyperv'
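
    # Isolation can be set both on the service and inside the build config;
    # the test below pins the expected precedence: the build-level value
    # ('default') wins over the service-level one ('hyperv').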
    def test_build_isolation_from_build_override_service_config(self):
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service(
            'foo', client=self.mock_client,
            build={'context': '.', 'isolation': 'default'},
            isolation='hyperv')
        service.build()

        assert self.mock_client.build.call_count == 1
        called_build_args = self.mock_client.build.call_args[1]
        assert called_build_args['isolation'] == 'default'

    def test_config_dict(self):
        self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
        service = Service(
            'foo',
            image='example.com/foo',
            client=self.mock_client,
            network_mode=ServiceNetworkMode(Service('other')),
            networks={'default': None},
            links=[(Service('one'), 'one')],
            volumes_from=[VolumeFromSpec(Service('two'), 'rw', 'service')])

        config_dict = service.config_dict()
        expected = {
            'image_id': 'abcd',
            'ipc_mode': None,
            'options': {'image': 'example.com/foo'},
            'links': [('one', 'one')],
            'net': 'other',
            'secrets': [],
            'networks': {'default': None},
            'volumes_from': [('two', 'rw')],
        }
        assert config_dict == expected

    def test_config_dict_with_network_mode_from_container(self):
        self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
        container = Container(
            self.mock_client,
            {'Id': 'aaabbb', 'Name': '/foo_1'})
        service = Service(
            'foo',
            image='example.com/foo',
            client=self.mock_client,
            network_mode=ContainerNetworkMode(container))

        config_dict = service.config_dict()
        expected = {
            'image_id': 'abcd',
            'ipc_mode': None,
            'options': {'image': 'example.com/foo'},
            'links': [],
            'networks': {},
            'secrets': [],
            'net': 'aaabbb',
            'volumes_from': [],
        }
        assert config_dict == expected

    def test_config_hash_matches_label(self):
        self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
        service = Service(
            'foo',
            image='example.com/foo',
            client=self.mock_client,
            network_mode=NetworkMode('bridge'),
            networks={'bridge': {}, 'net2': {}},
            links=[(Service('one', client=self.mock_client), 'one')],
            volumes_from=[VolumeFromSpec(Service('two', client=self.mock_client), 'rw', 'service')],
            volumes=[VolumeSpec('/ext', '/int', 'ro')],
            build={'context': 'some/random/path'},
        )
        config_hash = service.config_hash

        for api_version in set(API_VERSIONS.values()):
            self.mock_client.api_version = api_version
            assert service._get_container_create_options(
                {}, 1)['labels'][LABEL_CONFIG_HASH] == config_hash

    def test_remove_image_none(self):
        web = Service('web', image='example', client=self.mock_client)
        assert not web.remove_image(ImageType.none)
        assert not self.mock_client.remove_image.called

    def test_remove_image_local_with_image_name_doesnt_remove(self):
        web = Service('web', image='example', client=self.mock_client)
        assert not web.remove_image(ImageType.local)
        assert not self.mock_client.remove_image.called

    def test_remove_image_local_without_image_name_does_remove(self):
        web = Service('web', build='.', client=self.mock_client)
        assert web.remove_image(ImageType.local)
        self.mock_client.remove_image.assert_called_once_with(web.image_name)

    def test_remove_image_all_does_remove(self):
        web = Service('web', image='example', client=self.mock_client)
        assert web.remove_image(ImageType.all)
        self.mock_client.remove_image.assert_called_once_with(web.image_name)

    def test_remove_image_with_error(self):
        self.mock_client.remove_image.side_effect = error = APIError(
            message="testing",
            response={},
            explanation="Boom")

        web = Service('web', image='example', client=self.mock_client)
        with mock.patch('compose.service.log', autospec=True) as mock_log:
            assert not web.remove_image(ImageType.all)
        mock_log.error.assert_called_once_with(
            "Failed to remove image for service %s: %s", web.name, error)
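
    # Error handling contract for image removal: a genuine APIError is
    # surfaced via log.error (test above), while an already-missing image
    # only warrants a log.warning (test below); both return False.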
    def test_remove_non_existing_image(self):
        self.mock_client.remove_image.side_effect = ImageNotFound('image not found')
        web = Service('web', image='example', client=self.mock_client)
        with mock.patch('compose.service.log', autospec=True) as mock_log:
            assert not web.remove_image(ImageType.all)
        mock_log.warning.assert_called_once_with("Image %s not found.", web.image_name)

    def test_specifies_host_port_with_no_ports(self):
        service = Service(
            'foo',
            image='foo')
        assert not service.specifies_host_port()

    def test_specifies_host_port_with_container_port(self):
        service = Service(
            'foo',
            image='foo',
            ports=["2000"])
        assert not service.specifies_host_port()

    def test_specifies_host_port_with_host_port(self):
        service = Service(
            'foo',
            image='foo',
            ports=["1000:2000"])
        assert service.specifies_host_port()

    def test_specifies_host_port_with_host_ip_no_port(self):
        service = Service(
            'foo',
            image='foo',
            ports=["127.0.0.1::2000"])
        assert not service.specifies_host_port()

    def test_specifies_host_port_with_host_ip_and_port(self):
        service = Service(
            'foo',
            image='foo',
            ports=["127.0.0.1:1000:2000"])
        assert service.specifies_host_port()

    def test_specifies_host_port_with_container_port_range(self):
        service = Service(
            'foo',
            image='foo',
            ports=["2000-3000"])
        assert not service.specifies_host_port()

    def test_specifies_host_port_with_host_port_range(self):
        service = Service(
            'foo',
            image='foo',
            ports=["1000-2000:2000-3000"])
        assert service.specifies_host_port()

    def test_specifies_host_port_with_host_ip_no_port_range(self):
        service = Service(
            'foo',
            image='foo',
            ports=["127.0.0.1::2000-3000"])
        assert not service.specifies_host_port()

    def test_specifies_host_port_with_host_ip_and_port_range(self):
        service = Service(
            'foo',
            image='foo',
            ports=["127.0.0.1:1000-2000:2000-3000"])
        assert service.specifies_host_port()

    def test_image_name_from_config(self):
        image_name = 'example/web:mytag'
        service = Service('foo', image=image_name)
        assert service.image_name == image_name

    def test_image_name_default(self):
        service = Service('foo', project='testing')
        assert service.image_name == 'testing_foo'

    @mock.patch('compose.service.log', autospec=True)
    def test_only_log_warning_when_host_ports_clash(self, mock_log):
        self.mock_client.inspect_image.return_value = {'Id': 'abcd'}
        ParallelStreamWriter.instance = None
        name = 'foo'
        service = Service(
            name,
            client=self.mock_client,
            ports=["8080:80"])
        service.scale(0)
        assert not mock_log.warning.called
        service.scale(1)
        assert not mock_log.warning.called
        service.scale(2)
        mock_log.warning.assert_called_once_with(
            'The "{}" service specifies a port on the host. If multiple containers '
            'for this service are created on a single host, the port will clash.'.format(name))
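
    # The proxy tests below cover the 'proxies' section of the Docker client
    # config (typically ~/.docker/config.json). A config shaped roughly like
    # the following (illustrative values only):
    #
    #     {"proxies": {"default": {"httpProxy": "...", "noProxy": "..."}}}
    #
    # is expanded by Compose into both upper- and lower-case environment
    # variables (HTTP_PROXY/http_proxy, etc.) for containers and builds.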
    def test_parse_proxy_config(self):
        default_proxy_config = {
            'httpProxy': 'http://proxy.mycorp.com:3128',
            'httpsProxy': 'https://user:password@proxy.mycorp.com:3129',
            'ftpProxy': 'http://ftpproxy.mycorp.com:21',
            'noProxy': '*.intra.mycorp.com',
        }
        self.mock_client.base_url = 'http+docker://localunixsocket'
        self.mock_client._general_configs = {
            'proxies': {
                'default': default_proxy_config,
            }
        }

        service = Service('foo', client=self.mock_client)

        assert service._parse_proxy_config() == {
            'HTTP_PROXY': default_proxy_config['httpProxy'],
            'http_proxy': default_proxy_config['httpProxy'],
            'HTTPS_PROXY': default_proxy_config['httpsProxy'],
            'https_proxy': default_proxy_config['httpsProxy'],
            'FTP_PROXY': default_proxy_config['ftpProxy'],
            'ftp_proxy': default_proxy_config['ftpProxy'],
            'NO_PROXY': default_proxy_config['noProxy'],
            'no_proxy': default_proxy_config['noProxy'],
        }

    def test_parse_proxy_config_per_host(self):
        default_proxy_config = {
            'httpProxy': 'http://proxy.mycorp.com:3128',
            'httpsProxy': 'https://user:password@proxy.mycorp.com:3129',
            'ftpProxy': 'http://ftpproxy.mycorp.com:21',
            'noProxy': '*.intra.mycorp.com',
        }
        host_specific_proxy_config = {
            'httpProxy': 'http://proxy.example.com:3128',
            'httpsProxy': 'https://user:password@proxy.example.com:3129',
            'ftpProxy': 'http://ftpproxy.example.com:21',
            'noProxy': '*.intra.example.com'
        }
        self.mock_client.base_url = 'http+docker://localunixsocket'
        self.mock_client._general_configs = {
            'proxies': {
                'default': default_proxy_config,
                'tcp://example.docker.com:2376': host_specific_proxy_config,
            }
        }

        service = Service('foo', client=self.mock_client)

        assert service._parse_proxy_config() == {
            'HTTP_PROXY': default_proxy_config['httpProxy'],
            'http_proxy': default_proxy_config['httpProxy'],
            'HTTPS_PROXY': default_proxy_config['httpsProxy'],
            'https_proxy': default_proxy_config['httpsProxy'],
            'FTP_PROXY': default_proxy_config['ftpProxy'],
            'ftp_proxy': default_proxy_config['ftpProxy'],
            'NO_PROXY': default_proxy_config['noProxy'],
            'no_proxy': default_proxy_config['noProxy'],
        }

        self.mock_client._original_base_url = 'tcp://example.docker.com:2376'

        assert service._parse_proxy_config() == {
            'HTTP_PROXY': host_specific_proxy_config['httpProxy'],
            'http_proxy': host_specific_proxy_config['httpProxy'],
            'HTTPS_PROXY': host_specific_proxy_config['httpsProxy'],
            'https_proxy': host_specific_proxy_config['httpsProxy'],
            'FTP_PROXY': host_specific_proxy_config['ftpProxy'],
            'ftp_proxy': host_specific_proxy_config['ftpProxy'],
            'NO_PROXY': host_specific_proxy_config['noProxy'],
            'no_proxy': host_specific_proxy_config['noProxy'],
        }

    def test_build_service_with_proxy_config(self):
        default_proxy_config = {
            'httpProxy': 'http://proxy.mycorp.com:3128',
            'httpsProxy': 'https://user:password@proxy.example.com:3129',
        }
        buildargs = {
            'HTTPS_PROXY': 'https://rdcf.th08.jp:8911',
            'https_proxy': 'https://rdcf.th08.jp:8911',
        }
        self.mock_client._general_configs = {
            'proxies': {
                'default': default_proxy_config,
            }
        }
        self.mock_client.base_url = 'http+docker://localunixsocket'
        self.mock_client.build.return_value = [
            b'{"stream": "Successfully built 12345"}',
        ]

        service = Service('foo', client=self.mock_client, build={'context': '.', 'args': buildargs})
        service.build()

        assert self.mock_client.build.call_count == 1
        assert self.mock_client.build.call_args[1]['buildargs'] == {
            'HTTP_PROXY': default_proxy_config['httpProxy'],
            'http_proxy': default_proxy_config['httpProxy'],
            'HTTPS_PROXY': buildargs['HTTPS_PROXY'],
            'https_proxy': buildargs['HTTPS_PROXY'],
        }
    def test_get_create_options_with_proxy_config(self):
        default_proxy_config = {
            'httpProxy': 'http://proxy.mycorp.com:3128',
            'httpsProxy': 'https://user:password@proxy.mycorp.com:3129',
            'ftpProxy': 'http://ftpproxy.mycorp.com:21',
        }
        self.mock_client._general_configs = {
            'proxies': {
                'default': default_proxy_config,
            }
        }
        self.mock_client.base_url = 'http+docker://localunixsocket'

        override_options = {
            'environment': {
                'FTP_PROXY': 'ftp://xdge.exo.au:21',
                'ftp_proxy': 'ftp://xdge.exo.au:21',
            }
        }
        environment = {
            'HTTPS_PROXY': 'https://rdcf.th08.jp:8911',
            'https_proxy': 'https://rdcf.th08.jp:8911',
        }

        service = Service('foo', client=self.mock_client, environment=environment)

        create_opts = service._get_container_create_options(override_options, 1)
        assert set(create_opts['environment']) == set(format_environment({
            'HTTP_PROXY': default_proxy_config['httpProxy'],
            'http_proxy': default_proxy_config['httpProxy'],
            'HTTPS_PROXY': environment['HTTPS_PROXY'],
            'https_proxy': environment['HTTPS_PROXY'],
            'FTP_PROXY': override_options['environment']['FTP_PROXY'],
            'ftp_proxy': override_options['environment']['FTP_PROXY'],
        }))

    def test_create_when_removed_containers_are_listed(self):
        # This is aimed at simulating a race between the API call to list the
        # containers, and the ones to inspect each of the listed containers.
        # It can happen that a container has been removed after we listed it.

        # containers() returns a container that is about to be removed
        self.mock_client.containers.return_value = [
            {'Id': 'rm_cont_id', 'Name': 'rm_cont', 'Image': 'img_id'},
        ]

        # inspect_container() will raise a NotFound when trying to inspect
        # rm_cont_id, which at this point has been removed
        def inspect(name):
            if name == 'rm_cont_id':
                raise NotFound(message='Not Found')
            if name == 'new_cont_id':
                return {'Id': 'new_cont_id'}
            raise NotImplementedError("incomplete mock")

        self.mock_client.inspect_container.side_effect = inspect
        self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
        self.mock_client.create_container.return_value = {'Id': 'new_cont_id'}

        # We should nonetheless be able to create a new container
        service = Service('foo', client=self.mock_client)
        assert service.create_container().id == 'new_cont_id'

    def test_build_volume_options_duplicate_binds(self):
        self.mock_client.api_version = '1.29'  # Trigger 3.2 format workaround
        service = Service('foo', client=self.mock_client)
        ctnr_opts, override_opts = service._build_container_volume_options(
            previous_container=None,
            container_options={
                'volumes': [
                    MountSpec.parse({'source': 'vol', 'target': '/data', 'type': 'volume'}),
                    VolumeSpec.parse('vol:/data:rw'),
                ],
                'environment': {},
            },
            override_options={},
        )
        assert 'binds' in override_opts
        assert len(override_opts['binds']) == 1
        assert override_opts['binds'][0] == 'vol:/data:rw'

    def test_volumes_order_is_preserved(self):
        service = Service('foo', client=self.mock_client)
        volumes = [
            VolumeSpec.parse(cfg) for cfg in [
                '/v{0}:/v{0}:rw'.format(i) for i in range(6)
            ]
        ]
        ctnr_opts, override_opts = service._build_container_volume_options(
            previous_container=None,
            container_options={
                'volumes': volumes,
                'environment': {},
            },
            override_options={},
        )
        assert override_opts['binds'] == [vol.repr() for vol in volumes]


class TestServiceNetwork(unittest.TestCase):
    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)
        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
        self.mock_client._general_configs = {}
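
    # Reconnecting a container to a network it is already attached to, with
    # an alias list that already contains the container's short id, should
    # be a no-op: neither disconnect nor connect may be called.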
    def test_connect_container_to_networks_short_alias_exists(self):
        service = Service(
            'db',
            self.mock_client,
            'myproject',
            image='foo',
            networks={'project_default': {}})
        container = Container(
            None,
            {
                'Id': 'abcdef',
                'NetworkSettings': {
                    'Networks': {
                        'project_default': {
                            'Aliases': ['analias', 'abcdef'],
                        },
                    },
                },
            },
            True)
        service.connect_container_to_networks(container)

        assert not self.mock_client.disconnect_container_from_network.call_count
        assert not self.mock_client.connect_container_to_network.call_count


def sort_by_name(dictionary_list):
    return sorted(dictionary_list, key=lambda k: k['name'])


class BuildUlimitsTestCase(unittest.TestCase):

    def test_build_ulimits_with_dict(self):
        ulimits = build_ulimits(
            {
                'nofile': {'soft': 10000, 'hard': 20000},
                'nproc': {'soft': 65535, 'hard': 65535}
            }
        )
        expected = [
            {'name': 'nofile', 'soft': 10000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535}
        ]
        assert sort_by_name(ulimits) == sort_by_name(expected)

    def test_build_ulimits_with_ints(self):
        ulimits = build_ulimits({'nofile': 20000, 'nproc': 65535})
        expected = [
            {'name': 'nofile', 'soft': 20000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535}
        ]
        assert sort_by_name(ulimits) == sort_by_name(expected)

    def test_build_ulimits_with_integers_and_dicts(self):
        ulimits = build_ulimits(
            {
                'nproc': 65535,
                'nofile': {'soft': 10000, 'hard': 20000}
            }
        )
        expected = [
            {'name': 'nofile', 'soft': 10000, 'hard': 20000},
            {'name': 'nproc', 'soft': 65535, 'hard': 65535}
        ]
        assert sort_by_name(ulimits) == sort_by_name(expected)


class NetTestCase(unittest.TestCase):
    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)
        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
        self.mock_client._general_configs = {}

    def test_network_mode(self):
        network_mode = NetworkMode('host')
        assert network_mode.id == 'host'
        assert network_mode.mode == 'host'
        assert network_mode.service_name is None

    def test_network_mode_container(self):
        container_id = 'abcd'
        network_mode = ContainerNetworkMode(Container(None, {'Id': container_id}))
        assert network_mode.id == container_id
        assert network_mode.mode == 'container:' + container_id
        assert network_mode.service_name is None

    def test_network_mode_service(self):
        container_id = 'bbbb'
        service_name = 'web'
        self.mock_client.containers.return_value = [
            {'Id': container_id, 'Name': container_id, 'Image': 'abcd'},
        ]

        service = Service(name=service_name, client=self.mock_client)
        network_mode = ServiceNetworkMode(service)

        assert network_mode.id == service_name
        assert network_mode.mode == 'container:' + container_id
        assert network_mode.service_name == service_name

    def test_network_mode_service_no_containers(self):
        service_name = 'web'
        self.mock_client.containers.return_value = []

        service = Service(name=service_name, client=self.mock_client)
        network_mode = ServiceNetworkMode(service)

        assert network_mode.id == service_name
        assert network_mode.mode is None
        assert network_mode.service_name == service_name


class ServicePortsTest(unittest.TestCase):
    def test_formatted_ports(self):
        ports = [
            '3000',
            '0.0.0.0:4025-4030:23000-23005',
            ServicePort(6000, None, None, None, None),
            ServicePort(8080, 8080, None, None, None),
            ServicePort('20000', '20000', 'udp', 'ingress', None),
            ServicePort(30000, '30000', 'tcp', None, '127.0.0.1'),
        ]
        formatted = formatted_ports(ports)
        assert ports[0] in formatted
        assert ports[1] in formatted
        assert '6000/tcp' in formatted
        assert '8080:8080/tcp' in formatted
        assert '20000:20000/udp' in formatted
        assert '127.0.0.1:30000:30000/tcp' in formatted
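

# For reference, the assertions in ServicePortsTest above pin down how
# formatted_ports() renders ServicePort tuples: a bare container port
# becomes '6000/tcp', a host:container pair '8080:8080/tcp', a protocol
# override '20000:20000/udp', and an external IP prefixes the mapping as
# '127.0.0.1:30000:30000/tcp'; plain strings pass through unchanged.
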
def build_mount(destination, source, mode='rw'):
    return {'Source': source, 'Destination': destination, 'Mode': mode}


class ServiceVolumesTest(unittest.TestCase):

    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)
        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
        self.mock_client._general_configs = {}

    def test_build_volume_binding(self):
        binding = build_volume_binding(VolumeSpec.parse('/outside:/inside', True))
        assert binding == ('/inside', '/outside:/inside:rw')

    def test_get_container_data_volumes(self):
        options = [VolumeSpec.parse(v) for v in [
            '/host/volume:/host/volume:ro',
            '/new/volume',
            '/existing/volume',
            'named:/named/vol',
            '/dev/tmpfs'
        ]]

        self.mock_client.inspect_image.return_value = {
            'ContainerConfig': {
                'Volumes': {
                    '/mnt/image/data': {},
                }
            }
        }
        container = Container(self.mock_client, {
            'Image': 'ababab',
            'Mounts': [
                {
                    'Source': '/host/volume',
                    'Destination': '/host/volume',
                    'Mode': '',
                    'RW': True,
                    'Name': 'hostvolume',
                }, {
                    'Source': '/var/lib/docker/aaaaaaaa',
                    'Destination': '/existing/volume',
                    'Mode': '',
                    'RW': True,
                    'Name': 'existingvolume',
                }, {
                    'Source': '/var/lib/docker/bbbbbbbb',
                    'Destination': '/removed/volume',
                    'Mode': '',
                    'RW': True,
                    'Name': 'removedvolume',
                }, {
                    'Source': '/var/lib/docker/cccccccc',
                    'Destination': '/mnt/image/data',
                    'Mode': '',
                    'RW': True,
                    'Name': 'imagedata',
                },
            ]
        }, has_been_inspected=True)

        expected = [
            VolumeSpec.parse('existingvolume:/existing/volume:rw'),
            VolumeSpec.parse('imagedata:/mnt/image/data:rw'),
        ]

        volumes, _ = get_container_data_volumes(container, options, ['/dev/tmpfs'], [])
        assert sorted(volumes) == sorted(expected)

    def test_merge_volume_bindings(self):
        options = [
            VolumeSpec.parse(v, True) for v in [
                '/host/volume:/host/volume:ro',
                '/host/rw/volume:/host/rw/volume',
                '/new/volume',
                '/existing/volume',
                '/dev/tmpfs'
            ]
        ]

        self.mock_client.inspect_image.return_value = {
            'ContainerConfig': {'Volumes': {}}
        }

        previous_container = Container(self.mock_client, {
            'Id': 'cdefab',
            'Image': 'ababab',
            'Mounts': [{
                'Source': '/var/lib/docker/aaaaaaaa',
                'Destination': '/existing/volume',
                'Mode': '',
                'RW': True,
                'Name': 'existingvolume',
            }],
        }, has_been_inspected=True)

        expected = [
            '/host/volume:/host/volume:ro',
            '/host/rw/volume:/host/rw/volume:rw',
            'existingvolume:/existing/volume:rw',
        ]

        binds, affinity = merge_volume_bindings(options, ['/dev/tmpfs'], previous_container, [])
        assert sorted(binds) == sorted(expected)
        assert affinity == {'affinity:container': '=cdefab'}

    def test_mount_same_host_path_to_two_volumes(self):
        service = Service(
            'web',
            image='busybox',
            volumes=[
                VolumeSpec.parse('/host/path:/data1', True),
                VolumeSpec.parse('/host/path:/data2', True),
            ],
            client=self.mock_client,
        )

        self.mock_client.inspect_image.return_value = {
            'Id': 'ababab',
            'ContainerConfig': {
                'Volumes': {}
            }
        }

        service._get_container_create_options(
            override_options={},
            number=1,
        )

        assert set(self.mock_client.create_host_config.call_args[1]['binds']) == {
            '/host/path:/data1:rw',
            '/host/path:/data2:rw',
        }
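
    # The following test models a quirk of VM-backed Docker hosts (e.g.
    # docker-machine), where a bind source such as /host/path can show up in
    # the container JSON as a remapped path like /mnt/sda1/host/path; Compose
    # then falls back to the recorded volume name when rebuilding binds.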
    def test_get_container_create_options_with_different_host_path_in_container_json(self):
        service = Service(
            'web',
            image='busybox',
            volumes=[VolumeSpec.parse('/host/path:/data')],
            client=self.mock_client,
        )
        volume_name = 'abcdefff1234'

        self.mock_client.inspect_image.return_value = {
            'Id': 'ababab',
            'ContainerConfig': {
                'Volumes': {
                    '/data': {},
                }
            }
        }

        self.mock_client.inspect_container.return_value = {
            'Id': '123123123',
            'Image': 'ababab',
            'Mounts': [
                {
                    'Destination': '/data',
                    'Source': '/mnt/sda1/host/path',
                    'Mode': '',
                    'RW': True,
                    'Driver': 'local',
                    'Name': volume_name,
                },
            ]
        }

        service._get_container_create_options(
            override_options={},
            number=1,
            previous_container=Container(self.mock_client, {'Id': '123123123'}),
        )

        assert (
            self.mock_client.create_host_config.call_args[1]['binds'] ==
            ['{}:/data:rw'.format(volume_name)]
        )

    def test_warn_on_masked_volume_no_warning_when_no_container_volumes(self):
        volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
        container_volumes = []
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        assert not mock_log.warning.called

    def test_warn_on_masked_volume_when_masked(self):
        volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
        container_volumes = [
            VolumeSpec('/var/lib/docker/path', '/path', 'rw'),
            VolumeSpec('/var/lib/docker/path', '/other', 'rw'),
        ]
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        mock_log.warning.assert_called_once_with(mock.ANY)

    def test_warn_on_masked_no_warning_with_same_path(self):
        volumes_option = [VolumeSpec('/home/user', '/path', 'rw')]
        container_volumes = [VolumeSpec('/home/user', '/path', 'rw')]
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        assert not mock_log.warning.called

    def test_warn_on_masked_no_warning_with_container_only_option(self):
        volumes_option = [VolumeSpec(None, '/path', 'rw')]
        container_volumes = [
            VolumeSpec('/var/lib/docker/volume/path', '/path', 'rw')
        ]
        service = 'service_name'

        with mock.patch('compose.service.log', autospec=True) as mock_log:
            warn_on_masked_volume(volumes_option, container_volumes, service)

        assert not mock_log.warning.called

    def test_create_with_special_volume_mode(self):
        self.mock_client.inspect_image.return_value = {'Id': 'imageid'}
        self.mock_client.create_container.return_value = {'Id': 'containerid'}

        volume = '/tmp:/foo:z'
        Service(
            'web',
            client=self.mock_client,
            image='busybox',
            volumes=[VolumeSpec.parse(volume, True)],
        ).create_container()

        assert self.mock_client.create_container.call_count == 1
        assert self.mock_client.create_host_config.call_args[1]['binds'] == [volume]


class ServiceSecretTest(unittest.TestCase):
    def setUp(self):
        self.mock_client = mock.create_autospec(docker.APIClient)
        self.mock_client.api_version = DEFAULT_DOCKER_API_VERSION
        self.mock_client._general_configs = {}

    def test_get_secret_volumes(self):
        secret1 = {
            'secret': ServiceSecret.parse({'source': 'secret1', 'target': 'b.txt'}),
            'file': 'a.txt',
        }
        service = Service(
            'web',
            client=self.mock_client,
            image='busybox',
            secrets=[secret1],
        )
        volumes = service.get_secret_volumes()

        assert volumes[0].source == secret1['file']
        assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].target)

    def test_get_secret_volumes_abspath(self):
        secret1 = {
            'secret': ServiceSecret.parse({'source': 'secret1', 'target': '/d.txt'}),
            'file': 'c.txt',
        }
        service = Service(
            'web',
            client=self.mock_client,
            image='busybox',
            secrets=[secret1],
        )
        volumes = service.get_secret_volumes()

        assert volumes[0].source == secret1['file']
        assert volumes[0].target == secret1['secret'].target
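
    # Secret target resolution, as covered by these three tests: a relative
    # target lands under SECRETS_PATH, an absolute target is used verbatim,
    # and a missing target defaults to SECRETS_PATH/<source name>.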
    def test_get_secret_volumes_no_target(self):
        secret1 = {
            'secret': ServiceSecret.parse({'source': 'secret1'}),
            'file': 'c.txt',
        }
        service = Service(
            'web',
            client=self.mock_client,
            image='busybox',
            secrets=[secret1],
        )
        volumes = service.get_secret_volumes()

        assert volumes[0].source == secret1['file']
        assert volumes[0].target == '{}/{}'.format(SECRETS_PATH, secret1['secret'].source)


class RewriteBuildPathTest(unittest.TestCase):
    @mock.patch('compose.service.IS_WINDOWS_PLATFORM', True)
    def test_rewrite_url_no_prefix(self):
        urls = [
            'http://test.com',
            'https://test.com',
            'git://test.com',
            'github.com/test/test',
            'git@test.com',
        ]
        for u in urls:
            assert rewrite_build_path(u) == u

    @mock.patch('compose.service.IS_WINDOWS_PLATFORM', True)
    def test_rewrite_windows_path(self):
        assert rewrite_build_path('C:\\context') == WINDOWS_LONGPATH_PREFIX + 'C:\\context'
        assert rewrite_build_path(
            rewrite_build_path('C:\\context')
        ) == rewrite_build_path('C:\\context')

    @mock.patch('compose.service.IS_WINDOWS_PLATFORM', False)
    def test_rewrite_unix_path(self):
        assert rewrite_build_path('/context') == '/context'

compose-1.29.2/tests/unit/split_buffer_test.py

from .. import unittest
from compose.utils import split_buffer


class SplitBufferTest(unittest.TestCase):
    def test_single_line_chunks(self):
        def reader():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi\n'

        self.assert_produces(reader, ['abc\n', 'def\n', 'ghi\n'])

    def test_no_end_separator(self):
        def reader():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi'

        self.assert_produces(reader, ['abc\n', 'def\n', 'ghi'])

    def test_multiple_line_chunk(self):
        def reader():
            yield b'abc\ndef\nghi'

        self.assert_produces(reader, ['abc\n', 'def\n', 'ghi'])

    def test_chunked_line(self):
        def reader():
            yield b'a'
            yield b'b'
            yield b'c'
            yield b'\n'
            yield b'd'

        self.assert_produces(reader, ['abc\n', 'd'])

    def test_preserves_unicode_sequences_within_lines(self):
        string = "a\u2022c\n"

        def reader():
            yield string.encode('utf-8')

        self.assert_produces(reader, [string])

    def assert_produces(self, reader, expectations):
        split = split_buffer(reader())

        for (actual, expected) in zip(split, expectations):
            assert type(actual) == type(expected)
            assert actual == expected

compose-1.29.2/tests/unit/timeparse_test.py

from compose import timeparse


def test_milli():
    assert timeparse.timeparse('5ms') == 0.005


def test_milli_float():
    assert timeparse.timeparse('50.5ms') == 0.0505


def test_second_milli():
    assert timeparse.timeparse('200s5ms') == 200.005


def test_second_milli_micro():
    assert timeparse.timeparse('200s5ms10us') == 200.00501


def test_second():
    assert timeparse.timeparse('200s') == 200


def test_second_as_float():
    assert timeparse.timeparse('20.5s') == 20.5


def test_minute():
    assert timeparse.timeparse('32m') == 1920


def test_hour_minute():
    assert timeparse.timeparse('2h32m') == 9120


def test_minute_as_float():
    assert timeparse.timeparse('1.5m') == 90


def test_hour_minute_second():
    assert timeparse.timeparse('5h34m56s') == 20096


def test_invalid_with_space():
    assert timeparse.timeparse('5h 34m 56s') is None


def test_invalid_with_comma():
    assert timeparse.timeparse('5h,34m,56s') is None


def test_invalid_with_empty_string():
    assert timeparse.timeparse('') is None

compose-1.29.2/tests/unit/utils_test.py

from compose import utils


class TestJsonSplitter:

    def test_json_splitter_no_object(self):
        data = '{"foo": "bar'
        assert utils.json_splitter(data) is None

    def test_json_splitter_with_object(self):
        data = '{"foo": "bar"}\n \n{"next": "obj"}'
        assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
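
    # json_splitter's contract, as pinned by these tests: return None while
    # the buffer holds no complete JSON object, otherwise a tuple of
    # (decoded_object, remaining_buffer), e.g. '{"a": 1}rest' would yield
    # ({'a': 1}, 'rest').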
    def test_json_splitter_leading_whitespace(self):
        data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
        assert utils.json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')


class TestStreamAsText:

    def test_stream_with_non_utf_unicode_character(self):
        stream = [b'\xed\xf3\xf3']
        output, = utils.stream_as_text(stream)
        assert output == '���'

    def test_stream_with_utf_character(self):
        stream = ['ěĝ'.encode()]
        output, = utils.stream_as_text(stream)
        assert output == 'ěĝ'


class TestJsonStream:

    def test_with_falsy_entries(self):
        stream = [
            '{"one": "two"}\n{}\n',
            "[1, 2, 3]\n[]\n",
        ]
        output = list(utils.json_stream(stream))
        assert output == [
            {'one': 'two'},
            {},
            [1, 2, 3],
            [],
        ]

    def test_with_leading_whitespace(self):
        stream = [
            '\n \r\n {"one": "two"}{"x": 1}',
            ' {"three": "four"}\t\t{"x": 2}'
        ]
        output = list(utils.json_stream(stream))
        assert output == [
            {'one': 'two'},
            {'x': 1},
            {'three': 'four'},
            {'x': 2}
        ]


class TestParseBytes:
    def test_parse_bytes(self):
        assert utils.parse_bytes('123kb') == 123 * 1024
        assert utils.parse_bytes(123) == 123
        assert utils.parse_bytes('foobar') is None
        assert utils.parse_bytes('123') == 123


class TestMoreItertools:
    def test_unique_everseen(self):
        unique = utils.unique_everseen
        assert list(unique([2, 1, 2, 1])) == [2, 1]
        assert list(unique([2, 1, 2, 1], hash)) == [2, 1]
        assert list(unique([2, 1, 2, 1], lambda x: 'key_%s' % x)) == [2, 1]

compose-1.29.2/tests/unit/volume_test.py

import docker
import pytest

from compose import volume
from tests import mock


@pytest.fixture
def mock_client():
    return mock.create_autospec(docker.APIClient)


class TestVolume:

    def test_remove_local_volume(self, mock_client):
        vol = volume.Volume(mock_client, 'foo', 'project')
        vol.remove()
        mock_client.remove_volume.assert_called_once_with('foo_project')

    def test_remove_external_volume(self, mock_client):
        vol = volume.Volume(mock_client, 'foo', 'project', external=True)
        vol.remove()
        assert not mock_client.remove_volume.called

compose-1.29.2/tox.ini

[tox]
envlist = py37,py39,pre-commit

[testenv]
usedevelop=True
whitelist_externals=mkdir
passenv =
    LD_LIBRARY_PATH
    DOCKER_HOST
    DOCKER_CERT_PATH
    DOCKER_TLS_VERIFY
    DOCKER_VERSION
    SWARM_SKIP_*
    SWARM_ASSUME_MULTINODE
setenv =
    HOME=/tmp
deps =
    -rrequirements-indirect.txt
    -rrequirements.txt
    -rrequirements-dev.txt
commands =
    mkdir -p .coverage-binfiles
    py.test -v \
        --cov=compose \
        --cov-report html \
        --cov-report term \
        --cov-config=tox.ini \
        {posargs:tests}

[testenv:pre-commit]
skip_install = True
deps =
    pre-commit
commands =
    pre-commit install
    pre-commit run --all-files --show-diff-on-failure

# Coverage configuration
[run]
branch = True
data_file = .coverage-binfiles/.coverage

[report]
show_missing = true

[html]
directory = coverage-html
# end coverage configuration

[flake8]
max-line-length = 105
# Set this high for now
max-complexity = 12
exclude = compose/packages

[pytest]
addopts = --tb=short -rxs