podman-compose-1.0.6 (source archive of commit f6dbce36181c44d0d08b6f4ca166508542875ce1)

podman-compose-1.0.6/.github/ISSUE_TEMPLATE/bug_report.md

---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

Please make sure it's not a bug in podman itself (in that case report it to podman) and not a misunderstanding of docker-compose or of how rootless containers work (for example, it is normal for a rootless container not to be able to listen on a port below 1024, such as 80).

Please try to reproduce the bug with the latest `devel` branch.

**To Reproduce**
Steps to reproduce the behavior:
1. the content of the current working directory (e.g. `docker-compose.yml`, `.env`, `Dockerfile`, etc.)
2. the sequence of commands you typed

Please provide a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example), for example a small busybox-based compose YAML.

**Expected behavior**
A clear and concise description of what you expected to happen.

**Actual behavior**
The behavior you actually got and that should not happen.

**Output**
```
$ podman-compose version
using podman version: 3.4.0
podman-compose version 0.1.7dev
podman --version
podman version 3.4.0
$ podman-compose up
...
```

**Environment:**
- OS: Linux / WSL / Mac
- podman version:
- podman-compose version: (git commit hash)

**Additional context**
Add any other context about the problem here.

podman-compose-1.0.6/.github/ISSUE_TEMPLATE/feature_request.md

---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
podman-compose-1.0.6/.github/dependabot.yml000066400000000000000000000001661441451546200206400ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" podman-compose-1.0.6/.github/workflows/000077500000000000000000000000001441451546200200425ustar00rootroot00000000000000podman-compose-1.0.6/.github/workflows/pylint.yml000066400000000000000000000020321441451546200221010ustar00rootroot00000000000000name: Pylint on: - push - pull_request jobs: lint-black: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install psf/black requirements run: | apt-get update apt-get install -y python3 python3-venv - uses: psf/black@stable with: options: "--check --verbose" version: "~= 23.3" lint-pylint: runs-on: ubuntu-latest strategy: matrix: python-version: ["3.8", "3.9", "3.10"] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | python -m pip install --upgrade pip if [ -f requirements.txt ]; then pip install -r requirements.txt; fi pip install pylint - name: Analysing the code with pylint run: | python -m compileall podman_compose.py pylint podman_compose.py # pylint $(git ls-files '*.py') podman-compose-1.0.6/.github/workflows/pytest.yml000066400000000000000000000022141441451546200221140ustar00rootroot00000000000000# This workflow will install Python dependencies, run tests and lint with a single version of Python # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: PyTest on: push: branches: [ devel ] pull_request: branches: [ devel ] jobs: test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Set up Python 3.10 uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install dependencies run: | python -m pip install --upgrade pip pip install flake8 pytest if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - name: Test with pytest run: | python -m pytest ./pytests podman-compose-1.0.6/.gitignore000066400000000000000000000022721441451546200164400ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ .idea/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

podman-compose-1.0.6/.pre-commit-config.yaml

repos:
  - repo: https://github.com/psf/black
    rev: 23.3.0
    hooks:
      - id: black
        # It is recommended to specify the latest version of Python
        # supported by your project here, or alternatively use
        # pre-commit's default_language_version, see
        # https://pre-commit.com/#top_level-default_language_version
        language_version: python3.10
        types: [python]
  - repo: https://github.com/pycqa/flake8
    rev: 6.0.0
    hooks:
      - id: flake8
        types: [python]
  - repo: local
    hooks:
      - id: pylint
        name: pylint
        entry: pylint
        language: system
        types: [python]
        args: [
          "-rn",                 # Only display messages
          "-sn",                 # Don't display the score
          "--rcfile=.pylintrc",  # Link to your config file
        ]

podman-compose-1.0.6/.pylintrc

[MESSAGES CONTROL]
# C0111 missing-docstring: missing-class-docstring, missing-function-docstring,
# missing-method-docstring, missing-module-docstring
# consider-using-with: we need it for the color formatter pipe
disable=too-many-lines,too-many-branches,too-many-locals,too-many-statements,too-many-arguments,too-many-instance-attributes,fixme,multiple-statements,missing-docstring,line-too-long,consider-using-f-string,consider-using-with,unnecessary-lambda-assignment

# allow _ for ignored variables
# allow generic names like a,b,c and i,j,k,l,m,n and x,y,z
# allow k,v for key/value
# allow e for exceptions, it for iterator, ix for index
# allow ip for ip address
# allow w,h for width, height
# allow op for operation/operator/opcode
# allow t, t0, t1, t2, and t3 for time
# allow dt for delta time
# allow db for database
# allow ls for list
# allow p for pipe
# allow ex for examples, exists, etc.
good-names=_,a,b,c,dt,db,e,f,fn,fd,i,j,k,v,kv,kw,l,m,n,ls,t,t0,t1,t2,t3,w,h,x,y,z,it,ix,ip,op,p,ex

podman-compose-1.0.6/CODE-OF-CONDUCT.md

## The Podman Compose Project Community Code of Conduct

The Podman Compose project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).

podman-compose-1.0.6/CONTRIBUTING.md

# Contributing to podman-compose

## Who can contribute?

- Users who have found a bug
- Users who want to propose new functionalities or enhancements
- Users who want to help other users troubleshoot their environments
- Developers who want to fix bugs
- Developers who want to implement new functionalities or enhancements

## Branches

Please request your PR to be merged into the `devel` branch.
Changes to the `stable` branch are managed by the repository maintainers.
## Development environment setup

Note: Some steps are OPTIONAL but all are RECOMMENDED.

1. Fork the project repository and clone it:

   ```shell
   $ git clone https://github.com/USERNAME/podman-compose.git
   $ cd podman-compose
   ```

2. (OPTIONAL) Create a Python virtual environment. Example using [virtualenvwrapper](https://virtualenvwrapper.readthedocs.io/en/latest/):

   ```shell
   mkvirtualenv podman-compose
   ```

3. Install the project runtime and development requirements:

   ```shell
   $ pip install '.[devel]'
   ```

4. (OPTIONAL) Install the `pre-commit` git hook scripts (https://pre-commit.com/#3-install-the-git-hook-scripts):

   ```shell
   $ pre-commit install
   ```

5. Create a new branch, develop, and add tests when possible.

6. Run linting and testing before committing code. Ensure all the hooks are passing:

   ```shell
   $ pre-commit run --all-files
   ```

7. Commit your code to your fork's branch.
   - Make sure you include a `Signed-off-by` message in your commits. Read [this guide](https://docs.github.com/en/authentication/managing-commit-signature-verification/signing-commits) to learn how to sign your commits.
   - In the commit message, reference the issue ID that your code fixes and give a brief description of the changes, for example: `Fixes #516: allow empty network`.

8. Open a PR to `containers/podman-compose:devel` and wait for a maintainer to review your work.

## Adding new commands

To add a command, add a function decorated with `@cmd_run`, passing the compose instance, the command name, and a description.
The wrapped function should accept two arguments: the compose instance and the command-specific arguments (parsed with Python's `argparse` package).
Inside that command you can run podman like this: `compose.podman.run(['inspect', 'something'])`, and you can access `compose.pods`, `compose.containers`, etc.

Here is an example:

```
@cmd_run(podman_compose, 'build', 'build images defined in the stack')
def compose_build(compose, args):
    compose.podman.run(['build', 'something'])
```

## Command arguments parsing

Add a function that accepts `parser`, which is an `argparse` parser instance.
Inside that function you can call `parser.add_argument()`.
Decorate the function with `@cmd_parse`, passing the compose instance and the command name(s) (as a list or as a string).
You can do this multiple times.
Here is an example:

```
@cmd_parse(podman_compose, 'build')
def compose_build_parse(parser):
    parser.add_argument("--pull",
        help="attempt to pull a newer version of the image", action='store_true')
    parser.add_argument("--pull-always",
        help="attempt to pull a newer version of the image; raise an error even if the image is present locally.", action='store_true')
```

NOTE: `@cmd_parse` should come after `@cmd_run`.

## Calling a command from inside another

If you need to call `podman-compose down` from inside `podman-compose up`, do something like:

```
@cmd_run(podman_compose, 'up', 'up desc')
def compose_up(compose, args):
    compose.commands['down'](compose, args)
    # or
    compose.commands['down'](argparse.Namespace(foo=123))
```

## Missing Commands (help needed)

```
bundle             Generate a Docker bundle from the Compose file
config             Validate and view the Compose file
create             Create services
events             Receive real time events from containers
images             List images
logs               View output from containers
port               Print the public port for a port binding
ps                 List containers
rm                 Remove stopped containers
run                Run a one-off command
scale              Set number of containers for a service
top                Display the running processes
```

podman-compose-1.0.6/LICENSE

                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

                            Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software.

Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software.
If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. podman-compose-1.0.6/README.md000066400000000000000000000077761441451546200157450ustar00rootroot00000000000000# Podman Compose ## [![Pylint Test: ](https://github.com/containers/podman-compose/actions/workflows/pylint.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/pylint.yml) [![Unit tests PyTest](https://github.com/containers/podman-compose/actions/workflows/pytest.yml/badge.svg)](https://github.com/containers/podman-compose/actions/workflows/pytest.yml) An implementation of [Compose Spec](https://compose-spec.io/) with [Podman](https://podman.io/) backend. This project focuses on: * rootless * daemon-less process model, we directly execute podman, no running daemon. This project only depends on: * `podman` * [podman dnsname plugin](https://github.com/containers/dnsname): It is usually found in the `podman-plugins` or `podman-dnsname` distro packages, those packages are not pulled by default and you need to install them. This allows containers to be able to resolve each other if they are on the same CNI network. * Python3 * [PyYAML](https://pyyaml.org/) * [python-dotenv](https://pypi.org/project/python-dotenv/) And it's formed as a single Python file script that you can drop into your PATH and run. ## References: * [spec.md](https://github.com/compose-spec/compose-spec/blob/master/spec.md) * [docker-compose compose-file-v3](https://docs.docker.com/compose/compose-file/compose-file-v3/) * [docker-compose compose-file-v2](https://docs.docker.com/compose/compose-file/compose-file-v2/) ## Alternatives As in [this article](https://fedoramagazine.org/use-docker-compose-with-podman-to-orchestrate-containers-on-fedora/) you can setup a `podman.socket` and use unmodified `docker-compose` that talks to that socket but in this case you lose the process-model (ex. 
`docker-compose build` will send a possibly large context tarball to the daemon).

For a production-like single-machine containerized environment, consider:

- [k3s](https://k3s.io) | [k3s github](https://github.com/rancher/k3s)
- [MiniKube](https://minikube.sigs.k8s.io/)

For the real thing (multi-node clusters), check any production OpenShift/Kubernetes distribution like [OKD](https://www.okd.io/).

## Versions

If you have a legacy version of `podman` (before 3.1.0) you might need to stick with the legacy `podman-compose` `0.1.x` branch.
The legacy `0.1.x` branch uses mappings and workarounds to compensate for rootless limitations.

Modern podman versions (>= 3.4) do not have those limitations, so you can use the latest and stable `1.x` branch.

If you are upgrading from `podman-compose` version `0.1.x`, note that the global option `-t` to set a mapping type like `hostnet` no longer exists.
If you want that behavior, pass it the standard way, e.g. `network_mode: host` in the YAML.

## Installation

Install the latest stable version from PyPI:

```
pip3 install podman-compose
```

Pass `--user` to install inside the regular user's home without being root.

Or install the latest development version from GitHub:

```
pip3 install https://github.com/containers/podman-compose/archive/devel.tar.gz
```

Or install from the Fedora (starting from f31) repositories:

```
sudo dnf install podman-compose
```

## Basic Usage

We have included fully functional sample stacks inside the `examples/` directory.
You can get more examples from [awesome-compose](https://github.com/docker/awesome-compose).

A quick example would be:

```
cd examples/busybox
podman-compose --help
podman-compose up --help
podman-compose up
```

A richer example can be found in [examples/awx3](examples/awx3), which has:

- a PostgreSQL database
- a RabbitMQ server
- a Memcached server
- a Django web server
- a Django task runner

When testing the `AWX3` example, if you get errors, just wait for the database migrations to finish.

There is also an AWX 17.1.0 example.

## Tests

Inside the `tests/` directory we have many minimal docker-compose stacks that are only meant to exercise as many cases as possible to make sure we stay compatible.

### Unit tests with pytest

Run pytest with the following command:

```shell
python -m pytest pytests
```

# Contributing guide

If you are a user or a developer and want to contribute, please check the [CONTRIBUTING](CONTRIBUTING.md) section.

podman-compose-1.0.6/SECURITY.md

## Security and Disclosure Information Policy for the Podman Compose Project

The Podman Compose Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
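To make the command-plugin pattern from CONTRIBUTING.md above more concrete, here is a hedged sketch of how one of the subcommands listed there under "Missing Commands" (`ps`) might be wired up with the `@cmd_run`/`@cmd_parse` decorators and the `compose.podman.run([...])` call that the contributing guide documents. This is a sketch, not the project's actual implementation: the import line, the `compose.project_name` attribute, and the `io.podman.compose.project` label filter are assumptions made to keep it self-contained; in practice new commands are added directly inside `podman_compose.py`.

```python
# Hedged sketch (not the project's actual implementation): a `ps` subcommand
# built with the decorators described in CONTRIBUTING.md.
# Assumptions: podman_compose, cmd_run and cmd_parse are importable module-level
# names, compose.project_name exists, and project containers carry an
# "io.podman.compose.project" label.
from podman_compose import podman_compose, cmd_run, cmd_parse


@cmd_run(podman_compose, "ps", "list containers of the current project")
def compose_ps(compose, args):
    podman_args = [
        "ps",
        "--filter",
        f"label=io.podman.compose.project={compose.project_name}",
    ]
    if args.quiet:
        podman_args.append("--quiet")
    # CONTRIBUTING.md shows podman being invoked as compose.podman.run([...]).
    compose.podman.run(podman_args)


@cmd_parse(podman_compose, "ps")
def compose_ps_parse(parser):
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="only display container IDs")
```

If you pick up one of the missing commands, compare your code with how the existing `up` and `build` commands are registered in `podman_compose.py`.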
podman-compose-1.0.6/completion/000077500000000000000000000000001441451546200166165ustar00rootroot00000000000000podman-compose-1.0.6/completion/bash/000077500000000000000000000000001441451546200175335ustar00rootroot00000000000000podman-compose-1.0.6/completion/bash/podman-compose000066400000000000000000000273601441451546200224070ustar00rootroot00000000000000# Naming convention: # * _camelCase for function names # * snake_case for variable names # all functions will return 0 if they successfully complete the argument # (or establish there is no need or no way to complete), and something # other than 0 if that's not the case # complete arguments to global options _completeGlobalOptArgs() { # arguments to options that take paths as arguments: complete paths for el in ${path_arg_global_opts}; do if [[ ${prev} == ${el} ]]; then COMPREPLY=( $(compgen -f -- ${cur}) ) return 0 fi done # arguments to options that take generic arguments: don't complete for el in ${generic_arg_global_opts}; do if [[ ${prev} == ${el} ]]; then return 0 fi done return 1 } # complete root subcommands and options _completeRoot() { # if we're completing an option if [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${global_opts}" -- ${cur}) ) return 0 fi # complete root commands COMPREPLY=( $(compgen -W "${root_commands}" -- ${cur}) ) return 0 } # complete names of Compose services _completeServiceNames() { # ideally we should complete service names, # but parsing the compose spec file in the # completion script is quite complex return 0 } # complete commands to run inside containers _completeCommand() { # we would need to complete commands to run inside # a container return 0 } # complete the arguments for `podman-compose up` and return 0 _completeUpArgs() { up_opts="${help_opts} -d --detach --no-color --quiet-pull --no-deps --force-recreate --always-recreate-deps --no-recreate --no-build --no-start --build --abort-on-container-exit -t --timeout -V --renew-anon-volumes --remove-orphans --scale --exit-code-from --pull --pull-always --build-arg --no-cache" if [[ ${prev} == "--scale" || ${prev} == "-t" || ${prev} == "--timeout" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${up_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose exec` and return 0 _completeExecArgs() { exec_opts="${help_opts} -d --detach --privileged -u --user -T --index -e --env -w --workdir" if [[ ${prev} == "-u" || ${prev} == "--user" || ${prev} == "--index" || ${prev} == "-e" || ${prev} == "--env" || ${prev} == "-w" || ${prev} == "--workdir" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${exec_opts}" -- ${cur}) ) return 0 elif [[ ${comp_cword_adj} -eq 2 ]]; then # complete service name _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi elif [[ ${comp_cword_adj} -eq 3 ]]; then _completeCommand if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose down` and return 0 _completeDownArgs() { down_opts="${help_opts} -v --volumes -t --timeout --remove-orphans" if [[ ${prev} == "-t" || ${prev} == "--timeout" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${down_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? 
-eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose build` and return 0 _completeBuildArgs() { build_opts="${help_opts} --pull --pull-always --build-arg --no-cache" if [[ ${prev} == "--build-arg" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${build_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose logs` and return 0 _completeLogsArgs() { logs_opts="${help_opts} -f --follow -l --latest -n --names --since -t --timestamps --tail --until" if [[ ${prev} == "--since" || ${prev} == "--tail" || ${prev} == "--until" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${logs_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose ps` and return 0 _completePsArgs() { ps_opts="${help_opts} -q --quiet" if [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${ps_opts}" -- ${cur}) ) return 0 else return 0 fi } # complete the arguments for `podman-compose pull` and return 0 _completePullArgs() { pull_opts="${help_opts} --force-local" if [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${pull_opts}" -- ${cur}) ) return 0 else return 0 fi } # complete the arguments for `podman-compose push` and return 0 _completePushArgs() { push_opts="${help_opts} --ignore-push-failures" if [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${push_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose restart` and return 0 _completeRestartArgs() { restart_opts="${help_opts} -t --timeout" if [[ ${prev} == "-t" || ${prev} == "--timeout" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${restart_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose stop` and return 0 _completeStopArgs() { stop_opts="${help_opts} -t --timeout" if [[ ${prev} == "-t" || ${prev} == "--timeout" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${stop_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose start` and return 0 _completeStartArgs() { start_opts="${help_opts}" if [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${start_opts}" -- ${cur}) ) return 0 else _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi return 0 fi } # complete the arguments for `podman-compose run` and return 0 _completeRunArgs() { run_opts="${help_opts} -d --detach --privileged -u --user -T --index -e --env -w --workdir" if [[ ${prev} == "-u" || ${prev} == "--user" || ${prev} == "--index" || ${prev} == "-e" || ${prev} == "--env" || ${prev} == "-w" || ${prev} == "--workdir" ]]; then return 0 elif [[ ${cur} == -* ]]; then COMPREPLY=( $(compgen -W "${run_opts}" -- ${cur}) ) return 0 elif [[ ${comp_cword_adj} -eq 2 ]]; then # complete service name _completeServiceNames if [[ $? -eq 0 ]]; then return 0 fi elif [[ ${comp_cword_adj} -eq 3 ]]; then _completeCommand if [[ $? 
-eq 0 ]]; then return 0 fi fi } _podmanCompose() { cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" root_commands="help version pull push build up down ps run exec start stop restart logs" # options to output help text (used as global and subcommand options) help_opts="-h --help" # global options that don't take additional arguments basic_global_opts="${help_opts} -v --no-ansi --no-cleanup --dry-run" # global options that take paths as arguments path_arg_global_opts="-f --file --podman-path" path_arg_global_opts_array=($arg_global_opts) # global options that take arguments that are not files generic_arg_global_opts="-p --project-name --podman-path --podman-args --podman-pull-args --podman-push-args --podman-build-args --podman-inspect-args --podman-run-args --podman-start-args --podman-stop-args --podman-rm-args --podman-volume-args" generic_arg_global_opts_array=($generic_arg_global_opts) # all global options that take arguments arg_global_opts="${path_arg_global_opts} ${generic_arg_global_opts}" arg_global_opts_array=($arg_global_opts) # all global options global_opts="${basic_global_opts} ${arg_global_opts}" chosen_root_command="" _completeGlobalOptArgs if [[ $? -eq 0 ]]; then return 0 fi # computing comp_cword_adj, which thruthfully tells us how deep in the subcommands tree we are # additionally, set the chosen_root_command if possible comp_cword_adj=${COMP_CWORD} if [[ ${COMP_CWORD} -ge 2 ]]; then skip_next="no" for el in ${COMP_WORDS[@]}; do # if the user has asked for help text there's no need to complete further if [[ ${el} == "-h" || ${el} == "--help" ]]; then return 0 fi if [[ ${skip_next} == "yes" ]]; then let "comp_cword_adj--" skip_next="no" continue fi if [[ ${el} == -* && ${el} != ${cur} ]]; then let "comp_cword_adj--" for opt in ${arg_global_opts_array[@]}; do if [[ ${el} == ${opt} ]]; then skip_next="yes" fi done elif [[ ${el} != ${cur} && ${el} != ${COMP_WORDS[0]} && ${chosen_root_command} == "" ]]; then chosen_root_command=${el} fi done fi if [[ ${comp_cword_adj} -eq 1 ]]; then _completeRoot # Given that we check the value of comp_cword_adj outside # of it, at the moment _completeRoot should always return # 0, this is just here in case changes are made. The same # will apply to similar functions below if [[ $? -eq 0 ]]; then return 0 fi fi case $chosen_root_command in up) _completeUpArgs if [[ $? -eq 0 ]]; then return 0 fi ;; down) _completeDownArgs if [[ $? -eq 0 ]]; then return 0 fi ;; exec) _completeExecArgs if [[ $? -eq 0 ]]; then return 0 fi ;; build) _completeBuildArgs if [[ $? -eq 0 ]]; then return 0 fi ;; logs) _completeLogsArgs if [[ $? -eq 0 ]]; then return 0 fi ;; ps) _completePsArgs if [[ $? -eq 0 ]]; then return 0 fi ;; pull) _completePullArgs if [[ $? -eq 0 ]]; then return 0 fi ;; push) _completePushArgs if [[ $? -eq 0 ]]; then return 0 fi ;; restart) _completeRestartArgs if [[ $? -eq 0 ]]; then return 0 fi ;; start) _completeStartArgs if [[ $? -eq 0 ]]; then return 0 fi ;; stop) _completeStopArgs if [[ $? -eq 0 ]]; then return 0 fi ;; run) _completeRunArgs if [[ $? 
-eq 0 ]]; then return 0 fi ;; esac } complete -F _podmanCompose podman-compose podman-compose-1.0.6/docs/000077500000000000000000000000001441451546200153755ustar00rootroot00000000000000podman-compose-1.0.6/docs/Mappings.md000066400000000000000000000011141441451546200174720ustar00rootroot00000000000000# Overview * `1podfw` - create all containers in one pod (inter-container communication is done via `localhost`), doing port mapping in that pod * `1pod` - create all containers in one pod, doing port mapping in each container (does not work) * `identity` - no mapping * `hostnet` - use host network, and inter-container communication is done via host gateway and published ports * `cntnet` - create a container and use it via `--network container:name` (inter-container communication via `localhost`) * `publishall` - publish all ports to host (using `-P`) and communicate via gateway podman-compose-1.0.6/examples/000077500000000000000000000000001441451546200162635ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/000077500000000000000000000000001441451546200172325ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/README.md000066400000000000000000000023321441451546200205110ustar00rootroot00000000000000# AWX Compose the directory roles is taken from [here](https://github.com/ansible/awx/tree/17.1.0/installer/roles/local_docker) also look at https://github.com/ansible/awx/tree/17.1.0/tools/docker-compose ``` mkdir deploy awx17 ansible localhost \ -e host_port=8080 \ -e awx_secret_key='awx,secret.123' \ -e secret_key='awx,secret.123' \ -e admin_user='admin' \ -e admin_password='admin' \ -e pg_password='awx,123.' \ -e pg_username='awx' \ -e pg_database='awx' \ -e pg_port='5432' \ -e redis_image="docker.io/library/redis:6-alpine" \ -e postgres_data_dir="./data/pg" \ -e compose_start_containers=false \ -e dockerhub_base='docker.io/ansible' \ -e awx_image='docker.io/ansible/awx' \ -e awx_version='17.1.0' \ -e dockerhub_version='17.1.0' \ -e docker_deploy_base_path=$PWD/deploy \ -e docker_compose_dir=$PWD/awx17 \ -e awx_task_hostname=awx \ -e awx_web_hostname=awxweb \ -m include_role -a name=local_docker cp awx17/docker-compose.yml awx17/docker-compose.yml.orig sed -i -re "s#- \"$PWD/awx17/(.*):/#- \"./\1:/#" awx17/docker-compose.yml cd awx17 podman-compose run --rm --service-ports task awx-manage migrate --no-input podman-compose up -d ``` podman-compose-1.0.6/examples/awx17/roles/000077500000000000000000000000001441451546200203565ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/roles/local_docker/000077500000000000000000000000001441451546200227775ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/roles/local_docker/defaults/000077500000000000000000000000001441451546200246065ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/roles/local_docker/defaults/main.yml000066400000000000000000000003711441451546200262560ustar00rootroot00000000000000--- dockerhub_version: "{{ lookup('file', playbook_dir + '/../VERSION') }}" awx_image: "awx" redis_image: "redis" postgresql_version: "12" postgresql_image: "postgres:{{postgresql_version}}" compose_start_containers: true upgrade_postgres: false podman-compose-1.0.6/examples/awx17/roles/local_docker/tasks/000077500000000000000000000000001441451546200241245ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/roles/local_docker/tasks/compose.yml000066400000000000000000000042751441451546200263240ustar00rootroot00000000000000--- - name: Create {{ docker_compose_dir }} directory file: path: "{{ 
docker_compose_dir }}" state: directory - name: Create Redis socket directory file: path: "{{ docker_compose_dir }}/redis_socket" state: directory mode: 0777 - name: Create Docker Compose Configuration template: src: "{{ item.file }}.j2" dest: "{{ docker_compose_dir }}/{{ item.file }}" mode: "{{ item.mode }}" loop: - file: environment.sh mode: "0600" - file: credentials.py mode: "0600" - file: docker-compose.yml mode: "0600" - file: nginx.conf mode: "0600" - file: redis.conf mode: "0664" register: awx_compose_config - name: Render SECRET_KEY file copy: content: "{{ secret_key }}" dest: "{{ docker_compose_dir }}/SECRET_KEY" mode: 0600 register: awx_secret_key - block: - name: Remove AWX containers before migrating postgres so that the old postgres container does not get used docker_compose: project_src: "{{ docker_compose_dir }}" state: absent ignore_errors: true - name: Run migrations in task container shell: docker-compose run --rm --service-ports task awx-manage migrate --no-input args: chdir: "{{ docker_compose_dir }}" - name: Start the containers docker_compose: project_src: "{{ docker_compose_dir }}" restarted: "{{ awx_compose_config is changed or awx_secret_key is changed }}" register: awx_compose_start - name: Update CA trust in awx_web container command: docker exec awx_web '/usr/bin/update-ca-trust' when: awx_compose_config.changed or awx_compose_start.changed - name: Update CA trust in awx_task container command: docker exec awx_task '/usr/bin/update-ca-trust' when: awx_compose_config.changed or awx_compose_start.changed - name: Wait for launch script to create user wait_for: timeout: 10 delegate_to: localhost - name: Create Preload data command: docker exec awx_task bash -c "/usr/bin/awx-manage create_preload_data" when: create_preload_data|bool register: cdo changed_when: "'added' in cdo.stdout" when: compose_start_containers|bool podman-compose-1.0.6/examples/awx17/roles/local_docker/tasks/main.yml000066400000000000000000000006161441451546200255760ustar00rootroot00000000000000--- - name: Generate broadcast websocket secret set_fact: broadcast_websocket_secret: "{{ lookup('password', '/dev/null length=128') }}" run_once: true no_log: true when: broadcast_websocket_secret is not defined - import_tasks: upgrade_postgres.yml when: - postgres_data_dir is defined - pg_hostname is not defined - import_tasks: set_image.yml - import_tasks: compose.yml podman-compose-1.0.6/examples/awx17/roles/local_docker/tasks/set_image.yml000066400000000000000000000034741441451546200266140ustar00rootroot00000000000000--- - name: Manage AWX Container Images block: - name: Export Docker awx image if it isnt local and there isnt a registry defined docker_image: name: "{{ awx_image }}" tag: "{{ awx_version }}" archive_path: "{{ awx_local_base_config_path|default('/tmp') }}/{{ awx_image }}_{{ awx_version }}.tar" when: inventory_hostname != "localhost" and docker_registry is not defined delegate_to: localhost - name: Set docker base path set_fact: docker_deploy_base_path: "{{ awx_base_path|default('/tmp') }}/docker_deploy" when: ansible_connection != "local" and docker_registry is not defined - name: Ensure directory exists file: path: "{{ docker_deploy_base_path }}" state: directory when: ansible_connection != "local" and docker_registry is not defined - name: Copy awx image to docker execution copy: src: "{{ awx_local_base_config_path|default('/tmp') }}/{{ awx_image }}_{{ awx_version }}.tar" dest: "{{ docker_deploy_base_path }}/{{ awx_image }}_{{ awx_version }}.tar" when: ansible_connection != "local" 
and docker_registry is not defined - name: Load awx image docker_image: name: "{{ awx_image }}" tag: "{{ awx_version }}" load_path: "{{ docker_deploy_base_path }}/{{ awx_image }}_{{ awx_version }}.tar" timeout: 300 when: ansible_connection != "local" and docker_registry is not defined - name: Set full image path for local install set_fact: awx_docker_actual_image: "{{ awx_image }}:{{ awx_version }}" when: docker_registry is not defined when: dockerhub_base is not defined - name: Set DockerHub Image Paths set_fact: awx_docker_actual_image: "{{ dockerhub_base }}/awx:{{ dockerhub_version }}" when: dockerhub_base is defined podman-compose-1.0.6/examples/awx17/roles/local_docker/tasks/upgrade_postgres.yml000066400000000000000000000040741441451546200302310ustar00rootroot00000000000000--- - name: Create {{ postgres_data_dir }} directory file: path: "{{ postgres_data_dir }}" state: directory - name: Get full path of postgres data dir shell: "echo {{ postgres_data_dir }}" register: fq_postgres_data_dir - name: Register temporary docker container set_fact: container_command: "docker run --rm -v '{{ fq_postgres_data_dir.stdout }}:/var/lib/postgresql' centos:8 bash -c " - name: Check for existing Postgres data (run from inside the container for access to file) shell: cmd: | {{ container_command }} "[[ -f /var/lib/postgresql/10/data/PG_VERSION ]] && echo 'exists'" register: pg_version_file ignore_errors: true - name: Record Postgres version shell: | {{ container_command }} "cat /var/lib/postgresql/10/data/PG_VERSION" register: old_pg_version when: pg_version_file is defined and pg_version_file.stdout == 'exists' - name: Determine whether to upgrade postgres set_fact: upgrade_postgres: "{{ old_pg_version.stdout == '10' }}" when: old_pg_version.changed - name: Set up new postgres paths pre-upgrade shell: | {{ container_command }} "mkdir -p /var/lib/postgresql/12/data/" when: upgrade_postgres | bool - name: Stop AWX before upgrading postgres docker_compose: project_src: "{{ docker_compose_dir }}" stopped: true when: upgrade_postgres | bool - name: Upgrade Postgres shell: | docker run --rm \ -v {{ postgres_data_dir }}/10/data:/var/lib/postgresql/10/data \ -v {{ postgres_data_dir }}/12/data:/var/lib/postgresql/12/data \ -e PGUSER={{ pg_username }} -e POSTGRES_INITDB_ARGS="-U {{ pg_username }}" \ tianon/postgres-upgrade:10-to-12 --username={{ pg_username }} when: upgrade_postgres | bool - name: Copy old pg_hba.conf shell: | {{ container_command }} "cp /var/lib/postgresql/10/data/pg_hba.conf /var/lib/postgresql/12/data/pg_hba.conf" when: upgrade_postgres | bool - name: Remove old data directory shell: | {{ container_command }} "rm -rf /var/lib/postgresql/10/data" when: - upgrade_postgres | bool - compose_start_containers|bool podman-compose-1.0.6/examples/awx17/roles/local_docker/templates/000077500000000000000000000000001441451546200247755ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx17/roles/local_docker/templates/credentials.py.j2000066400000000000000000000006271441451546200301630ustar00rootroot00000000000000DATABASES = { 'default': { 'ATOMIC_REQUESTS': True, 'ENGINE': 'django.db.backends.postgresql', 'NAME': "{{ pg_database }}", 'USER': "{{ pg_username }}", 'PASSWORD': "{{ pg_password }}", 'HOST': "{{ pg_hostname | default('postgres') }}", 'PORT': "{{ pg_port }}", } } BROADCAST_WEBSOCKET_SECRET = "{{ broadcast_websocket_secret | b64encode }}" 
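For reference, here is a hedged illustration of what the `credentials.py.j2` template above renders to when using the sample variable values from `examples/awx17/README.md` (`pg_username=awx`, `pg_database=awx`, `pg_port=5432`, password `awx,123.`, and the default `postgres` host, since `pg_hostname` is not set). The base64 websocket secret is a placeholder, because it is generated at deploy time.

```python
# Approximate render of templates/credentials.py.j2 with the sample values
# from examples/awx17/README.md; the websocket secret below is a placeholder.
DATABASES = {
    'default': {
        'ATOMIC_REQUESTS': True,
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': "awx",
        'USER': "awx",
        'PASSWORD': "awx,123.",
        'HOST': "postgres",
        'PORT': "5432",
    }
}
BROADCAST_WEBSOCKET_SECRET = "PLACEHOLDER-BASE64-SECRET"
```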
podman-compose-1.0.6/examples/awx17/roles/local_docker/templates/docker-compose.yml.j2000066400000000000000000000174541441451546200307570ustar00rootroot00000000000000#jinja2: lstrip_blocks: True version: '2' services: web: image: {{ awx_docker_actual_image }} container_name: awx_web depends_on: - redis {% if pg_hostname is not defined %} - postgres {% endif %} {% if (host_port is defined) or (host_port_ssl is defined) %} ports: {% if (host_port_ssl is defined) and (ssl_certificate is defined) %} - "{{ host_port_ssl }}:8053" {% endif %} {% if host_port is defined %} - "{{ host_port }}:8052" {% endif %} {% endif %} hostname: {{ awx_web_hostname }} user: root restart: unless-stopped {% if (awx_web_container_labels is defined) and (',' in awx_web_container_labels) %} {% set awx_web_container_labels_list = awx_web_container_labels.split(',') %} labels: {% for awx_web_container_label in awx_web_container_labels_list %} - {{ awx_web_container_label }} {% endfor %} {% elif awx_web_container_labels is defined %} labels: - {{ awx_web_container_labels }} {% endif %} volumes: - supervisor-socket:/var/run/supervisor - rsyslog-socket:/var/run/awx-rsyslog/ - rsyslog-config:/var/lib/awx/rsyslog/ - "{{ docker_compose_dir }}/SECRET_KEY:/etc/tower/SECRET_KEY" - "{{ docker_compose_dir }}/environment.sh:/etc/tower/conf.d/environment.sh" - "{{ docker_compose_dir }}/credentials.py:/etc/tower/conf.d/credentials.py" - "{{ docker_compose_dir }}/nginx.conf:/etc/nginx/nginx.conf:ro" - "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw" {% if project_data_dir is defined %} - "{{ project_data_dir +':/var/lib/awx/projects:rw' }}" {% endif %} {% if custom_venv_dir is defined %} - "{{ custom_venv_dir +':'+ custom_venv_dir +':rw' }}" {% endif %} {% if ca_trust_dir is defined %} - "{{ ca_trust_dir +':/etc/pki/ca-trust/source/anchors:ro' }}" {% endif %} {% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %} - "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}" - "{{ ssl_certificate_key +':/etc/nginx/awxweb_key.pem:ro' }}" {% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %} - "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}" {% endif %} {% if (awx_container_search_domains is defined) and (',' in awx_container_search_domains) %} {% set awx_container_search_domains_list = awx_container_search_domains.split(',') %} dns_search: {% for awx_container_search_domain in awx_container_search_domains_list %} - {{ awx_container_search_domain }} {% endfor %} {% elif awx_container_search_domains is defined %} dns_search: "{{ awx_container_search_domains }}" {% endif %} {% if (awx_alternate_dns_servers is defined) and (',' in awx_alternate_dns_servers) %} {% set awx_alternate_dns_servers_list = awx_alternate_dns_servers.split(',') %} dns: {% for awx_alternate_dns_server in awx_alternate_dns_servers_list %} - {{ awx_alternate_dns_server }} {% endfor %} {% elif awx_alternate_dns_servers is defined %} dns: "{{ awx_alternate_dns_servers }}" {% endif %} {% if (docker_compose_extra_hosts is defined) and (':' in docker_compose_extra_hosts) %} {% set docker_compose_extra_hosts_list = docker_compose_extra_hosts.split(',') %} extra_hosts: {% for docker_compose_extra_host in docker_compose_extra_hosts_list %} - "{{ docker_compose_extra_host }}" {% endfor %} {% endif %} environment: http_proxy: {{ http_proxy | default('') }} https_proxy: {{ https_proxy | default('') }} no_proxy: {{ no_proxy | default('') }} {% if docker_logger is defined %} logging: driver: {{ docker_logger }} {% endif %} 
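  # Note (added for clarity): the task service below reuses the same AWX image
  # and the same SECRET_KEY/environment.sh/credentials.py mounts and redis
  # socket volume as the web service above; it differs mainly in running
  # /usr/bin/launch_awx_task.sh and setting AWX_SKIP_MIGRATIONS=1.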
task: image: {{ awx_docker_actual_image }} container_name: awx_task depends_on: - redis - web {% if pg_hostname is not defined %} - postgres {% endif %} command: /usr/bin/launch_awx_task.sh hostname: {{ awx_task_hostname }} user: root restart: unless-stopped volumes: - supervisor-socket:/var/run/supervisor - rsyslog-socket:/var/run/awx-rsyslog/ - rsyslog-config:/var/lib/awx/rsyslog/ - "{{ docker_compose_dir }}/SECRET_KEY:/etc/tower/SECRET_KEY" - "{{ docker_compose_dir }}/environment.sh:/etc/tower/conf.d/environment.sh" - "{{ docker_compose_dir }}/credentials.py:/etc/tower/conf.d/credentials.py" - "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw" {% if project_data_dir is defined %} - "{{ project_data_dir +':/var/lib/awx/projects:rw' }}" {% endif %} {% if custom_venv_dir is defined %} - "{{ custom_venv_dir +':'+ custom_venv_dir +':rw' }}" {% endif %} {% if ca_trust_dir is defined %} - "{{ ca_trust_dir +':/etc/pki/ca-trust/source/anchors:ro' }}" {% endif %} {% if ssl_certificate is defined %} - "{{ ssl_certificate +':/etc/nginx/awxweb.pem:ro' }}" {% endif %} {% if (awx_container_search_domains is defined) and (',' in awx_container_search_domains) %} {% set awx_container_search_domains_list = awx_container_search_domains.split(',') %} dns_search: {% for awx_container_search_domain in awx_container_search_domains_list %} - {{ awx_container_search_domain }} {% endfor %} {% elif awx_container_search_domains is defined %} dns_search: "{{ awx_container_search_domains }}" {% endif %} {% if (awx_alternate_dns_servers is defined) and (',' in awx_alternate_dns_servers) %} {% set awx_alternate_dns_servers_list = awx_alternate_dns_servers.split(',') %} dns: {% for awx_alternate_dns_server in awx_alternate_dns_servers_list %} - {{ awx_alternate_dns_server }} {% endfor %} {% elif awx_alternate_dns_servers is defined %} dns: "{{ awx_alternate_dns_servers }}" {% endif %} {% if (docker_compose_extra_hosts is defined) and (':' in docker_compose_extra_hosts) %} {% set docker_compose_extra_hosts_list = docker_compose_extra_hosts.split(',') %} extra_hosts: {% for docker_compose_extra_host in docker_compose_extra_hosts_list %} - "{{ docker_compose_extra_host }}" {% endfor %} {% endif %} environment: AWX_SKIP_MIGRATIONS: "1" http_proxy: {{ http_proxy | default('') }} https_proxy: {{ https_proxy | default('') }} no_proxy: {{ no_proxy | default('') }} SUPERVISOR_WEB_CONFIG_PATH: '/etc/supervisord.conf' redis: image: {{ redis_image }} container_name: awx_redis restart: unless-stopped environment: http_proxy: {{ http_proxy | default('') }} https_proxy: {{ https_proxy | default('') }} no_proxy: {{ no_proxy | default('') }} command: ["/usr/local/etc/redis/redis.conf"] volumes: - "{{ docker_compose_dir }}/redis.conf:/usr/local/etc/redis/redis.conf:ro" - "{{ docker_compose_dir }}/redis_socket:/var/run/redis/:rw" {% if docker_logger is defined %} logging: driver: {{ docker_logger }} {% endif %} {% if pg_hostname is not defined %} postgres: image: {{ postgresql_image }} container_name: awx_postgres restart: unless-stopped volumes: - "{{ postgres_data_dir }}/12/data/:/var/lib/postgresql/data:Z" environment: POSTGRES_USER: {{ pg_username }} POSTGRES_PASSWORD: {{ pg_password }} POSTGRES_DB: {{ pg_database }} http_proxy: {{ http_proxy | default('') }} https_proxy: {{ https_proxy | default('') }} no_proxy: {{ no_proxy | default('') }} {% if docker_logger is defined %} logging: driver: {{ docker_logger }} {% endif %} {% endif %} {% if docker_compose_subnet is defined %} networks: default: driver: bridge ipam: driver: 
default config: - subnet: {{ docker_compose_subnet }} {% endif %} volumes: supervisor-socket: rsyslog-socket: rsyslog-config: podman-compose-1.0.6/examples/awx17/roles/local_docker/templates/environment.sh.j2000066400000000000000000000006611441451546200302120ustar00rootroot00000000000000DATABASE_USER={{ pg_username|quote }} DATABASE_NAME={{ pg_database|quote }} DATABASE_HOST={{ pg_hostname|default('postgres')|quote }} DATABASE_PORT={{ pg_port|default('5432')|quote }} DATABASE_PASSWORD={{ pg_password|default('awxpass')|quote }} {% if pg_admin_password is defined %} DATABASE_ADMIN_PASSWORD={{ pg_admin_password|quote }} {% endif %} AWX_ADMIN_USER={{ admin_user|quote }} AWX_ADMIN_PASSWORD={{ admin_password|quote }} podman-compose-1.0.6/examples/awx17/roles/local_docker/templates/nginx.conf.j2000066400000000000000000000072731441451546200273120ustar00rootroot00000000000000#user awx; worker_processes 1; pid /tmp/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; server_tokens off; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /dev/stdout main; map $http_upgrade $connection_upgrade { default upgrade; '' close; } sendfile on; #tcp_nopush on; #gzip on; upstream uwsgi { server 127.0.0.1:8050; } upstream daphne { server 127.0.0.1:8051; } {% if ssl_certificate is defined %} server { listen 8052 default_server; server_name _; # Redirect all HTTP links to the matching HTTPS page return 301 https://$host$request_uri; } {%endif %} server { {% if (ssl_certificate is defined) and (ssl_certificate_key is defined) %} listen 8053 ssl; ssl_certificate /etc/nginx/awxweb.pem; ssl_certificate_key /etc/nginx/awxweb_key.pem; {% elif (ssl_certificate is defined) and (ssl_certificate_key is not defined) %} listen 8053 ssl; ssl_certificate /etc/nginx/awxweb.pem; ssl_certificate_key /etc/nginx/awxweb.pem; {% else %} listen 8052 default_server; {% endif %} # If you have a domain name, this is where to add it server_name _; keepalive_timeout 65; # HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months) add_header Strict-Transport-Security max-age=15768000; # Protect against click-jacking https://www.owasp.org/index.php/Testing_for_Clickjacking_(OTG-CLIENT-009) add_header X-Frame-Options "DENY"; location /nginx_status { stub_status on; access_log off; allow 127.0.0.1; deny all; } location /static/ { alias /var/lib/awx/public/static/; } location /favicon.ico { alias /var/lib/awx/public/static/favicon.ico; } location /websocket { # Pass request to the upstream alias proxy_pass http://daphne; # Require http version 1.1 to allow for upgrade requests proxy_http_version 1.1; # We want proxy_buffering off for proxying to websockets. 
proxy_buffering off; # http://en.wikipedia.org/wiki/X-Forwarded-For proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; # enable this if you use HTTPS: proxy_set_header X-Forwarded-Proto https; # pass the Host: header from the client for the sake of redirects proxy_set_header Host $http_host; # We've set the Host header, so we don't need Nginx to muddle # about with redirects proxy_redirect off; # Depending on the request value, set the Upgrade and # connection headers proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; } location / { # Add trailing / if missing rewrite ^(.*)$http_host(.*[^/])$ $1$http_host$2/ permanent; uwsgi_read_timeout 120s; uwsgi_pass uwsgi; include /etc/nginx/uwsgi_params; {%- if extra_nginx_include is defined %} include {{ extra_nginx_include }}; {%- endif %} proxy_set_header X-Forwarded-Port 443; uwsgi_param HTTP_X_FORWARDED_PORT 443; } } } podman-compose-1.0.6/examples/awx17/roles/local_docker/templates/redis.conf.j2000066400000000000000000000001161441451546200272620ustar00rootroot00000000000000unixsocket /var/run/redis/redis.sock unixsocketperm 660 port 0 bind 127.0.0.1 podman-compose-1.0.6/examples/awx3/000077500000000000000000000000001441451546200171455ustar00rootroot00000000000000podman-compose-1.0.6/examples/awx3/docker-compose.yml000066400000000000000000000027131441451546200226050ustar00rootroot00000000000000version: '3' services: postgres: image: "postgres:9.6" environment: POSTGRES_USER: awx POSTGRES_PASSWORD: awxpass POSTGRES_DB: awx rabbitmq: image: "rabbitmq:3" environment: RABBITMQ_DEFAULT_VHOST: awx memcached: image: "memcached:alpine" awx_web: # image: "geerlingguy/awx_web:latest" image: "ansible/awx_web:3.0.1" links: - rabbitmq - memcached - postgres ports: - "8080:8052" hostname: awxweb user: root environment: SECRET_KEY: aabbcc DATABASE_USER: awx DATABASE_PASSWORD: awxpass DATABASE_NAME: awx DATABASE_PORT: 5432 DATABASE_HOST: postgres RABBITMQ_USER: guest RABBITMQ_PASSWORD: guest RABBITMQ_HOST: rabbitmq RABBITMQ_PORT: 5672 RABBITMQ_VHOST: awx MEMCACHED_HOST: memcached MEMCACHED_PORT: 11211 awx_task: # image: "geerlingguy/awx_task:latest" image: "ansible/awx_task:3.0.1" links: - rabbitmq - memcached - awx_web:awxweb - postgres hostname: awx user: root environment: SECRET_KEY: aabbcc DATABASE_USER: awx DATABASE_PASSWORD: awxpass DATABASE_NAME: awx DATABASE_PORT: 5432 DATABASE_HOST: postgres RABBITMQ_USER: guest RABBITMQ_PASSWORD: guest RABBITMQ_HOST: rabbitmq RABBITMQ_PORT: 5672 RABBITMQ_VHOST: awx MEMCACHED_HOST: memcached MEMCACHED_PORT: 11211 podman-compose-1.0.6/examples/azure-vote/000077500000000000000000000000001441451546200203645ustar00rootroot00000000000000podman-compose-1.0.6/examples/azure-vote/README.md000066400000000000000000000005261441451546200216460ustar00rootroot00000000000000# Azure Vote Example This example have two containers: * backend: `redis` used as storage * frontend: having supervisord, nginx, uwsgi/python ``` echo "HOST_PORT=8080" > .env podman-compose up ``` after typing the commands above open your browser on the host port you picked above like [http://localhost:8080/](http://localhost:8080/) podman-compose-1.0.6/examples/azure-vote/docker-compose.yaml000066400000000000000000000007041441451546200241630ustar00rootroot00000000000000--- # from https://github.com/Azure-Samples/azure-voting-app-redis/blob/master/docker-compose.yaml version: '3' services: azure-vote-back: image: mcr.microsoft.com/oss/bitnami/redis:6.0.8 container_name: azure-vote-back environment: 
ALLOW_EMPTY_PASSWORD: "yes" azure-vote-front: image: mcr.microsoft.com/azuredocs/azure-vote-front:v1 environment: REDIS: azure-vote-back ports: - "${HOST_PORT:-8080}:80" podman-compose-1.0.6/examples/busybox/000077500000000000000000000000001441451546200177565ustar00rootroot00000000000000podman-compose-1.0.6/examples/busybox/docker-compose.yaml000066400000000000000000000013111441451546200235500ustar00rootroot00000000000000version: "2" services: redis: image: redis:alpine ports: - "6379" environment: - SECRET_KEY=aabbcc - ENV_IS_SET frontend: image: busybox #entrypoint: [] command: ["/bin/busybox", "httpd", "-f", "-p", "8080"] working_dir: / environment: SECRET_KEY2: aabbcc ENV_IS_SET2: ports: - "8080" links: - redis:myredis labels: my.label: my_value #tmpfs: /run #tmpfs: # - /run # - /tmp #user: postgresql #working_dir: /code #domainname: foo.com #hostname: foo #ipc: host #mac_address: 02:42:ac:11:65:43 #privileged: true #read_only: true #shm_size: 64M #stdin_open: true #tty: true podman-compose-1.0.6/examples/echo/000077500000000000000000000000001441451546200172015ustar00rootroot00000000000000podman-compose-1.0.6/examples/echo/README.md000066400000000000000000000007571441451546200204710ustar00rootroot00000000000000# Echo Service example ``` podman-compose up ``` Test the service with `curl like this` ``` $ curl -X POST -d "foobar" http://localhost:8080/; echo CLIENT VALUES: client_address=10.89.31.2 command=POST real path=/ query=nil request_version=1.1 request_uri=http://localhost:8080/ SERVER VALUES: server_version=nginx: 1.10.0 - lua: 10001 HEADERS RECEIVED: accept=*/* content-length=6 content-type=application/x-www-form-urlencoded host=localhost:8080 user-agent=curl/7.76.1 BODY: foobar ``` podman-compose-1.0.6/examples/echo/docker-compose.yaml000066400000000000000000000001671441451546200230030ustar00rootroot00000000000000--- version: '3' services: web: image: k8s.gcr.io/echoserver:1.4 ports: - "${HOST_PORT:-8080}:8080" podman-compose-1.0.6/examples/hello-app-redis/000077500000000000000000000000001441451546200212505ustar00rootroot00000000000000podman-compose-1.0.6/examples/hello-app-redis/README.md000066400000000000000000000005741441451546200225350ustar00rootroot00000000000000# GCR Hello App Redis A 6-node redis cluster using [Bitnami](https://github.com/bitnami/bitnami-docker-redis-cluster) with a [simple hit counter](https://github.com/GoogleCloudPlatform/kubernetes-engine-samples/tree/main/hello-app-redis) that persists on that redis cluster ``` podman-compose up ``` then open your browser on [http://localhost:8080/](http://localhost:8080/) podman-compose-1.0.6/examples/hello-app-redis/docker-compose.yaml000066400000000000000000000037531441451546200250560ustar00rootroot00000000000000--- version: '3' volumes: redis-node1-data: redis-node2-data: redis-node3-data: redis-node4-data: redis-node5-data: redis-data: services: web: image: gcr.io/google-samples/hello-app-redis:1.0 depends_on: - redis-cluster ports: - "${HOST_PORT:-8080}:8080" redis-node1: image: docker.io/bitnami/redis-cluster:6.2 volumes: - redis-node1-data:/bitnami/redis/data environment: - ALLOW_EMPTY_PASSWORD=yes - REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster redis-node2: image: docker.io/bitnami/redis-cluster:6.2 volumes: - redis-node2-data:/bitnami/redis/data environment: - ALLOW_EMPTY_PASSWORD=yes - REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster redis-node3: image: docker.io/bitnami/redis-cluster:6.2 volumes: - 
redis-node3-data:/bitnami/redis/data environment: - ALLOW_EMPTY_PASSWORD=yes - REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster redis-node4: image: docker.io/bitnami/redis-cluster:6.2 volumes: - redis-node4-data:/bitnami/redis/data environment: - ALLOW_EMPTY_PASSWORD=yes - REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster redis-node5: image: docker.io/bitnami/redis-cluster:6.2 volumes: - redis-node5-data:/bitnami/redis/data environment: - ALLOW_EMPTY_PASSWORD=yes - REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster redis-cluster: image: docker.io/bitnami/redis-cluster:6.2 volumes: - redis-data:/bitnami/redis/data depends_on: - redis-node1 - redis-node2 - redis-node3 - redis-node4 - redis-node5 environment: - ALLOW_EMPTY_PASSWORD=yes - REDIS_NODES=redis-node1 redis-node2 redis-node3 redis-node4 redis-node5 redis-cluster - REDIS_CLUSTER_CREATOR=yes podman-compose-1.0.6/examples/hello-app/000077500000000000000000000000001441451546200201445ustar00rootroot00000000000000podman-compose-1.0.6/examples/hello-app/README.md000066400000000000000000000002221441451546200214170ustar00rootroot00000000000000# GCR Hello App A small ~2MB image, type ``` podman-compose up ``` then open your browser on [http://localhost:8080/](http://localhost:8080/) podman-compose-1.0.6/examples/hello-app/docker-compose.yaml000066400000000000000000000002011441451546200237330ustar00rootroot00000000000000--- version: '3' services: web: image: gcr.io/google-samples/hello-app:1.0 ports: - "${HOST_PORT:-8080}:8080" podman-compose-1.0.6/examples/hello-python/000077500000000000000000000000001441451546200207055ustar00rootroot00000000000000podman-compose-1.0.6/examples/hello-python/App/000077500000000000000000000000001441451546200214255ustar00rootroot00000000000000podman-compose-1.0.6/examples/hello-python/App/__init__.py000066400000000000000000000000001441451546200235240ustar00rootroot00000000000000podman-compose-1.0.6/examples/hello-python/App/web.py000066400000000000000000000014101441451546200225500ustar00rootroot00000000000000import os import asyncio import aioredis from aiohttp import web REDIS_HOST = os.environ.get("REDIS_HOST", "localhost") REDIS_PORT = int(os.environ.get("REDIS_PORT", "6379")) REDIS_DB = int(os.environ.get("REDIS_DB", "0")) redis = aioredis.from_url(f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}") app = web.Application() routes = web.RouteTableDef() @routes.get("/") async def hello(request): counter = await redis.incr("mycounter") return web.Response(text=f"counter={counter}") @routes.get("/hello.json") async def hello_json(request): counter = await redis.incr("mycounter") data = {"counter": counter} return web.json_response(data) app.add_routes(routes) def main(): web.run_app(app, port=8080) if __name__ == "__main__": main() podman-compose-1.0.6/examples/hello-python/Dockerfile000066400000000000000000000002641441451546200227010ustar00rootroot00000000000000FROM python:3.9-alpine WORKDIR /usr/src/app COPY requirements.txt ./ RUN pip install --no-cache-dir -r requirements.txt COPY . . 
CMD [ "python", "-m", "App.web" ] EXPOSE 8080 podman-compose-1.0.6/examples/hello-python/README.md000066400000000000000000000001721441451546200221640ustar00rootroot00000000000000# Simple Python Demo ## A Redis counter ``` podman-compose up -d curl localhost:8080/ curl localhost:8080/hello.json ``` podman-compose-1.0.6/examples/hello-python/docker-compose.yaml000066400000000000000000000005711441451546200245060ustar00rootroot00000000000000--- version: '3' volumes: redis: services: redis: read_only: true image: docker.io/redis:alpine command: ["redis-server", "--appendonly", "yes", "--notify-keyspace-events", "Ex"] volumes: - redis:/data web: read_only: true build: context: . image: hello-py-aioweb ports: - 8080:8080 environment: REDIS_HOST: redis podman-compose-1.0.6/examples/hello-python/requirements.txt000066400000000000000000000000451441451546200241700ustar00rootroot00000000000000aiohttp aioredis # aioredis[hiredis] podman-compose-1.0.6/examples/nodeproj/000077500000000000000000000000001441451546200201035ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/.eslintrc.json000066400000000000000000000030621441451546200227000ustar00rootroot00000000000000{ "env": { "node": true, "es6": true }, "settings": { "import/resolver": { "node": { "extensions": [".js", ".mjs", ".ts", ".cjs"] } } }, "parser": "@typescript-eslint/parser", "parserOptions": { "ecmaVersion": 2020, "sourceType": "module", "allowImportExportEverywhere": true }, "extends": [ "eslint:recommended", "plugin:import/errors", "plugin:import/warnings", "plugin:import/typescript", "plugin:promise/recommended", "google", "plugin:security/recommended" ], "plugins": ["promise", "security", "import"], "overrides": [ { "files": "public/**/*.min.js", "env": { "browser": true, "node": false, "es6": false }, "parserOptions": { "sourceType": "script" }, "extends": ["plugin:compat/recommended"], "plugins": [], "rules": { "no-var": ["off"] } } ], "rules": { "security/detect-non-literal-fs-filename":["off"], "security/detect-object-injection":["off"], "camelcase": ["off"], "no-console": ["off"], "require-jsdoc": ["off"], "one-var": ["off"], "guard-for-in": ["off"], "max-len": [ "warn", { "ignoreComments": true, "ignoreTrailingComments": true, "ignoreUrls": true, "code": 200 } ], "indent": ["warn", 4], "no-unused-vars": ["warn"], "no-extra-semi": ["warn"], "linebreak-style": ["error", "unix"], "quotes": ["warn", "double"], "semi": ["error", "always"] } } podman-compose-1.0.6/examples/nodeproj/.gitignore000066400000000000000000000000431441451546200220700ustar00rootroot00000000000000local.env .env *.pid node_modules podman-compose-1.0.6/examples/nodeproj/.home/000077500000000000000000000000001441451546200211115ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/.home/.gitignore000066400000000000000000000000021441451546200230710ustar00rootroot00000000000000* podman-compose-1.0.6/examples/nodeproj/README.md000066400000000000000000000003321441451546200213600ustar00rootroot00000000000000# How to run example ``` cp example.local.env local.env cp example.env .env cat local.env cat .env echo "UID=$UID" >> .env cat .env podman-compose build podman-compose run --rm --no-deps init podman-compose up ``` 
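Those commands leave a `.env` file next to `docker-compose.yml`; podman-compose reads it with python-dotenv and expands `${VAR:-default}` references such as `${WEB_LISTEN_PORT:-3000}` in the compose file. Below is a deliberately simplified stand-in for the fuller substitution logic in `podman_compose.py` (it handles only the `${NAME:-default}` form) showing how the values you just wrote end up in the container configuration.

```
# Sketch only: how values from the .env created above reach the compose file.
# podman_compose.py loads .env with python-dotenv and expands ${VAR:-default}
# references; this stand-in handles just that one form.
import re

from dotenv import dotenv_values

env = dotenv_values(".env")  # e.g. {"WEB_LISTEN_PORT": "3000", "UID": "1000"}


def subst(text):
    return re.sub(
        r"\$\{(\w+)(?::-([^}]*))?\}",
        lambda m: env.get(m.group(1)) or m.group(2) or "",
        text,
    )


print(subst("${WEB_LISTEN_PORT:-3000}:3000"))  # the web service's port mapping
print(subst("${NODE_IMG:-node16-runtime}"))    # the image / build context name
```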
podman-compose-1.0.6/examples/nodeproj/containers/000077500000000000000000000000001441451546200222505ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/containers/node16-runtime/000077500000000000000000000000001441451546200250255ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/containers/node16-runtime/Dockerfile000066400000000000000000000010641441451546200270200ustar00rootroot00000000000000FROM registry.fedoraproject.org/fedora-minimal:35 ARG NODE_VER=16 # microdnf -y module enable nodejs:${NODE_VER} RUN \ echo -e "[nodejs]\nname=nodejs\nstream=${NODE_VER}\nprofiles=\nstate=enabled\n" > /etc/dnf/modules.d/nodejs.module && \ microdnf -y install shadow-utils nodejs zopfli findutils busybox && \ microdnf clean all RUN adduser -d /app app && mkdir -p /app/code/.home && chown app:app -R /app/code && chmod 711 /app /app/code/.home && usermod -d /app/code/.home app ENV XDG_CONFIG_HOME=/app/code/.home ENV HOME=/app/code/.home WORKDIR /app/code podman-compose-1.0.6/examples/nodeproj/docker-compose.yml000066400000000000000000000017441441451546200235460ustar00rootroot00000000000000version: '3' volumes: redis: services: redis: read_only: true image: docker.io/redis:alpine command: ["redis-server", "--appendonly", "yes", "--notify-keyspace-events", "Ex"] volumes: - redis:/data tmpfs: - /tmp - /var/run - /run init: read_only: true #userns_mode: keep-id user: ${UID:-1000} build: context: ./containers/${NODE_IMG:-node16-runtime} image: ${NODE_IMG:-node16-runtime} env_file: - local.env volumes: - .:/app/code command: ["/bin/sh", "-c", "mkdir -p ~/; [ -d ./node_modules ] && echo '** node_modules exists' || npm install"] tmpfs: - /tmp - /run task: extends: service: init command: ["npm", "run", "cli", "--", "task"] links: - redis depends_on: - redis web: extends: service: init command: ["npm", "run", "cli", "--", "web"] ports: - ${WEB_LISTEN_PORT:-3000}:3000 depends_on: - redis links: - mongo podman-compose-1.0.6/examples/nodeproj/example.env000066400000000000000000000000601441451546200222440ustar00rootroot00000000000000WEB_LISTEN_PORT=3000 # pass UID= your IDE user podman-compose-1.0.6/examples/nodeproj/example.local.env000066400000000000000000000000221441451546200233330ustar00rootroot00000000000000REDIS_HOST=redis podman-compose-1.0.6/examples/nodeproj/index.js000066400000000000000000000001131441451546200215430ustar00rootroot00000000000000#! 
/usr/bin/env node "use strict"; import {start} from "./lib"; start(); podman-compose-1.0.6/examples/nodeproj/jsconfig.json000066400000000000000000000004021441451546200225740ustar00rootroot00000000000000{ "compilerOptions": { "target": "es2020", "module": "es2020", "moduleResolution": "node", "allowSyntheticDefaultImports": true }, "files": [ "index.js" ], "include": [ "lib/**/*.js" ] }podman-compose-1.0.6/examples/nodeproj/lib/000077500000000000000000000000001441451546200206515ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/lib/commands/000077500000000000000000000000001441451546200224525ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/lib/commands/task.js000066400000000000000000000013061441451546200237520ustar00rootroot00000000000000"use strict"; import {proj} from "../proj"; async function loop() { const poped = await proj.predis.blpop("queue", 5); const task_desc_s = poped[1]; let task_desc; try { task_desc = JSON.parse(task_desc_s); } catch (e) { console.exception(e); } console.info("got task "+task_desc.func); const func = task_desc.func; const args = task_desc.args; if (typeof(proj.tasks[func])!="function") { console.log(`task ${func} not found`); process.exit(-1) } try { await ((this.tasks[func])(...args)); } catch (e) { console.exception(e); } } export async function start() { while(true) { loop(); } } podman-compose-1.0.6/examples/nodeproj/lib/commands/web.js000066400000000000000000000010261441451546200235640ustar00rootroot00000000000000"use strict"; import {proj} from "../proj"; import http from "http"; import express from "express"; export async function start() { const app = express(); const server = http.createServer(app); // Routing app.use(express.static(proj.config.basedir + "/public")); app.get("/healthz", function(req, res) { res.send("ok@"+Date.now()); }); server.listen(proj.config.LISTEN_PORT, proj.config.LISTEN_HOST, function() { console.warn(`listening at port ${proj.config.LISTEN_PORT}`); }); } podman-compose-1.0.6/examples/nodeproj/package.json000066400000000000000000000007561441451546200224010ustar00rootroot00000000000000{ "name": "nodeproj", "version": "0.0.1", "description": "nodejs example project", "exports": { ".": "./index.js", "./lib": "./lib" }, "main": "index.js", "type": "module", "scripts": { "cli": "nodemon -w lib -w index.js --es-module-specifier-resolution=node ./index.js" }, "dependencies": { "express": "~4.16.4", "redis": "^3.1.2" }, "private": true, "author": "", "license": "proprietary", "devDependencies": { "nodemon": "^2.0.14" } } podman-compose-1.0.6/examples/nodeproj/public/000077500000000000000000000000001441451546200213615ustar00rootroot00000000000000podman-compose-1.0.6/examples/nodeproj/public/index.html000066400000000000000000000006271441451546200233630ustar00rootroot00000000000000 Vote

<h1>This is a Heading</h1>

<p>This is a paragraph.</p>

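To round out the nodeproj example: `lib/commands/task.js` above BLPOPs JSON task descriptors of the form `{"func": ..., "args": [...]}` from the Redis list named `queue`, so a producer only needs to RPUSH a matching object. The sketch below uses the Python redis client purely for illustration (the example itself is Node.js); `REDIS_HOST` mirrors `example.local.env`, and the task name `sendEmail` and its argument are invented.

```
# Sketch only: enqueue a task for the worker started by "npm run cli -- task".
# Assumes the redis-py package; REDIS_HOST mirrors example.local.env, and the
# task name "sendEmail" plus its argument are invented for illustration.
import json
import os

import redis

r = redis.Redis(host=os.environ.get("REDIS_HOST", "localhost"), port=6379, db=0)
r.rpush("queue", json.dumps({"func": "sendEmail", "args": ["user@example.com"]}))
```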
podman-compose-1.0.6/examples/wordpress/000077500000000000000000000000001441451546200203135ustar00rootroot00000000000000podman-compose-1.0.6/examples/wordpress/docker-compose.yaml000066400000000000000000000011301441451546200241040ustar00rootroot00000000000000--- volumes: db_data: services: wordpress: image: docker.io/library/wordpress:latest ports: - 8080:80 environment: - WORDPRESS_DB_HOST=db - WORDPRESS_DB_USER=wordpress - WORDPRESS_DB_PASSWORD=password - WORDPRESS_DB_NAME=wordpress db: image: docker.io/library/mariadb:10.6.4-focal command: '--default-authentication-plugin=mysql_native_password' volumes: - db_data:/var/lib/mysql environment: - MYSQL_ROOT_PASSWORD=somewordpress - MYSQL_DATABASE=wordpress - MYSQL_USER=wordpress - MYSQL_PASSWORD=password podman-compose-1.0.6/podman_compose.py000077500000000000000000003060411441451546200200310ustar00rootroot00000000000000#! /usr/bin/python3 # -*- coding: utf-8 -*- # https://docs.docker.com/compose/compose-file/#service-configuration-reference # https://docs.docker.com/samples/ # https://docs.docker.com/compose/gettingstarted/ # https://docs.docker.com/compose/django/ # https://docs.docker.com/compose/wordpress/ # TODO: podman pod logs --color -n -f pod_testlogs import sys import os import getpass import argparse import itertools import subprocess import time import re import hashlib import random import json import glob from threading import Thread import shlex try: from shlex import quote as cmd_quote except ImportError: from pipes import quote as cmd_quote # import fnmatch # fnmatch.fnmatchcase(env, "*_HOST") import yaml from dotenv import dotenv_values __version__ = "1.0.6" script = os.path.realpath(sys.argv[0]) # helper functions is_str = lambda s: isinstance(s, str) is_dict = lambda d: isinstance(d, dict) is_list = lambda l: not is_str(l) and not is_dict(l) and hasattr(l, "__iter__") # identity filter filteri = lambda a: filter(lambda i: i, a) def try_int(i, fallback=None): try: return int(i) except ValueError: pass except TypeError: pass return fallback def try_float(i, fallback=None): try: return float(i) except ValueError: pass except TypeError: pass return fallback def log(*msgs, sep=" ", end="\n"): line = (sep.join([str(msg) for msg in msgs])) + end sys.stderr.write(line) sys.stderr.flush() dir_re = re.compile(r"^[~/\.]") propagation_re = re.compile( "^(?:z|Z|O|U|r?shared|r?slave|r?private|r?unbindable|r?bind|(?:no)?(?:exec|dev|suid))$" ) norm_re = re.compile("[^-_a-z0-9]") num_split_re = re.compile(r"(\d+|\D+)") PODMAN_CMDS = ( "pull", "push", "build", "inspect", "run", "start", "stop", "rm", "volume", ) t_re = re.compile(r"^(?:(\d+)[m:])?(?:(\d+(?:\.\d+)?)s?)?$") STOP_GRACE_PERIOD = "10" def str_to_seconds(txt): if not txt: return None if isinstance(txt, (int, float)): return txt match = t_re.match(txt.strip()) if not match: return None mins, sec = match[1], match[2] mins = int(mins) if mins else 0 sec = float(sec) if sec else 0 # "podman stop" takes only int # Error: invalid argument "3.0" for "-t, --time" flag: strconv.ParseUint: parsing "3.0": invalid syntax return int(mins * 60.0 + sec) def ver_as_list(a): return [try_int(i, i) for i in num_split_re.findall(a)] def strverscmp_lt(a, b): a_ls = ver_as_list(a or "") b_ls = ver_as_list(b or "") return a_ls < b_ls def parse_short_mount(mount_str, basedir): mount_a = mount_str.split(":") mount_opt_dict = {} mount_opt = None if len(mount_a) == 1: # Anonymous: Just specify a path and let the engine creates the volume # - /var/lib/mysql mount_src, mount_dst = None, mount_str elif 
len(mount_a) == 2: mount_src, mount_dst = mount_a # dest must start with / like /foo:/var/lib/mysql # otherwise it's option like /var/lib/mysql:rw if not mount_dst.startswith("/"): mount_dst, mount_opt = mount_a mount_src = None elif len(mount_a) == 3: mount_src, mount_dst, mount_opt = mount_a else: raise ValueError("could not parse mount " + mount_str) if mount_src and dir_re.match(mount_src): # Specify an absolute path mapping # - /opt/data:/var/lib/mysql # Path on the host, relative to the Compose file # - ./cache:/tmp/cache # User-relative path # - ~/configs:/etc/configs/:ro mount_type = "bind" mount_src = os.path.abspath( os.path.join(basedir, os.path.expanduser(mount_src)) ) else: # Named volume # - datavolume:/var/lib/mysql mount_type = "volume" mount_opts = filteri((mount_opt or "").split(",")) propagation_opts = [] for opt in mount_opts: if opt == "ro": mount_opt_dict["read_only"] = True elif opt == "rw": mount_opt_dict["read_only"] = False elif opt in ("consistent", "delegated", "cached"): mount_opt_dict["consistency"] = opt elif propagation_re.match(opt): propagation_opts.append(opt) else: # TODO: ignore raise ValueError("unknown mount option " + opt) mount_opt_dict["bind"] = {"propagation": ",".join(propagation_opts)} return { "type": mount_type, "source": mount_src, "target": mount_dst, **mount_opt_dict, } # NOTE: if a named volume is used but not defined it # gives ERROR: Named volume "abc" is used in service "xyz" # but no declaration was found in the volumes section. # unless it's anonymous-volume def fix_mount_dict(compose, mount_dict, proj_name, srv_name): """ in-place fix mount dictionary to: - define _vol to be the corresponding top-level volume - if name is missing it would be source prefixed with project - if no source it would be generated """ # if already applied nothing todo if "_vol" in mount_dict: return mount_dict if mount_dict["type"] == "volume": vols = compose.vols source = mount_dict.get("source", None) vol = (vols.get(source, None) or {}) if source else {} name = vol.get("name", None) mount_dict["_vol"] = vol # handle anonymouse or implied volume if not source: # missing source vol["name"] = "_".join( [ proj_name, srv_name, hashlib.sha256(mount_dict["target"].encode("utf-8")).hexdigest(), ] ) elif not name: external = vol.get("external", None) if isinstance(external, dict): vol["name"] = external.get("name", f"{source}") elif external: vol["name"] = f"{source}" else: vol["name"] = f"{proj_name}_{source}" return mount_dict # docker and docker-compose support subset of bash variable substitution # https://docs.docker.com/compose/compose-file/#variable-substitution # https://docs.docker.com/compose/env-file/ # https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html # $VARIABLE # ${VARIABLE} # ${VARIABLE:-default} default if not set or empty # ${VARIABLE-default} default if not set # ${VARIABLE:?err} raise error if not set or empty # ${VARIABLE?err} raise error if not set # $$ means $ var_re = re.compile( r""" \$(?: (?P\$) | (?P[_a-zA-Z][_a-zA-Z0-9]*) | (?:{ (?P[_a-zA-Z][_a-zA-Z0-9]*) (?:(?P:)?(?: (?:-(?P[^}]*)) | (?:\?(?P[^}]*)) ))? 
}) ) """, re.VERBOSE, ) def rec_subs(value, subs_dict): """ do bash-like substitution in value and if list of dictionary do that recursively """ if is_dict(value): value = {k: rec_subs(v, subs_dict) for k, v in value.items()} elif is_str(value): def convert(m): if m.group("escaped") is not None: return "$" name = m.group("named") or m.group("braced") value = subs_dict.get(name) if value == "" and m.group("empty"): value = None if value is not None: return str(value) if m.group("err") is not None: raise RuntimeError(m.group("err")) return m.group("default") or "" value = var_re.sub(convert, value) elif hasattr(value, "__iter__"): value = [rec_subs(i, subs_dict) for i in value] return value def norm_as_list(src): """ given a dictionary {key1:value1, key2: None} or list return a list of ["key1=value1", "key2"] """ if src is None: dst = [] elif is_dict(src): dst = [(f"{k}={v}" if v is not None else k) for k, v in src.items()] elif is_list(src): dst = list(src) else: dst = [src] return dst def norm_as_dict(src): """ given a list ["key1=value1", "key2"] return a dictionary {key1:value1, key2: None} """ if src is None: dst = {} elif is_dict(src): dst = dict(src) elif is_list(src): dst = [i.split("=", 1) for i in src if i] dst = [(a if len(a) == 2 else (a[0], None)) for a in dst] dst = dict(dst) elif is_str(src): key, value = src.split("=", 1) if "=" in src else (src, None) dst = {key: value} else: raise ValueError("dictionary or iterable is expected") return dst def norm_ulimit(inner_value): if is_dict(inner_value): if not inner_value.keys() & {"soft", "hard"}: raise ValueError("expected at least one soft or hard limit") soft = inner_value.get("soft", inner_value.get("hard", None)) hard = inner_value.get("hard", inner_value.get("soft", None)) return f"{soft}:{hard}" if is_list(inner_value): return norm_ulimit(norm_as_dict(inner_value)) # if int or string return as is return inner_value # def tr_identity(project_name, given_containers): # pod_name = f'pod_{project_name}' # pod = dict(name=pod_name) # containers = [] # for cnt in given_containers: # containers.append(dict(cnt, pod=pod_name)) # return [pod], containers def transform(args, project_name, given_containers): if not args.in_pod: pod_name = None pods = [] else: pod_name = f"pod_{project_name}" pod = {"name": pod_name} pods = [pod] containers = [] for cnt in given_containers: containers.append(dict(cnt, pod=pod_name)) return pods, containers def assert_volume(compose, mount_dict): """ inspect volume to get directory create volume if needed """ vol = mount_dict.get("_vol", None) if mount_dict["type"] == "bind": basedir = os.path.realpath(compose.dirname) mount_src = mount_dict["source"] mount_src = os.path.realpath( os.path.join(basedir, os.path.expanduser(mount_src)) ) if not os.path.exists(mount_src): try: os.makedirs(mount_src, exist_ok=True) except OSError: pass return if mount_dict["type"] != "volume" or not vol or not vol.get("name", None): return proj_name = compose.project_name vol_name = vol["name"] is_ext = vol.get("external", None) log(f"podman volume inspect {vol_name} || podman volume create {vol_name}") # TODO: might move to using "volume list" # podman volume list --format '{{.Name}}\t{{.MountPoint}}' -f 'label=io.podman.compose.project=HERE' try: _ = compose.podman.output([], "volume", ["inspect", vol_name]).decode("utf-8") except subprocess.CalledProcessError as e: if is_ext: raise RuntimeError(f"External volume [{vol_name}] does not exists") from e labels = vol.get("labels", None) or [] args = [ "create", "--label", 
f"io.podman.compose.project={proj_name}", "--label", f"com.docker.compose.project={proj_name}", ] for item in norm_as_list(labels): args.extend(["--label", item]) driver = vol.get("driver", None) if driver: args.extend(["--driver", driver]) driver_opts = vol.get("driver_opts", None) or {} for opt, value in driver_opts.items(): args.extend(["--opt", f"{opt}={value}"]) args.append(vol_name) compose.podman.output([], "volume", args) _ = compose.podman.output([], "volume", ["inspect", vol_name]).decode("utf-8") def mount_desc_to_mount_args( compose, mount_desc, srv_name, cnt_name ): # pylint: disable=unused-argument mount_type = mount_desc.get("type", None) vol = mount_desc.get("_vol", None) if mount_type == "volume" else None source = vol["name"] if vol else mount_desc.get("source", None) target = mount_desc["target"] opts = [] if mount_desc.get(mount_type, None): # TODO: we might need to add mount_dict[mount_type]["propagation"] = "z" mount_prop = mount_desc.get(mount_type, {}).get("propagation", None) if mount_prop: opts.append(f"{mount_type}-propagation={mount_prop}") if mount_desc.get("read_only", False): opts.append("ro") if mount_type == "tmpfs": tmpfs_opts = mount_desc.get("tmpfs", {}) tmpfs_size = tmpfs_opts.get("size", None) if tmpfs_size: opts.append(f"tmpfs-size={tmpfs_size}") tmpfs_mode = tmpfs_opts.get("mode", None) if tmpfs_mode: opts.append(f"tmpfs-mode={tmpfs_mode}") opts = ",".join(opts) if mount_type == "bind": return f"type=bind,source={source},destination={target},{opts}".rstrip(",") if mount_type == "volume": return f"type=volume,source={source},destination={target},{opts}".rstrip(",") if mount_type == "tmpfs": return f"type=tmpfs,destination={target},{opts}".rstrip(",") raise ValueError("unknown mount type:" + mount_type) def container_to_ulimit_args(cnt, podman_args): ulimit = cnt.get("ulimits", []) if ulimit is not None: # ulimit can be a single value, i.e. 
ulimit: host if is_str(ulimit): podman_args.extend(["--ulimit", ulimit]) # or a dictionary or list: else: ulimit = norm_as_dict(ulimit) ulimit = [ "{}={}".format(ulimit_key, norm_ulimit(inner_value)) for ulimit_key, inner_value in ulimit.items() ] for i in ulimit: podman_args.extend(["--ulimit", i]) def mount_desc_to_volume_args( compose, mount_desc, srv_name, cnt_name ): # pylint: disable=unused-argument mount_type = mount_desc["type"] if mount_type not in ("bind", "volume"): raise ValueError("unknown mount type:" + mount_type) vol = mount_desc.get("_vol", None) if mount_type == "volume" else None source = vol["name"] if vol else mount_desc.get("source", None) if not source: raise ValueError(f"missing mount source for {mount_type} on {srv_name}") target = mount_desc["target"] opts = [] propagations = set( filteri(mount_desc.get(mount_type, {}).get("propagation", "").split(",")) ) if mount_type != "bind": propagations.update( filteri(mount_desc.get("bind", {}).get("propagation", "").split(",")) ) opts.extend(propagations) # --volume, -v[=[[SOURCE-VOLUME|HOST-DIR:]CONTAINER-DIR[:OPTIONS]]] # [rw|ro] # [z|Z] # [[r]shared|[r]slave|[r]private]|[r]unbindable # [[r]bind] # [noexec|exec] # [nodev|dev] # [nosuid|suid] # [O] # [U] read_only = mount_desc.get("read_only", None) if read_only is not None: opts.append("ro" if read_only else "rw") args = f"{source}:{target}" if opts: args += ":" + ",".join(opts) return args def get_mnt_dict(compose, cnt, volume): proj_name = compose.project_name srv_name = cnt["_service"] basedir = compose.dirname if is_str(volume): volume = parse_short_mount(volume, basedir) return fix_mount_dict(compose, volume, proj_name, srv_name) def get_mount_args(compose, cnt, volume): volume = get_mnt_dict(compose, cnt, volume) # proj_name = compose.project_name srv_name = cnt["_service"] mount_type = volume["type"] assert_volume(compose, volume) if compose.prefer_volume_over_mount: if mount_type == "tmpfs": # TODO: --tmpfs /tmp:rw,size=787448k,mode=1777 args = volume["target"] tmpfs_opts = volume.get("tmpfs", {}) opts = [] size = tmpfs_opts.get("size", None) if size: opts.append(f"size={size}") mode = tmpfs_opts.get("mode", None) if mode: opts.append(f"mode={mode}") if opts: args += ":" + ",".join(opts) return ["--tmpfs", args] args = mount_desc_to_volume_args(compose, volume, srv_name, cnt["name"]) return ["-v", args] args = mount_desc_to_mount_args(compose, volume, srv_name, cnt["name"]) return ["--mount", args] def get_secret_args(compose, cnt, secret): secret_name = secret if is_str(secret) else secret.get("source", None) if not secret_name or secret_name not in compose.declared_secrets.keys(): raise ValueError( f'ERROR: undeclared secret: "{secret}", service: {cnt["_service"]}' ) declared_secret = compose.declared_secrets[secret_name] source_file = declared_secret.get("file", None) dest_file = "" secret_opts = "" target = None if is_str(secret) else secret.get("target", None) uid = None if is_str(secret) else secret.get("uid", None) gid = None if is_str(secret) else secret.get("gid", None) mode = None if is_str(secret) else secret.get("mode", None) if source_file: if not target: dest_file = f"/run/secrets/{secret_name}" elif not target.startswith("/"): sec = target if target else secret_name dest_file = f"/run/secrets/{sec}" else: dest_file = target basedir = compose.dirname source_file = os.path.realpath( os.path.join(basedir, os.path.expanduser(source_file)) ) volume_ref = ["--volume", f"{source_file}:{dest_file}:ro,rprivate,rbind"] if uid or gid or mode: sec = target if 
target else secret_name log( f'WARNING: Service {cnt["_service"]} uses secret "{sec}" with uid, gid, or mode.' + " These fields are not supported by this implementation of the Compose file" ) return volume_ref # v3.5 and up added external flag, earlier the spec # only required a name to be specified. # docker-compose does not support external secrets outside of swarm mode. # However accessing these via podman is trivial # since these commands are directly translated to # podman-create commands, albiet we can only support a 1:1 mapping # at the moment if declared_secret.get("external", False) or declared_secret.get("name", None): secret_opts += f",uid={uid}" if uid else "" secret_opts += f",gid={gid}" if gid else "" secret_opts += f",mode={mode}" if mode else "" # The target option is only valid for type=env, # which in an ideal world would work # for type=mount as well. # having a custom name for the external secret # has the same problem as well ext_name = declared_secret.get("name", None) err_str = 'ERROR: Custom name/target reference "{}" for mounted external secret "{}" is not supported' if ext_name and ext_name != secret_name: raise ValueError(err_str.format(secret_name, ext_name)) if target and target != secret_name: raise ValueError(err_str.format(target, secret_name)) if target: log( 'WARNING: Service "{}" uses target: "{}" for secret: "{}".'.format( cnt["_service"], target, secret_name ) + " That is un-supported and a no-op and is ignored." ) return ["--secret", "{}{}".format(secret_name, secret_opts)] raise ValueError( 'ERROR: unparseable secret: "{}", service: "{}"'.format( secret_name, cnt["_service"] ) ) def container_to_res_args(cnt, podman_args): # v2: https://docs.docker.com/compose/compose-file/compose-file-v2/#cpu-and-other-resources # cpus, cpu_shares, mem_limit, mem_reservation cpus_limit_v2 = try_float(cnt.get("cpus", None), None) cpu_shares_v2 = try_int(cnt.get("cpu_shares", None), None) mem_limit_v2 = cnt.get("mem_limit", None) mem_res_v2 = cnt.get("mem_reservation", None) # v3: https://docs.docker.com/compose/compose-file/compose-file-v3/#resources # spec: https://github.com/compose-spec/compose-spec/blob/master/deploy.md#resources # deploy.resources.{limits,reservations}.{cpus, memory} deploy = cnt.get("deploy", None) or {} res = deploy.get("resources", None) or {} limits = res.get("limits", None) or {} cpus_limit_v3 = try_float(limits.get("cpus", None), None) mem_limit_v3 = limits.get("memory", None) reservations = res.get("reservations", None) or {} # cpus_res_v3 = try_float(reservations.get('cpus', None), None) mem_res_v3 = reservations.get("memory", None) # add args cpus = cpus_limit_v3 or cpus_limit_v2 if cpus: podman_args.extend( ( "--cpus", str(cpus), ) ) if cpu_shares_v2: podman_args.extend( ( "--cpu-shares", str(cpu_shares_v2), ) ) mem = mem_limit_v3 or mem_limit_v2 if mem: podman_args.extend( ( "-m", str(mem).lower(), ) ) mem_res = mem_res_v3 or mem_res_v2 if mem_res: podman_args.extend( ( "--memory-reservation", str(mem_res).lower(), ) ) def port_dict_to_str(port_desc): # NOTE: `mode: host|ingress` is ignored cnt_port = port_desc.get("target", None) published = port_desc.get("published", None) or "" host_ip = port_desc.get("host_ip", None) protocol = port_desc.get("protocol", None) or "tcp" if not cnt_port: raise ValueError("target container port must be specified") if host_ip: ret = f"{host_ip}:{published}:{cnt_port}" else: ret = f"{published}:{cnt_port}" if published else f"{cnt_port}" if protocol != "tcp": ret += f"/{protocol}" return ret def 
norm_ports(ports_in): if not ports_in: ports_in = [] if isinstance(ports_in, str): ports_in = [ports_in] ports_out = [] for port in ports_in: if isinstance(port, dict): port = port_dict_to_str(port) elif isinstance(port, int): port = str(port) elif not isinstance(port, str): raise TypeError("port should be either string or dict") ports_out.append(port) return ports_out def assert_cnt_nets(compose, cnt): """ create missing networks """ net = cnt.get("network_mode", None) if net and not net.startswith("bridge"): return proj_name = compose.project_name nets = compose.networks default_net = compose.default_net cnt_nets = cnt.get("networks", None) if cnt_nets and is_dict(cnt_nets): cnt_nets = list(cnt_nets.keys()) cnt_nets = norm_as_list(cnt_nets or default_net) for net in cnt_nets: net_desc = nets[net] or {} is_ext = net_desc.get("external", None) ext_desc = is_ext if is_dict(is_ext) else {} default_net_name = net if is_ext else f"{proj_name}_{net}" net_name = ( ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name ) try: compose.podman.output([], "network", ["exists", net_name]) except subprocess.CalledProcessError as e: if is_ext: raise RuntimeError( f"External network [{net_name}] does not exists" ) from e args = [ "create", "--label", f"io.podman.compose.project={proj_name}", "--label", f"com.docker.compose.project={proj_name}", ] # TODO: add more options here, like driver, internal, ..etc labels = net_desc.get("labels", None) or [] for item in norm_as_list(labels): args.extend(["--label", item]) if net_desc.get("internal", None): args.append("--internal") driver = net_desc.get("driver", None) if driver: args.extend(("--driver", driver)) driver_opts = net_desc.get("driver_opts", None) or {} for key, value in driver_opts.items(): args.extend(("--opt", f"{key}={value}")) ipam_config_ls = (net_desc.get("ipam", None) or {}).get( "config", None ) or [] if is_dict(ipam_config_ls): ipam_config_ls = [ipam_config_ls] for ipam in ipam_config_ls: subnet = ipam.get("subnet", None) ip_range = ipam.get("ip_range", None) gateway = ipam.get("gateway", None) if subnet: args.extend(("--subnet", subnet)) if ip_range: args.extend(("--ip-range", ip_range)) if gateway: args.extend(("--gateway", gateway)) args.append(net_name) compose.podman.output([], "network", args) compose.podman.output([], "network", ["exists", net_name]) def get_net_args(compose, cnt): service_name = cnt["service_name"] net_args = [] mac_address = cnt.get("mac_address", None) if mac_address: net_args.extend(["--mac-address", mac_address]) is_bridge = False net = cnt.get("network_mode", None) if net: if net == "none": is_bridge = False elif net == "host": net_args.extend(["--network", net]) elif net.startswith("slirp4netns:"): net_args.extend(["--network", net]) elif net.startswith("service:"): other_srv = net.split(":", 1)[1].strip() other_cnt = compose.container_names_by_service[other_srv][0] net_args.extend(["--network", f"container:{other_cnt}"]) elif net.startswith("container:"): other_cnt = net.split(":", 1)[1].strip() net_args.extend(["--network", f"container:{other_cnt}"]) elif net.startswith("bridge"): is_bridge = True else: print(f"unknown network_mode [{net}]") sys.exit(1) else: is_bridge = True proj_name = compose.project_name default_net = compose.default_net nets = compose.networks cnt_nets = cnt.get("networks", None) aliases = [service_name] # NOTE: from podman manpage: # NOTE: A container will only have access to aliases on the first network that it joins. 
This is a limitation that will be removed in a later release. ip = None ip6 = None if cnt_nets and is_dict(cnt_nets): prioritized_cnt_nets = [] # cnt_nets is {net_key: net_value, ...} for net_key, net_value in cnt_nets.items(): net_value = net_value or {} aliases.extend(norm_as_list(net_value.get("aliases", None))) if not ip: ip = net_value.get("ipv4_address", None) if not ip6: ip6 = net_value.get("ipv6_address", None) net_priority = net_value.get("priority", 0) prioritized_cnt_nets.append( ( net_priority, net_key, ) ) # sort dict by priority prioritized_cnt_nets.sort(reverse=True) cnt_nets = [net_key for _, net_key in prioritized_cnt_nets] cnt_nets = norm_as_list(cnt_nets or default_net) net_names = [] for net in cnt_nets: net_desc = nets[net] or {} is_ext = net_desc.get("external", None) ext_desc = is_ext if is_dict(is_ext) else {} default_net_name = net if is_ext else f"{proj_name}_{net}" net_name = ( ext_desc.get("name", None) or net_desc.get("name", None) or default_net_name ) net_names.append(net_name) net_names_str = ",".join(net_names) if is_bridge: net_args.extend(["--net", net_names_str, "--network-alias", ",".join(aliases)]) if ip: net_args.append(f"--ip={ip}") if ip6: net_args.append(f"--ip6={ip6}") return net_args def container_to_args(compose, cnt, detached=True): # TODO: double check -e , --add-host, -v, --read-only dirname = compose.dirname pod = cnt.get("pod", None) or "" name = cnt["name"] podman_args = [f"--name={name}"] if detached: podman_args.append("-d") if pod: podman_args.append(f"--pod={pod}") deps = [] for dep_srv in cnt.get("_deps", None) or []: deps.extend(compose.container_names_by_service.get(dep_srv, None) or []) if deps: deps_csv = ",".join(deps) podman_args.append(f"--requires={deps_csv}") sec = norm_as_list(cnt.get("security_opt", None)) for sec_item in sec: podman_args.extend(["--security-opt", sec_item]) ann = norm_as_list(cnt.get("annotations", None)) for a in ann: podman_args.extend(["--annotation", a]) if cnt.get("read_only", None): podman_args.append("--read-only") for i in cnt.get("labels", []): podman_args.extend(["--label", i]) for c in cnt.get("cap_add", []): podman_args.extend(["--cap-add", c]) for c in cnt.get("cap_drop", []): podman_args.extend(["--cap-drop", c]) for item in cnt.get("group_add", []): podman_args.extend(["--group-add", item]) for item in cnt.get("devices", []): podman_args.extend(["--device", item]) for item in norm_as_list(cnt.get("dns", None)): podman_args.extend(["--dns", item]) for item in norm_as_list(cnt.get("dns_opt", None)): podman_args.extend(["--dns-opt", item]) for item in norm_as_list(cnt.get("dns_search", None)): podman_args.extend(["--dns-search", item]) env_file = cnt.get("env_file", []) if is_str(env_file): env_file = [env_file] for i in env_file: i = os.path.realpath(os.path.join(dirname, i)) podman_args.extend(["--env-file", i]) env = norm_as_list(cnt.get("environment", {})) for e in env: podman_args.extend(["-e", e]) tmpfs_ls = cnt.get("tmpfs", []) if is_str(tmpfs_ls): tmpfs_ls = [tmpfs_ls] for i in tmpfs_ls: podman_args.extend(["--tmpfs", i]) for volume in cnt.get("volumes", []): podman_args.extend(get_mount_args(compose, cnt, volume)) assert_cnt_nets(compose, cnt) podman_args.extend(get_net_args(compose, cnt)) logging = cnt.get("logging", None) if logging is not None: podman_args.append(f'--log-driver={logging.get("driver", "k8s-file")}') log_opts = logging.get("options") or {} podman_args += [f"--log-opt={name}={value}" for name, value in log_opts.items()] for secret in cnt.get("secrets", []): 
podman_args.extend(get_secret_args(compose, cnt, secret)) for i in cnt.get("extra_hosts", []): podman_args.extend(["--add-host", i]) for i in cnt.get("expose", []): podman_args.extend(["--expose", i]) if cnt.get("publishall", None): podman_args.append("-P") ports = cnt.get("ports", None) or [] if isinstance(ports, str): ports = [ports] for port in ports: if isinstance(port, dict): port = port_dict_to_str(port) elif not isinstance(port, str): raise TypeError("port should be either string or dict") podman_args.extend(["-p", port]) userns_mode = cnt.get("userns_mode", None) if userns_mode is not None: podman_args.extend(["--userns", userns_mode]) user = cnt.get("user", None) if user is not None: podman_args.extend(["-u", user]) if cnt.get("working_dir", None) is not None: podman_args.extend(["-w", cnt["working_dir"]]) if cnt.get("hostname", None): podman_args.extend(["--hostname", cnt["hostname"]]) if cnt.get("shm_size", None): podman_args.extend(["--shm-size", str(cnt["shm_size"])]) if cnt.get("stdin_open", None): podman_args.append("-i") if cnt.get("stop_signal", None): podman_args.extend(["--stop-signal", cnt["stop_signal"]]) for i in cnt.get("sysctls", []): podman_args.extend(["--sysctl", i]) if cnt.get("tty", None): podman_args.append("--tty") if cnt.get("privileged", None): podman_args.append("--privileged") pull_policy = cnt.get("pull_policy", None) if pull_policy is not None and pull_policy != "build": podman_args.extend(["--pull", pull_policy]) if cnt.get("restart", None) is not None: podman_args.extend(["--restart", cnt["restart"]]) container_to_ulimit_args(cnt, podman_args) container_to_res_args(cnt, podman_args) # currently podman shipped by fedora does not package this if cnt.get("init", None): podman_args.append("--init") if cnt.get("init-path", None): podman_args.extend(["--init-path", cnt["init-path"]]) entrypoint = cnt.get("entrypoint", None) if entrypoint is not None: if is_str(entrypoint): entrypoint = shlex.split(entrypoint) podman_args.extend(["--entrypoint", json.dumps(entrypoint)]) platform = cnt.get("platform", None) if platform is not None: podman_args.extend(["--platform", platform]) # WIP: healthchecks are still work in progress healthcheck = cnt.get("healthcheck", None) or {} if not is_dict(healthcheck): raise ValueError("'healthcheck' must be an key-value mapping") healthcheck_disable = healthcheck.get("disable", False) healthcheck_test = healthcheck.get("test", None) if healthcheck_disable: healthcheck_test = ["NONE"] if healthcheck_test: # If it's a string, it's equivalent to specifying CMD-SHELL if is_str(healthcheck_test): # podman does not add shell to handle command with whitespace podman_args.extend( ["--healthcheck-command", "/bin/sh -c " + cmd_quote(healthcheck_test)] ) elif is_list(healthcheck_test): healthcheck_test = healthcheck_test.copy() # If it's a list, first item is either NONE, CMD or CMD-SHELL. healthcheck_type = healthcheck_test.pop(0) if healthcheck_type == "NONE": podman_args.append("--no-healthcheck") elif healthcheck_type == "CMD": cmd_q = "' '".join([cmd_quote(i) for i in healthcheck_test]) podman_args.extend(["--healthcheck-command", "/bin/sh -c " + cmd_q]) elif healthcheck_type == "CMD-SHELL": if len(healthcheck_test) != 1: raise ValueError("'CMD_SHELL' takes a single string after it") cmd_q = cmd_quote(healthcheck_test[0]) podman_args.extend(["--healthcheck-command", "/bin/sh -c " + cmd_q]) else: raise ValueError( f"unknown healthcheck test type [{healthcheck_type}],\ expecting NONE, CMD or CMD-SHELL." 
) else: raise ValueError("'healthcheck.test' either a string or a list") # interval, timeout and start_period are specified as durations. if "interval" in healthcheck: podman_args.extend(["--healthcheck-interval", healthcheck["interval"]]) if "timeout" in healthcheck: podman_args.extend(["--healthcheck-timeout", healthcheck["timeout"]]) if "start_period" in healthcheck: podman_args.extend(["--healthcheck-start-period", healthcheck["start_period"]]) # convert other parameters to string if "retries" in healthcheck: podman_args.extend(["--healthcheck-retries", str(healthcheck["retries"])]) podman_args.append(cnt["image"]) # command, ..etc. command = cnt.get("command", None) if command is not None: if is_str(command): podman_args.extend(shlex.split(command)) else: podman_args.extend([str(i) for i in command]) return podman_args def rec_deps(services, service_name, start_point=None): """ return all dependencies of service_name recursively """ if not start_point: start_point = service_name deps = services[service_name]["_deps"] for dep_name in deps.copy(): # avoid A depens on A if dep_name == service_name: continue dep_srv = services.get(dep_name, None) if not dep_srv: continue # NOTE: avoid creating loops, A->B->A if start_point and start_point in dep_srv["_deps"]: continue new_deps = rec_deps(services, dep_name, start_point) deps.update(new_deps) return deps def flat_deps(services, with_extends=False): """ create dependencies "_deps" or update it recursively for all services """ for name, srv in services.items(): deps = set() srv["_deps"] = deps if with_extends: ext = srv.get("extends", {}).get("service", None) if ext: if ext != name: deps.add(ext) continue deps_ls = srv.get("depends_on", None) or [] if is_str(deps_ls): deps_ls = [deps_ls] elif is_dict(deps_ls): deps_ls = list(deps_ls.keys()) deps.update(deps_ls) # parse link to get service name and remove alias links_ls = srv.get("links", None) or [] if not is_list(links_ls): links_ls = [links_ls] deps.update([(c.split(":")[0] if ":" in c else c) for c in links_ls]) for name, srv in services.items(): rec_deps(services, name) ################### # podman and compose classes ################### class Podman: def __init__(self, compose, podman_path="podman", dry_run=False): self.compose = compose self.podman_path = podman_path self.dry_run = dry_run def output(self, podman_args, cmd="", cmd_args=None): cmd_args = cmd_args or [] xargs = self.compose.get_podman_args(cmd) if cmd else [] cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args log(cmd_ls) return subprocess.check_output(cmd_ls) def exec( self, podman_args, cmd="", cmd_args=None, ): cmd_args = list(map(str, cmd_args or [])) xargs = self.compose.get_podman_args(cmd) if cmd else [] cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args log(" ".join([str(i) for i in cmd_ls])) os.execlp(self.podman_path, *cmd_ls) def run( self, podman_args, cmd="", cmd_args=None, wait=True, sleep=1, obj=None, log_formatter=None, ): if obj is not None: obj.exit_code = None cmd_args = list(map(str, cmd_args or [])) xargs = self.compose.get_podman_args(cmd) if cmd else [] cmd_ls = [self.podman_path, *podman_args, cmd] + xargs + cmd_args log(" ".join([str(i) for i in cmd_ls])) if self.dry_run: return None # subprocess.Popen(args, bufsize = 0, executable = None, stdin = None, stdout = None, stderr = None, preexec_fn = None, close_fds = False, shell = False, cwd = None, env = None, universal_newlines = False, startupinfo = None, creationflags = 0) if log_formatter is not None: # Pipe podman 
process output through log_formatter (which can add colored prefix) p = subprocess.Popen( cmd_ls, stdout=subprocess.PIPE ) # pylint: disable=consider-using-with _ = subprocess.Popen( log_formatter, stdin=p.stdout ) # pylint: disable=consider-using-with p.stdout.close() # Allow p_process to receive a SIGPIPE if logging process exits. else: p = subprocess.Popen(cmd_ls) # pylint: disable=consider-using-with if wait: exit_code = p.wait() log("exit code:", exit_code) if obj is not None: obj.exit_code = exit_code if sleep: time.sleep(sleep) return p def volume_ls(self, proj=None): if not proj: proj = self.compose.project_name output = self.output( [], "volume", [ "ls", "--noheading", "--filter", f"label=io.podman.compose.project={proj}", "--format", "{{.Name}}", ], ).decode("utf-8") volumes = output.splitlines() return volumes def normalize_service(service, sub_dir=""): # make `build.context` relative to sub_dir # TODO: should we make volume and secret relative too? if sub_dir and "build" in service: build = service["build"] context = build if is_str(build) else build.get("context", None) context = context or "" if context or sub_dir: if context.startswith("./"): context = context[2:] if sub_dir: context = os.path.join(sub_dir, context) context = context.rstrip("/") if not context: context = "." if is_str(build): service["build"] = context else: service["build"]["context"] = context for key in ("env_file", "security_opt", "volumes"): if key not in service: continue if is_str(service[key]): service[key] = [service[key]] if "security_opt" in service: sec_ls = service["security_opt"] for ix, item in enumerate(sec_ls): if item in ("seccomp:unconfined", "apparmor:unconfined"): sec_ls[ix] = item.replace(":", "=") for key in ("environment", "labels"): if key not in service: continue service[key] = norm_as_dict(service[key]) if "extends" in service: extends = service["extends"] if is_str(extends): extends = {"service": extends} service["extends"] = extends return service def normalize(compose): """ convert compose dict of some keys from string or dicts into arrays """ services = compose.get("services", None) or {} for service in services.values(): normalize_service(service) return compose def clone(value): return value.copy() if is_list(value) or is_dict(value) else value def rec_merge_one(target, source): """ update target from source recursively """ done = set() for key, value in source.items(): if key in target: continue target[key] = clone(value) done.add(key) for key, value in target.items(): if key in done: continue if key not in source: continue value2 = source[key] if key == "command": target[key] = clone(value2) continue if not isinstance(value2, type(value)): value_type = type(value) value2_type = type(value2) raise ValueError( f"can't merge value of {key} of type {value_type} and {value2_type}" ) if is_list(value2): if key == "volumes": # clean duplicate mount targets pts = {v.split(":", 1)[1] for v in value2 if ":" in v} del_ls = [ ix for (ix, v) in enumerate(value) if ":" in v and v.split(":", 1)[1] in pts ] for ix in reversed(del_ls): del value[ix] value.extend(value2) else: value.extend(value2) elif is_dict(value2): rec_merge_one(value, value2) else: target[key] = value2 return target def rec_merge(target, *sources): """ update target recursively from sources """ for source in sources: ret = rec_merge_one(target, source) return ret def resolve_extends(services, service_names, environ): for name in service_names: service = services[name] ext = service.get("extends", {}) if is_str(ext): ext 
= {"service": ext} from_service_name = ext.get("service", None) if not from_service_name: continue filename = ext.get("file", None) if filename: if filename.startswith("./"): filename = filename[2:] with open(filename, "r", encoding="utf-8") as f: content = yaml.safe_load(f) or {} if "services" in content: content = content["services"] subdirectory = os.path.dirname(filename) content = rec_subs(content, environ) from_service = content.get(from_service_name, {}) normalize_service(from_service, subdirectory) else: from_service = services.get(from_service_name, {}).copy() del from_service["_deps"] try: del from_service["extends"] except KeyError: pass new_service = rec_merge({}, from_service, service) services[name] = new_service def dotenv_to_dict(dotenv_path): if not os.path.isfile(dotenv_path): return {} return dotenv_values(dotenv_path) COMPOSE_DEFAULT_LS = [ "compose.yaml", "compose.yml", "compose.override.yaml", "compose.override.yml", "podman-compose.yaml", "podman-compose.yml", "docker-compose.yml", "docker-compose.yaml", "docker-compose.override.yml", "docker-compose.override.yaml", "container-compose.yml", "container-compose.yaml", "container-compose.override.yml", "container-compose.override.yaml", ] class PodmanCompose: def __init__(self): self.podman = None self.podman_version = None self.environ = {} self.exit_code = None self.commands = {} self.global_args = None self.project_name = None self.dirname = None self.pods = None self.containers = None self.vols = None self.networks = {} self.default_net = "default" self.declared_secrets = None self.container_names_by_service = None self.container_by_name = None self.services = None self.all_services = set() self.prefer_volume_over_mount = True self.merged_yaml = None self.yaml_hash = "" self.console_colors = [ "\x1B[1;32m", "\x1B[1;33m", "\x1B[1;34m", "\x1B[1;35m", "\x1B[1;36m", ] def assert_services(self, services): if is_str(services): services = [services] given = set(services or []) missing = given - self.all_services if missing: missing_csv = ",".join(missing) log(f"missing services [{missing_csv}]") sys.exit(1) def get_podman_args(self, cmd): xargs = [] for args in self.global_args.podman_args: xargs.extend(shlex.split(args)) cmd_norm = cmd if cmd != "create" else "run" cmd_args = self.global_args.__dict__.get(f"podman_{cmd_norm}_args", None) or [] for args in cmd_args: xargs.extend(shlex.split(args)) return xargs def run(self): log("podman-compose version: " + __version__) args = self._parse_args() podman_path = args.podman_path if podman_path != "podman": if os.path.isfile(podman_path) and os.access(podman_path, os.X_OK): podman_path = os.path.realpath(podman_path) else: # this also works if podman hasn't been installed now if args.dry_run is False: log(f"Binary {podman_path} has not been found.") sys.exit(1) self.podman = Podman(self, podman_path, args.dry_run) if not args.dry_run: # just to make sure podman is running try: self.podman_version = ( self.podman.output(["--version"], "", []).decode("utf-8").strip() or "" ) self.podman_version = (self.podman_version.split() or [""])[-1] except subprocess.CalledProcessError: self.podman_version = None if not self.podman_version: log("it seems that you do not have `podman` installed") sys.exit(1) log("using podman version: " + self.podman_version) cmd_name = args.command compose_required = cmd_name != "version" and ( cmd_name != "systemd" or args.action != "create-unit" ) if compose_required: self._parse_compose_file() cmd = self.commands[cmd_name] cmd(self, args) def 
_parse_compose_file(self): args = self.global_args # cmd = args.command dirname = os.environ.get("COMPOSE_PROJECT_DIR", None) if dirname and os.path.isdir(dirname): os.chdir(dirname) pathsep = os.environ.get("COMPOSE_PATH_SEPARATOR", None) or os.pathsep if not args.file: default_str = os.environ.get("COMPOSE_FILE", None) if default_str: default_ls = default_str.split(pathsep) else: default_ls = COMPOSE_DEFAULT_LS args.file = list(filter(os.path.exists, default_ls)) files = args.file if not files: log( "no compose.yaml, docker-compose.yml or container-compose.yml file found, pass files with -f" ) sys.exit(-1) ex = map(os.path.exists, files) missing = [fn0 for ex0, fn0 in zip(ex, files) if not ex0] if missing: log("missing files: ", missing) sys.exit(1) # make absolute relative_files = files files = list(map(os.path.realpath, files)) filename = files[0] project_name = args.project_name # no_ansi = args.no_ansi # no_cleanup = args.no_cleanup # dry_run = args.dry_run # host_env = None dirname = os.path.realpath(os.path.dirname(filename)) dir_basename = os.path.basename(dirname) self.dirname = dirname # TODO: remove next line os.chdir(dirname) dotenv_path = os.path.join(dirname, args.env_file) dotenv_dict = dotenv_to_dict(dotenv_path) os.environ.update( { key: value for key, value in dotenv_dict.items() if key.startswith("PODMAN_") } ) self.environ = dict(os.environ) self.environ.update(dotenv_dict) # see: https://docs.docker.com/compose/reference/envvars/ # see: https://docs.docker.com/compose/env-file/ self.environ.update( { "COMPOSE_PROJECT_DIR": dirname, "COMPOSE_FILE": pathsep.join(relative_files), "COMPOSE_PATH_SEPARATOR": pathsep, } ) compose = {} for filename in files: with open(filename, "r", encoding="utf-8") as f: content = yaml.safe_load(f) # log(filename, json.dumps(content, indent = 2)) if not isinstance(content, dict): sys.stderr.write( "Compose file does not contain a top level object: %s\n" % filename ) sys.exit(1) content = normalize(content) # log(filename, json.dumps(content, indent = 2)) content = rec_subs(content, self.environ) rec_merge(compose, content) self.merged_yaml = yaml.safe_dump(compose) merged_json_b = json.dumps(compose, separators=(",", ":")).encode("utf-8") self.yaml_hash = hashlib.sha256(merged_json_b).hexdigest() compose["_dirname"] = dirname # debug mode if len(files) > 1: log(" ** merged:\n", json.dumps(compose, indent=2)) # ver = compose.get('version', None) if not project_name: project_name = compose.get("name", None) if project_name is None: # More strict then actually needed for simplicity: podman requires [a-zA-Z0-9][a-zA-Z0-9_.-]* project_name = ( os.environ.get("COMPOSE_PROJECT_NAME", None) or dir_basename.lower() ) project_name = norm_re.sub("", project_name) if not project_name: raise RuntimeError( f"Project name [{dir_basename}] normalized to empty" ) self.project_name = project_name self.environ.update({"COMPOSE_PROJECT_NAME": self.project_name}) services = compose.get("services", None) if services is None: services = {} log("WARNING: No services defined") # NOTE: maybe add "extends.service" to _deps at this stage flat_deps(services, with_extends=True) service_names = sorted( [(len(srv["_deps"]), name) for name, srv in services.items()] ) service_names = [name for _, name in service_names] resolve_extends(services, service_names, self.environ) flat_deps(services) service_names = sorted( [(len(srv["_deps"]), name) for name, srv in services.items()] ) service_names = [name for _, name in service_names] nets = compose.get("networks", None) or {} 
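# How the default network is resolved below -- a sketch using hypothetical compose snippets:
#   networks: {backend: {}}          -> "backend" becomes the default for services without a `networks:` key
#   networks: {app: {}, default: {}} -> "default" is used
#   networks: {app: {}, db: {}}      -> no default is picked (default_net = None)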
if not nets: nets["default"] = None self.networks = nets if len(self.networks) == 1: self.default_net = list(nets.keys())[0] elif "default" in nets: self.default_net = "default" else: self.default_net = None default_net = self.default_net allnets = set() for name, srv in services.items(): srv_nets = srv.get("networks", None) or default_net srv_nets = ( list(srv_nets.keys()) if is_dict(srv_nets) else norm_as_list(srv_nets) ) allnets.update(srv_nets) given_nets = set(nets.keys()) missing_nets = allnets - given_nets unused_nets = given_nets - allnets - set(["default"]) if len(unused_nets): unused_nets_str = ",".join(unused_nets) log(f"WARNING: unused networks: {unused_nets_str}") if len(missing_nets): missing_nets_str = ",".join(missing_nets) raise RuntimeError(f"missing networks: {missing_nets_str}") # volumes: [...] self.vols = compose.get("volumes", {}) podman_compose_labels = [ "io.podman.compose.config-hash=" + self.yaml_hash, "io.podman.compose.project=" + project_name, "io.podman.compose.version=" + __version__, f"PODMAN_SYSTEMD_UNIT=podman-compose@{project_name}.service", "com.docker.compose.project=" + project_name, "com.docker.compose.project.working_dir=" + dirname, "com.docker.compose.project.config_files=" + ",".join(relative_files), ] # other top-levels: # networks: {driver: ...} # configs: {...} self.declared_secrets = compose.get("secrets", {}) given_containers = [] container_names_by_service = {} self.services = services for service_name, service_desc in services.items(): replicas = try_int(service_desc.get("deploy", {}).get("replicas", "1")) container_names_by_service[service_name] = [] for num in range(1, replicas + 1): name0 = f"{project_name}_{service_name}_{num}" if num == 1: name = service_desc.get("container_name", name0) else: name = name0 container_names_by_service[service_name].append(name) # log(service_name,service_desc) cnt = { "name": name, "num": num, "service_name": service_name, **service_desc, } if "image" not in cnt: cnt["image"] = f"{project_name}_{service_name}" labels = norm_as_list(cnt.get("labels", None)) cnt["ports"] = norm_ports(cnt.get("ports", None)) labels.extend(podman_compose_labels) labels.extend( [ f"com.docker.compose.container-number={num}", "com.docker.compose.service=" + service_name, ] ) cnt["labels"] = labels cnt["_service"] = service_name cnt["_project"] = project_name given_containers.append(cnt) volumes = cnt.get("volumes", None) or [] for volume in volumes: mnt_dict = get_mnt_dict(self, cnt, volume) if ( mnt_dict.get("type", None) == "volume" and mnt_dict["source"] and mnt_dict["source"] not in self.vols ): vol_name = mnt_dict["source"] raise RuntimeError( f"volume [{vol_name}] not defined in top level" ) self.container_names_by_service = container_names_by_service self.all_services = set(container_names_by_service.keys()) container_by_name = {c["name"]: c for c in given_containers} # log("deps:", [(c["name"], c["_deps"]) for c in given_containers]) given_containers = list(container_by_name.values()) given_containers.sort(key=lambda c: len(c.get("_deps", None) or [])) # log("sorted:", [c["name"] for c in given_containers]) pods, containers = transform(args, project_name, given_containers) self.pods = pods self.containers = containers self.container_by_name = {c["name"]: c for c in containers} def _parse_args(self): parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) self._init_global_parser(parser) subparsers = parser.add_subparsers(title="command", dest="command") subparser = subparsers.add_parser("help", 
help="show help") for cmd_name, cmd in self.commands.items(): subparser = subparsers.add_parser( cmd_name, help=cmd.desc ) # pylint: disable=protected-access for cmd_parser in cmd._parse_args: # pylint: disable=protected-access cmd_parser(subparser) self.global_args = parser.parse_args() if self.global_args.version: self.global_args.command = "version" if not self.global_args.command or self.global_args.command == "help": parser.print_help() sys.exit(-1) return self.global_args @staticmethod def _init_global_parser(parser): parser.add_argument("-v", "--version", help="show version", action="store_true") parser.add_argument( "--in-pod", help="pod creation", metavar="in_pod", type=bool, default=False, ) parser.add_argument( "--pod-args", help="custom arguments to be passed to `podman pod`", metavar="pod_args", type=str, default="--infra=false --share=", ) parser.add_argument( "--env-file", help="Specify an alternate environment file", metavar="env_file", type=str, default=".env", ) parser.add_argument( "-f", "--file", help="Specify an alternate compose file (default: docker-compose.yml)", metavar="file", action="append", default=[], ) parser.add_argument( "-p", "--project-name", help="Specify an alternate project name (default: directory name)", type=str, default=None, ) parser.add_argument( "--podman-path", help="Specify an alternate path to podman (default: use location in $PATH variable)", type=str, default="podman", ) parser.add_argument( "--podman-args", help="custom global arguments to be passed to `podman`", metavar="args", action="append", default=[], ) for podman_cmd in PODMAN_CMDS: parser.add_argument( f"--podman-{podman_cmd}-args", help=f"custom arguments to be passed to `podman {podman_cmd}`", metavar="args", action="append", default=[], ) parser.add_argument( "--no-ansi", help="Do not print ANSI control characters", action="store_true", ) parser.add_argument( "--no-cleanup", help="Do not stop and remove existing pod & containers", action="store_true", ) parser.add_argument( "--dry-run", help="No action; perform a simulation of commands", action="store_true", ) podman_compose = PodmanCompose() ################### # decorators to add commands and parse options ################### class cmd_run: # pylint: disable=invalid-name,too-few-public-methods def __init__(self, compose, cmd_name, cmd_desc=None): self.compose = compose self.cmd_name = cmd_name self.cmd_desc = cmd_desc def __call__(self, func): def wrapped(*args, **kw): return func(*args, **kw) wrapped._compose = self.compose # Trim extra indentation at start of multiline docstrings. 
wrapped.desc = self.cmd_desc or re.sub(r"^\s+", "", func.__doc__) wrapped._parse_args = [] self.compose.commands[self.cmd_name] = wrapped return wrapped class cmd_parse: # pylint: disable=invalid-name,too-few-public-methods def __init__(self, compose, cmd_names): self.compose = compose self.cmd_names = cmd_names if is_list(cmd_names) else [cmd_names] def __call__(self, func): def wrapped(*args, **kw): return func(*args, **kw) for cmd_name in self.cmd_names: self.compose.commands[cmd_name]._parse_args.append(wrapped) return wrapped ################### # actual commands ################### @cmd_run(podman_compose, "version", "show version") def compose_version(compose, args): if getattr(args, "short", False): print(__version__) return if getattr(args, "format", "pretty") == "json": res = {"version": __version__} print(json.dumps(res)) return print("podman-compose version", __version__) compose.podman.run(["--version"], "", [], sleep=0) def is_local(container: dict) -> bool: """Test if a container is local, i.e. if it is * prefixed with localhost/ * has a build section and is not prefixed """ return ( not "/" in container["image"] if "build" in container else container["image"].startswith("localhost/") ) @cmd_run(podman_compose, "wait", "wait running containers to stop") def compose_wait(compose, args): # pylint: disable=unused-argument containers = [cnt["name"] for cnt in compose.containers] cmd_args = ["--"] cmd_args.extend(containers) compose.podman.exec([], "wait", cmd_args) @cmd_run(podman_compose, "systemd") def compose_systemd(compose, args): """ create systemd unit file and register its compose stacks When first installed type `sudo podman-compose systemd -a create-unit` later you can add a compose stack by running `podman-compose systemd -a register` then you can start/stop your stack with `systemctl --user start podman-compose@` """ stacks_dir = ".config/containers/compose/projects" if args.action == "register": proj_name = compose.project_name fn = os.path.expanduser(f"~/{stacks_dir}/{proj_name}.env") os.makedirs(os.path.dirname(fn), exist_ok=True) print(f"writing [{fn}]: ...") with open(fn, "w", encoding="utf-8") as f: for k, v in compose.environ.items(): if k.startswith("COMPOSE_") or k.startswith("PODMAN_"): f.write(f"{k}={v}\n") print(f"writing [{fn}]: done.") print("\n\ncreating the pod without starting it: ...\n\n") process = subprocess.run([script, "up", "--no-start"], check=False) print("\nfinal exit code is ", process.returncode) username = getpass.getuser() print( f""" you can use systemd commands like enable, start, stop, status, cat all without `sudo` like this: \t\tsystemctl --user enable --now 'podman-compose@{proj_name}' \t\tsystemctl --user status 'podman-compose@{proj_name}' \t\tjournalctl --user -xeu 'podman-compose@{proj_name}' and for that to work outside a session you might need to run the following command *once* \t\tsudo loginctl enable-linger '{username}' you can use podman commands like: \t\tpodman pod ps \t\tpodman pod stats 'pod_{proj_name}' \t\tpodman pod logs --tail=10 -f 'pod_{proj_name}' """ ) elif args.action in ("list", "ls"): ls = glob.glob(os.path.expanduser(f"~/{stacks_dir}/*.env")) for i in ls: print(os.path.basename(i[:-4])) elif args.action == "create-unit": fn = "/etc/systemd/user/podman-compose@.service" out = f"""\ # {fn} [Unit] Description=%i rootless pod (podman-compose) [Service] Type=simple EnvironmentFile=%h/{stacks_dir}/%i.env ExecStartPre=-{script} up --no-start ExecStartPre=/usr/bin/podman pod start pod_%i ExecStart={script} wait 
ExecStop=/usr/bin/podman pod stop pod_%i [Install] WantedBy=default.target """ if os.access(os.path.dirname(fn), os.W_OK): print(f"writing [{fn}]: ...") with open(fn, "w", encoding="utf-8") as f: f.write(out) print(f"writing [{fn}]: done.") print( """ while in your project type `podman-compose systemd -a register` """ ) else: print(out) log(f"Could not write to [{fn}], use 'sudo'") @cmd_run(podman_compose, "pull", "pull stack images") def compose_pull(compose, args): img_containers = [cnt for cnt in compose.containers if "image" in cnt] if args.services: services = set(args.services) img_containers = [cnt for cnt in img_containers if cnt["_service"] in services] images = {cnt["image"] for cnt in img_containers} if not args.force_local: local_images = {cnt["image"] for cnt in img_containers if is_local(cnt)} images -= local_images for image in images: compose.podman.run([], "pull", [image], sleep=0) @cmd_run(podman_compose, "push", "push stack images") def compose_push(compose, args): services = set(args.services) for cnt in compose.containers: if "build" not in cnt: continue if services and cnt["_service"] not in services: continue compose.podman.run([], "push", [cnt["image"]], sleep=0) def build_one(compose, args, cnt): if "build" not in cnt: return if getattr(args, "if_not_exists", None): try: img_id = compose.podman.output( [], "inspect", ["-t", "image", "-f", "{{.Id}}", cnt["image"]] ) except subprocess.CalledProcessError: img_id = None if img_id: return build_desc = cnt["build"] if not hasattr(build_desc, "items"): build_desc = {"context": build_desc} ctx = build_desc.get("context", ".") dockerfile = build_desc.get("dockerfile", None) if dockerfile: dockerfile = os.path.join(ctx, dockerfile) else: dockerfile_alts = [ "Containerfile", "ContainerFile", "containerfile", "Dockerfile", "DockerFile", "dockerfile", ] for dockerfile in dockerfile_alts: dockerfile = os.path.join(ctx, dockerfile) if os.path.exists(dockerfile): break if not os.path.exists(dockerfile): raise OSError("Dockerfile not found in " + ctx) build_args = ["-f", dockerfile, "-t", cnt["image"]] for secret in build_desc.get("secrets", []): build_args.extend(get_secret_args(compose, cnt, secret)) for tag in build_desc.get("tags", []): build_args.extend(["-t", tag]) if "target" in build_desc: build_args.extend(["--target", build_desc["target"]]) container_to_ulimit_args(cnt, build_args) if getattr(args, "no_cache", None): build_args.append("--no-cache") if getattr(args, "pull_always", None): build_args.append("--pull-always") elif getattr(args, "pull", None): build_args.append("--pull") args_list = norm_as_list(build_desc.get("args", {})) for build_arg in args_list + args.build_arg: build_args.extend( ( "--build-arg", build_arg, ) ) build_args.append(ctx) compose.podman.run([], "build", build_args, sleep=0) @cmd_run(podman_compose, "build", "build stack images") def compose_build(compose, args): if args.services: container_names_by_service = compose.container_names_by_service compose.assert_services(args.services) for service in args.services: cnt = compose.container_by_name[container_names_by_service[service][0]] build_one(compose, args, cnt) else: for cnt in compose.containers: build_one(compose, args, cnt) def create_pods(compose, args): # pylint: disable=unused-argument for pod in compose.pods: podman_args = [ "create", "--name=" + pod["name"], ] if args.pod_args: podman_args.extend(shlex.split(args.pod_args)) # if compose.podman_version and not strverscmp_lt(compose.podman_version, "3.4.0"): # 
podman_args.append("--infra-name={}_infra".format(pod["name"])) ports = pod.get("ports", None) or [] if isinstance(ports, str): ports = [ports] for i in ports: podman_args.extend(["-p", str(i)]) compose.podman.run([], "pod", podman_args) def get_excluded(compose, args): excluded = set() if args.services: excluded = set(compose.services) for service in args.services: excluded -= compose.services[service]["_deps"] excluded.discard(service) log("** excluding: ", excluded) return excluded @cmd_run( podman_compose, "up", "Create and start the entire stack or some of its services" ) def compose_up(compose, args): proj_name = compose.project_name excluded = get_excluded(compose, args) if not args.no_build: # `podman build` does not cache, so don't always build build_args = argparse.Namespace(if_not_exists=(not args.build), **args.__dict__) compose.commands["build"](compose, build_args) hashes = ( compose.podman.output( [], "ps", [ "--filter", f"label=io.podman.compose.project={proj_name}", "-a", "--format", '{{ index .Labels "io.podman.compose.config-hash"}}', ], ) .decode("utf-8") .splitlines() ) diff_hashes = [i for i in hashes if i and i != compose.yaml_hash] if args.force_recreate or len(diff_hashes): log("recreating: ...") down_args = argparse.Namespace(**dict(args.__dict__, volumes=False)) compose.commands["down"](compose, down_args) log("recreating: done\n\n") # args.no_recreate disables check for changes (which is not implemented) podman_command = "run" if args.detach and not args.no_start else "create" create_pods(compose, args) for cnt in compose.containers: if cnt["_service"] in excluded: log("** skipping: ", cnt["name"]) continue podman_args = container_to_args(compose, cnt, detached=args.detach) subproc = compose.podman.run([], podman_command, podman_args) if podman_command == "run" and subproc and subproc.returncode: compose.podman.run([], "start", [cnt["name"]]) if args.no_start or args.detach or args.dry_run: return # TODO: handle already existing # TODO: if error creating do not enter loop # TODO: colors if sys.stdout.isatty() exit_code_from = args.__dict__.get("exit_code_from", None) if exit_code_from: args.abort_on_container_exit = True threads = [] max_service_length = 0 for cnt in compose.containers: curr_length = len(cnt["_service"]) max_service_length = ( curr_length if curr_length > max_service_length else max_service_length ) has_sed = os.path.isfile("/bin/sed") for i, cnt in enumerate(compose.containers): # Add colored service prefix to output by piping output through sed color_idx = i % len(compose.console_colors) color = compose.console_colors[color_idx] space_suffix = " " * (max_service_length - len(cnt["_service"]) + 1) log_formatter = "s/^/{}[{}]{}|\x1B[0m\\ /;".format( color, cnt["_service"], space_suffix ) log_formatter = ["sed", "-e", log_formatter] if has_sed else None if cnt["_service"] in excluded: log("** skipping: ", cnt["name"]) continue # TODO: remove sleep from podman.run obj = compose if exit_code_from == cnt["_service"] else None thread = Thread( target=compose.podman.run, args=[[], "start", ["-a", cnt["name"]]], kwargs={"obj": obj, "log_formatter": log_formatter}, daemon=True, name=cnt["name"], ) thread.start() threads.append(thread) time.sleep(1) while threads: to_remove = [] for thread in threads: thread.join(timeout=1.0) if not thread.is_alive(): to_remove.append(thread) if args.abort_on_container_exit: time.sleep(1) exit_code = ( compose.exit_code if compose.exit_code is not None else -1 ) sys.exit(exit_code) for thread in to_remove: 
threads.remove(thread) def get_volume_names(compose, cnt): proj_name = compose.project_name basedir = compose.dirname srv_name = cnt["_service"] ls = [] for volume in cnt.get("volumes", []): if is_str(volume): volume = parse_short_mount(volume, basedir) volume = fix_mount_dict(compose, volume, proj_name, srv_name) mount_type = volume["type"] if mount_type != "volume": continue volume_name = (volume.get("_vol", None) or {}).get("name", None) ls.append(volume_name) return ls @cmd_run(podman_compose, "down", "tear down entire stack") def compose_down(compose, args): excluded = get_excluded(compose, args) podman_args = [] timeout_global = getattr(args, "timeout", None) containers = list(reversed(compose.containers)) for cnt in containers: if cnt["_service"] in excluded: continue podman_stop_args = [*podman_args] timeout = timeout_global if timeout is None: timeout_str = cnt.get("stop_grace_period", None) or STOP_GRACE_PERIOD timeout = str_to_seconds(timeout_str) if timeout is not None: podman_stop_args.extend(["-t", str(timeout)]) compose.podman.run([], "stop", [*podman_stop_args, cnt["name"]], sleep=0) for cnt in containers: if cnt["_service"] in excluded: continue compose.podman.run([], "rm", [cnt["name"]], sleep=0) if args.remove_orphans: names = ( compose.podman.output( [], "ps", [ "--filter", f"label=io.podman.compose.project={compose.project_name}", "-a", "--format", "{{ .Names }}", ], ) .decode("utf-8") .splitlines() ) for name in names: compose.podman.run([], "stop", [*podman_args, name], sleep=0) for name in names: compose.podman.run([], "rm", [name], sleep=0) if args.volumes: vol_names_to_keep = set() for cnt in containers: if cnt["_service"] not in excluded: continue vol_names_to_keep.update(get_volume_names(compose, cnt)) log("keep", vol_names_to_keep) for volume_name in compose.podman.volume_ls(): if volume_name in vol_names_to_keep: continue compose.podman.run([], "volume", ["rm", volume_name]) if excluded: return for pod in compose.pods: compose.podman.run([], "pod", ["rm", pod["name"]], sleep=0) @cmd_run(podman_compose, "ps", "show status of containers") def compose_ps(compose, args): proj_name = compose.project_name if args.quiet is True: compose.podman.run( [], "ps", [ "-a", "--format", "{{.ID}}", "--filter", f"label=io.podman.compose.project={proj_name}", ], ) else: compose.podman.run( [], "ps", ["-a", "--filter", f"label=io.podman.compose.project={proj_name}"] ) @cmd_run( podman_compose, "run", "create a container similar to a service to run a one-off command", ) def compose_run(compose, args): create_pods(compose, args) compose.assert_services(args.service) container_names = compose.container_names_by_service[args.service] container_name = container_names[0] cnt = dict(compose.container_by_name[container_name]) deps = cnt["_deps"] if deps and not args.no_deps: up_args = argparse.Namespace( **dict( args.__dict__, detach=True, services=deps, # defaults no_build=False, build=None, force_recreate=False, no_start=False, no_cache=False, build_arg=[], ) ) compose.commands["up"](compose, up_args) # adjust one-off container options name0 = "{}_{}_tmp{}".format( compose.project_name, args.service, random.randrange(0, 65536) ) cnt["name"] = args.name or name0 if args.entrypoint: cnt["entrypoint"] = args.entrypoint if args.user: cnt["user"] = args.user if args.workdir: cnt["working_dir"] = args.workdir env = dict(cnt.get("environment", {})) if args.env: additional_env_vars = dict(map(lambda each: each.split("="), args.env)) env.update(additional_env_vars) cnt["environment"] = env if 
not args.service_ports: for k in ("expose", "publishall", "ports"): try: del cnt[k] except KeyError: pass if args.volume: # TODO: handle volumes volumes = clone(cnt.get("volumes", None) or []) volumes.extend(args.volume) cnt["volumes"] = volumes cnt["tty"] = not args.T if args.cnt_command is not None and len(args.cnt_command) > 0: cnt["command"] = args.cnt_command # can't restart and --rm if args.rm and "restart" in cnt: del cnt["restart"] # run podman podman_args = container_to_args(compose, cnt, args.detach) if not args.detach: podman_args.insert(1, "-i") if args.rm: podman_args.insert(1, "--rm") p = compose.podman.run([], "run", podman_args, sleep=0) sys.exit(p.returncode) @cmd_run(podman_compose, "exec", "execute a command in a running container") def compose_exec(compose, args): compose.assert_services(args.service) container_names = compose.container_names_by_service[args.service] container_name = container_names[args.index - 1] cnt = compose.container_by_name[container_name] podman_args = ["--interactive"] if args.privileged: podman_args += ["--privileged"] if args.user: podman_args += ["--user", args.user] if args.workdir: podman_args += ["--workdir", args.workdir] if not args.T: podman_args += ["--tty"] env = dict(cnt.get("environment", {})) if args.env: additional_env_vars = dict( map(lambda each: each.split("=") if "=" in each else (each, None), args.env) ) env.update(additional_env_vars) for name, value in env.items(): podman_args += ["--env", f"{name}" if value is None else f"{name}={value}"] podman_args += [container_name] if args.cnt_command is not None and len(args.cnt_command) > 0: podman_args += args.cnt_command p = compose.podman.run([], "exec", podman_args, sleep=0) sys.exit(p.returncode) def transfer_service_status(compose, args, action): # TODO: handle dependencies, handle creations container_names_by_service = compose.container_names_by_service if not args.services: args.services = container_names_by_service.keys() compose.assert_services(args.services) targets = [] for service in args.services: if service not in container_names_by_service: raise ValueError("unknown service: " + service) targets.extend(container_names_by_service[service]) if action in ["stop", "restart"]: targets = list(reversed(targets)) podman_args = [] timeout_global = getattr(args, "timeout", None) for target in targets: if action != "start": timeout = timeout_global if timeout is None: timeout_str = ( compose.container_by_name[target].get("stop_grace_period", None) or STOP_GRACE_PERIOD ) timeout = str_to_seconds(timeout_str) if timeout is not None: podman_args.extend(["-t", str(timeout)]) compose.podman.run([], action, podman_args + [target], sleep=0) @cmd_run(podman_compose, "start", "start specific services") def compose_start(compose, args): transfer_service_status(compose, args, "start") @cmd_run(podman_compose, "stop", "stop specific services") def compose_stop(compose, args): transfer_service_status(compose, args, "stop") @cmd_run(podman_compose, "restart", "restart specific services") def compose_restart(compose, args): transfer_service_status(compose, args, "restart") @cmd_run(podman_compose, "logs", "show logs from services") def compose_logs(compose, args): container_names_by_service = compose.container_names_by_service if not args.services and not args.latest: args.services = container_names_by_service.keys() compose.assert_services(args.services) targets = [] for service in args.services: targets.extend(container_names_by_service[service]) podman_args = [] if args.follow: 
podman_args.append("-f") if args.latest: podman_args.append("-l") if args.names: podman_args.append("-n") if args.since: podman_args.extend(["--since", args.since]) # the default value is to print all logs which is in podman = 0 and not # needed to be passed if args.tail and args.tail != "all": podman_args.extend(["--tail", args.tail]) if args.timestamps: podman_args.append("-t") if args.until: podman_args.extend(["--until", args.until]) for target in targets: podman_args.append(target) compose.podman.run([], "logs", podman_args) @cmd_run(podman_compose, "config", "displays the compose file") def compose_config(compose, args): if args.services: for service in compose.services: print(service) return print(compose.merged_yaml) @cmd_run(podman_compose, "port", "Prints the public port for a port binding.") def compose_port(compose, args): # TODO - deal with pod index compose.assert_services(args.service) containers = compose.container_names_by_service[args.service] container_ports = list( itertools.chain(*(compose.container_by_name[c]["ports"] for c in containers)) ) def _published_target(port_string): published, target = port_string.split(":")[-2:] return int(published), int(target) select_udp = args.protocol == "udp" published, target = None, None for p in container_ports: is_udp = p[-4:] == "/udp" if select_udp and is_udp: published, target = _published_target(p[-4:]) if not select_udp and not is_udp: published, target = _published_target(p) if target == args.private_port: print(published) return @cmd_run(podman_compose, "pause", "Pause all running containers") def compose_pause(compose, args): container_names_by_service = compose.container_names_by_service if not args.services: args.services = container_names_by_service.keys() targets = [] for service in args.services: targets.extend(container_names_by_service[service]) compose.podman.run([], "pause", targets) @cmd_run(podman_compose, "unpause", "Unpause all running containers") def compose_unpause(compose, args): container_names_by_service = compose.container_names_by_service if not args.services: args.services = container_names_by_service.keys() targets = [] for service in args.services: targets.extend(container_names_by_service[service]) compose.podman.run([], "unpause", targets) @cmd_run( podman_compose, "kill", "Kill one or more running containers with a specific signal" ) def compose_kill(compose, args): # to ensure that the user did not execute the command by mistake if not args.services and not args.all: print( "Error: you must provide at least one service name or use (--all) to kill all services" ) sys.exit() container_names_by_service = compose.container_names_by_service podman_args = [] if args.signal: podman_args.extend(["--signal", args.signal]) if args.all is True: services = container_names_by_service.keys() targets = [] for service in services: targets.extend(container_names_by_service[service]) for target in targets: podman_args.append(target) compose.podman.run([], "kill", podman_args) if args.services: targets = [] for service in args.services: targets.extend(container_names_by_service[service]) for target in targets: podman_args.append(target) compose.podman.run([], "kill", podman_args) ################### # command arguments parsing ################### @cmd_parse(podman_compose, "version") def compose_version_parse(parser): parser.add_argument( "-f", "--format", choices=["pretty", "json"], default="pretty", help="Format the output", ) parser.add_argument( "--short", action="store_true", help="Shows only Podman 
Compose's version number", ) @cmd_parse(podman_compose, "up") def compose_up_parse(parser): parser.add_argument( "-d", "--detach", action="store_true", help="Detached mode: Run container in the background, print new container name. Incompatible with --abort-on-container-exit.", ) parser.add_argument( "--no-color", action="store_true", help="Produce monochrome output." ) parser.add_argument( "--quiet-pull", action="store_true", help="Pull without printing progress information.", ) parser.add_argument( "--no-deps", action="store_true", help="Don't start linked services." ) parser.add_argument( "--force-recreate", action="store_true", help="Recreate containers even if their configuration and image haven't changed.", ) parser.add_argument( "--always-recreate-deps", action="store_true", help="Recreate dependent containers. Incompatible with --no-recreate.", ) parser.add_argument( "--no-recreate", action="store_true", help="If containers already exist, don't recreate them. Incompatible with --force-recreate and -V.", ) parser.add_argument( "--no-build", action="store_true", help="Don't build an image, even if it's missing.", ) parser.add_argument( "--no-start", action="store_true", help="Don't start the services after creating them.", ) parser.add_argument( "--build", action="store_true", help="Build images before starting containers." ) parser.add_argument( "--abort-on-container-exit", action="store_true", help="Stops all containers if any container was stopped. Incompatible with -d.", ) parser.add_argument( "-t", "--timeout", type=int, default=None, help="Use this timeout in seconds for container shutdown when attached or when containers are already running. (default: 10)", ) parser.add_argument( "-V", "--renew-anon-volumes", action="store_true", help="Recreate anonymous volumes instead of retrieving data from the previous containers.", ) parser.add_argument( "--remove-orphans", action="store_true", help="Remove containers for services not defined in the Compose file.", ) parser.add_argument( "--scale", metavar="SERVICE=NUM", action="append", help="Scale SERVICE to NUM instances. Overrides the `scale` setting in the Compose file if present.", ) parser.add_argument( "--exit-code-from", metavar="SERVICE", type=str, default=None, help="Return the exit code of the selected service container. 
Implies --abort-on-container-exit.", ) @cmd_parse(podman_compose, "down") def compose_down_parse(parser): parser.add_argument( "-v", "--volumes", action="store_true", default=False, help="Remove named volumes declared in the `volumes` section of the Compose file and " "anonymous volumes attached to containers.", ) parser.add_argument( "--remove-orphans", action="store_true", help="Remove containers for services not defined in the Compose file.", ) @cmd_parse(podman_compose, "run") def compose_run_parse(parser): parser.add_argument( "-d", "--detach", action="store_true", help="Detached mode: Run container in the background, print new container name.", ) parser.add_argument( "--name", type=str, default=None, help="Assign a name to the container" ) parser.add_argument( "--entrypoint", type=str, default=None, help="Override the entrypoint of the image.", ) parser.add_argument( "-e", "--env", metavar="KEY=VAL", action="append", help="Set an environment variable (can be used multiple times)", ) parser.add_argument( "-l", "--label", metavar="KEY=VAL", action="append", help="Add or override a label (can be used multiple times)", ) parser.add_argument( "-u", "--user", type=str, default=None, help="Run as specified username or uid" ) parser.add_argument( "--no-deps", action="store_true", help="Don't start linked services" ) parser.add_argument( "--rm", action="store_true", help="Remove container after run. Ignored in detached mode.", ) parser.add_argument( "-p", "--publish", action="append", help="Publish a container's port(s) to the host (can be used multiple times)", ) parser.add_argument( "--service-ports", action="store_true", help="Run command with the service's ports enabled and mapped to the host.", ) parser.add_argument( "-v", "--volume", action="append", help="Bind mount a volume (can be used multiple times)", ) parser.add_argument( "-T", action="store_true", help="Disable pseudo-tty allocation. By default `podman-compose run` allocates a TTY.", ) parser.add_argument( "-w", "--workdir", type=str, default=None, help="Working directory inside the container", ) parser.add_argument("service", metavar="service", nargs=None, help="service name") parser.add_argument( "cnt_command", metavar="command", nargs=argparse.REMAINDER, help="command and its arguments", ) @cmd_parse(podman_compose, "exec") def compose_exec_parse(parser): parser.add_argument( "-d", "--detach", action="store_true", help="Detached mode: Run container in the background, print new container name.", ) parser.add_argument( "--privileged", action="store_true", default=False, help="Give the process extended Linux capabilities inside the container", ) parser.add_argument( "-u", "--user", type=str, default=None, help="Run as specified username or uid" ) parser.add_argument( "-T", action="store_true", help="Disable pseudo-tty allocation. 
By default `podman-compose run` allocates a TTY.", ) parser.add_argument( "--index", type=int, default=1, help="Index of the container if there are multiple instances of a service", ) parser.add_argument( "-e", "--env", metavar="KEY=VAL", action="append", help="Set an environment variable (can be used multiple times)", ) parser.add_argument( "-w", "--workdir", type=str, default=None, help="Working directory inside the container", ) parser.add_argument("service", metavar="service", nargs=None, help="service name") parser.add_argument( "cnt_command", metavar="command", nargs=argparse.REMAINDER, help="command and its arguments", ) @cmd_parse(podman_compose, ["down", "stop", "restart"]) def compose_parse_timeout(parser): parser.add_argument( "-t", "--timeout", help="Specify a shutdown timeout in seconds. ", type=int, default=None, ) @cmd_parse(podman_compose, ["logs"]) def compose_logs_parse(parser): parser.add_argument( "-f", "--follow", action="store_true", help="Follow log output. The default is false", ) parser.add_argument( "-l", "--latest", action="store_true", help="Act on the latest container podman is aware of", ) parser.add_argument( "-n", "--names", action="store_true", help="Output the container name in the log", ) parser.add_argument( "--since", help="Show logs since TIMESTAMP", type=str, default=None ) parser.add_argument( "-t", "--timestamps", action="store_true", help="Show timestamps." ) parser.add_argument( "--tail", help="Number of lines to show from the end of the logs for each " "container.", type=str, default="all", ) parser.add_argument( "--until", help="Show logs until TIMESTAMP", type=str, default=None ) parser.add_argument( "services", metavar="services", nargs="*", default=None, help="service names" ) @cmd_parse(podman_compose, "systemd") def compose_systemd_parse(parser): parser.add_argument( "-a", "--action", choices=["register", "create-unit", "list", "ls"], default="register", help="create systemd unit file or register compose stack to it", ) @cmd_parse(podman_compose, "pull") def compose_pull_parse(parser): parser.add_argument( "--force-local", action="store_true", default=False, help="Also pull unprefixed images for services which have a build section", ) parser.add_argument( "services", metavar="services", nargs="*", help="services to pull" ) @cmd_parse(podman_compose, "push") def compose_push_parse(parser): parser.add_argument( "--ignore-push-failures", action="store_true", help="Push what it can and ignores images with push failures. 
(not implemented)", ) parser.add_argument( "services", metavar="services", nargs="*", help="services to push" ) @cmd_parse(podman_compose, "ps") def compose_ps_parse(parser): parser.add_argument( "-q", "--quiet", help="Only display container IDs", action="store_true" ) @cmd_parse(podman_compose, ["build", "up"]) def compose_build_up_parse(parser): parser.add_argument( "--pull", help="attempt to pull a newer version of the image", action="store_true", ) parser.add_argument( "--pull-always", help="attempt to pull a newer version of the image, Raise an error even if the image is present locally.", action="store_true", ) parser.add_argument( "--build-arg", metavar="key=val", action="append", default=[], help="Set build-time variables for services.", ) parser.add_argument( "--no-cache", help="Do not use cache when building the image.", action="store_true", ) @cmd_parse(podman_compose, ["build", "up", "down", "start", "stop", "restart"]) def compose_build_parse(parser): parser.add_argument( "services", metavar="services", nargs="*", default=None, help="affected services", ) @cmd_parse(podman_compose, "config") def compose_config_parse(parser): parser.add_argument( "--services", help="Print the service names, one per line.", action="store_true" ) @cmd_parse(podman_compose, "port") def compose_port_parse(parser): parser.add_argument( "--index", type=int, default=1, help="index of the container if there are multiple instances of a service", ) parser.add_argument( "--protocol", choices=["tcp", "udp"], default="tcp", help="tcp or udp", ) parser.add_argument("service", metavar="service", nargs=None, help="service name") parser.add_argument( "private_port", metavar="private_port", nargs=None, type=int, help="private port", ) @cmd_parse(podman_compose, ["pause", "unpause"]) def compose_pause_unpause_parse(parser): parser.add_argument( "services", metavar="services", nargs="*", default=None, help="service names" ) @cmd_parse(podman_compose, ["kill"]) def compose_kill_parse(parser): parser.add_argument( "services", metavar="services", nargs="*", default=None, help="service names" ) parser.add_argument( "-s", "--signal", type=str, help="Signal to send to the container (default 'KILL')", ) parser.add_argument( "-a", "--all", help="Signal all running containers", action="store_true", ) def main(): podman_compose.run() if __name__ == "__main__": main() podman-compose-1.0.6/pytests/000077500000000000000000000000001441451546200161605ustar00rootroot00000000000000podman-compose-1.0.6/pytests/test_volumes.py000066400000000000000000000007161441451546200212670ustar00rootroot00000000000000import pytest from podman_compose import parse_short_mount @pytest.fixture def multi_propagation_mount_str(): return "/foo/bar:/baz:U,Z" def test_parse_short_mount_multi_propagation(multi_propagation_mount_str): expected = { "type": "bind", "source": "/foo/bar", "target": "/baz", "bind": { "propagation": "U,Z", }, } assert parse_short_mount(multi_propagation_mount_str, "/") == expected podman-compose-1.0.6/requirements.txt000066400000000000000000000003511441451546200177300ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pyyaml python-dotenv podman-compose-1.0.6/scripts/000077500000000000000000000000001441451546200161345ustar00rootroot00000000000000podman-compose-1.0.6/scripts/clean_up.sh000077500000000000000000000003641441451546200202640ustar00rootroot00000000000000#!/usr/bin/env bash find . -name "*.pyc" -delete find . -name "__pycache__" -delete find . -name "*.orig" -delete rm -rf .cache/ rm -rf build/ rm -rf builddocs/ rm -rf dist/ rm -rf deb_dist/ rm src/podman_compose.egg-info -rf rm builddocs.zip podman-compose-1.0.6/scripts/make_release.sh000077500000000000000000000002221441451546200211040ustar00rootroot00000000000000#!/usr/bin/env bash ./scripts/uninstall.sh ./scripts/clean_up.sh python3 setup.py register python3 setup.py sdist bdist_wheel twine upload dist/* podman-compose-1.0.6/scripts/uninstall.sh000077500000000000000000000001131441451546200204770ustar00rootroot00000000000000#!/usr/bin/env bash pip3 uninstall podman-compose -y ./scripts/clean_up.sh podman-compose-1.0.6/setup.cfg000066400000000000000000000001231441451546200162620ustar00rootroot00000000000000[bdist_wheel] universal = 1 [metadata] version = attr: podman_compose.__version__ podman-compose-1.0.6/setup.py000066400000000000000000000032431441451546200161610ustar00rootroot00000000000000import os from setuptools import setup try: readme = open(os.path.join(os.path.dirname(__file__), "README.md")).read() except: readme = "" setup( name="podman-compose", description="A script to run docker-compose.yml using podman", long_description=readme, long_description_content_type="text/markdown", classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Intended Audience :: Developers", "Operating System :: OS Independent", "Development Status :: 3 - Alpha", "Topic :: Software Development :: Build Tools", "License :: OSI Approved :: GNU General Public License v2 (GPLv2)", ], keywords="podman, podman-compose", author="Muayyad Alsadi", author_email="alsadi@gmail.com", url="https://github.com/containers/podman-compose", py_modules=["podman_compose"], entry_points={"console_scripts": ["podman-compose = podman_compose:main"]}, include_package_data=True, license="GPL-2.0-only", install_requires=[ "pyyaml", "python-dotenv", ], extras_require={ "devel": [ "flake8", "black", "pylint", "pre-commit", ] } # test_suite='tests', # tests_require=[ # 'coverage', # 'pytest-cov', # 'pytest', # 'tox', # ] ) podman-compose-1.0.6/test-requirements.txt000066400000000000000000000003701441451546200207060ustar00rootroot00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
coverage pytest-cov pytest tox black podman-compose-1.0.6/tests/000077500000000000000000000000001441451546200156075ustar00rootroot00000000000000podman-compose-1.0.6/tests/build/000077500000000000000000000000001441451546200167065ustar00rootroot00000000000000podman-compose-1.0.6/tests/build/README.md000066400000000000000000000007541441451546200201730ustar00rootroot00000000000000# Test podman-compose with build ``` podman-compose build podman-compose up -d curl http://localhost:8080/index.txt curl http://localhost:8000/index.txt podman inspect my-busybox-httpd2 podman-compose down ``` expected output would be something like ``` 2019-09-03T15:16:38+0000 ALT buildno=2 port 8000 2019-09-03T15:16:38+0000 { ... } ``` as you can see we were able to override buildno to be 2 instead of 1, and httpd_port to 8000. NOTE: build labels are not passed to `podman build` podman-compose-1.0.6/tests/build/context/000077500000000000000000000000001441451546200203725ustar00rootroot00000000000000podman-compose-1.0.6/tests/build/context/Dockerfile000066400000000000000000000002271441451546200223650ustar00rootroot00000000000000FROM busybox RUN mkdir -p /var/www/html/ && date -Iseconds > /var/www/html/index.txt CMD ["busybox", "httpd", "-f", "-p", "80", "-h", "/var/www/html"] podman-compose-1.0.6/tests/build/context/Dockerfile-alt000066400000000000000000000004661441451546200231500ustar00rootroot00000000000000FROM busybox ARG buildno=1 ARG httpd_port=80 ARG other_variable=not_set ENV httpd_port ${httpd_port} ENV other_variable ${other_variable} RUN mkdir -p /var/www/html/ && \ echo "ALT buildno=$buildno port=$httpd_port `date -Iseconds`" > /var/www/html/index.txt CMD httpd -f -p "$httpd_port" -h /var/www/html podman-compose-1.0.6/tests/build/docker-compose.yml000066400000000000000000000010131441451546200223360ustar00rootroot00000000000000version: "3" services: web1: build: ./context image: my-busybox-httpd ports: - 8080:80 web2: build: context: ./context dockerfile: Dockerfile-alt labels: mykey: myval args: buildno: 2 httpd_port: 8000 image: my-busybox-httpd2 ports: - 8000:8000 test_build_arg_argument: build: context: ./context dockerfile: Dockerfile-alt image: my-busybox-httpd2 command: env podman-compose-1.0.6/tests/deps/000077500000000000000000000000001441451546200165425ustar00rootroot00000000000000podman-compose-1.0.6/tests/deps/README.md000066400000000000000000000001321441451546200200150ustar00rootroot00000000000000 ``` podman-compose run --rm sleep /bin/sh -c 'wget -O - http://localhost:8000/hosts' ``` podman-compose-1.0.6/tests/deps/docker-compose.yaml000066400000000000000000000007621441451546200223450ustar00rootroot00000000000000version: "3.7" services: web: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/etc/", "-p", "8000"] tmpfs: - /run - /tmp sleep: image: busybox command: ["/bin/busybox", "sh", "-c", "sleep 3600"] depends_on: "web" tmpfs: - /run - /tmp sleep2: image: busybox command: ["/bin/busybox", "sh", "-c", "sleep 3600"] depends_on: - sleep tmpfs: - /run - /tmp podman-compose-1.0.6/tests/env-tests/000077500000000000000000000000001441451546200175375ustar00rootroot00000000000000podman-compose-1.0.6/tests/env-tests/README.md000066400000000000000000000001611441451546200210140ustar00rootroot00000000000000running the following command should give myval2 ``` podman_compose run -l monkey -e ZZVAR1=myval2 env-test ``` podman-compose-1.0.6/tests/env-tests/container-compose.yml000066400000000000000000000002031441451546200237020ustar00rootroot00000000000000version: '3' services: env-test: image: 
busybox command: sh -c "export | grep ZZ" environment: - ZZVAR1=myval1 podman-compose-1.0.6/tests/exit-from/000077500000000000000000000000001441451546200175215ustar00rootroot00000000000000podman-compose-1.0.6/tests/exit-from/README.md000066400000000000000000000003651441451546200210040ustar00rootroot00000000000000We have service named sh1 that exits with code 1 and sh2 that exists with code 2 ``` podman-compose up --exit-code-from=sh1 echo $? ``` the above should give 1. ``` podman-compose up --exit-code-from=sh2 echo $? ``` the above should give 2. podman-compose-1.0.6/tests/exit-from/docker-compose.yaml000066400000000000000000000006701441451546200233220ustar00rootroot00000000000000version: "3" services: too_long: image: busybox command: ["/bin/busybox", "sh", "-c", "sleep 3600; exit 0"] tmpfs: - /run - /tmp sh1: image: busybox command: ["/bin/busybox", "sh", "-c", "sleep 5; exit 1"] tmpfs: - /run - /tmp sh2: image: busybox command: ["/bin/busybox", "sh", "-c", "sleep 5; exit 2"] tmpfs: - /run - /tmp podman-compose-1.0.6/tests/extends/000077500000000000000000000000001441451546200172615ustar00rootroot00000000000000podman-compose-1.0.6/tests/extends/docker-compose.yaml000066400000000000000000000011511441451546200230550ustar00rootroot00000000000000version: "3" services: echo: image: busybox command: ["/bin/busybox", "echo", "Zero"] ports: - '1234:1234' environment: - FOO=original - BAR=original # volumes: # - ./original:/foo # - ./original:/bar echo1: extends: service: echo command: ["/bin/busybox", "echo", "One"] ports: - '12345:12345' # volumes: # - ./local:/bar # - ./local:/baz env1: extends: service: echo command: ["/bin/busybox", "env"] environment: - BAR=local - BAZ=local podman-compose-1.0.6/tests/extends_w_file/000077500000000000000000000000001441451546200206065ustar00rootroot00000000000000podman-compose-1.0.6/tests/extends_w_file/common-services.yml000066400000000000000000000001101441451546200244320ustar00rootroot00000000000000webapp: build: . 
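# This service definition is pulled in from docker-compose.yml via
# `extends: {file: common-services.yml, service: webapp}` (see below);
# the importing file layers environment and cpu_shares on top of it.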
ports: - "8000:8000" volumes: - "/data" podman-compose-1.0.6/tests/extends_w_file/docker-compose.yml000066400000000000000000000003131441451546200242400ustar00rootroot00000000000000version: "3" services: web: extends: file: common-services.yml service: webapp environment: - DEBUG=1 cpu_shares: 5 important_web: extends: web cpu_shares: 10 podman-compose-1.0.6/tests/extends_w_file_subdir/000077500000000000000000000000001441451546200221565ustar00rootroot00000000000000podman-compose-1.0.6/tests/extends_w_file_subdir/docker-compose.yml000066400000000000000000000002041441451546200256070ustar00rootroot00000000000000version: "3" services: web: extends: file: sub/docker-compose.yml service: webapp environment: - DEBUG=1podman-compose-1.0.6/tests/extends_w_file_subdir/sub/000077500000000000000000000000001441451546200227475ustar00rootroot00000000000000podman-compose-1.0.6/tests/extends_w_file_subdir/sub/docker-compose.yml000066400000000000000000000003101441451546200263760ustar00rootroot00000000000000version: "3" services: webapp: build: context: docker/example dockerfile: Dockerfile image: localhost/subdir_test:me ports: - "8000:8000" volumes: - "/data" podman-compose-1.0.6/tests/extends_w_file_subdir/sub/docker/000077500000000000000000000000001441451546200242165ustar00rootroot00000000000000podman-compose-1.0.6/tests/extends_w_file_subdir/sub/docker/example/000077500000000000000000000000001441451546200256515ustar00rootroot00000000000000podman-compose-1.0.6/tests/extends_w_file_subdir/sub/docker/example/Dockerfile000066400000000000000000000000251441451546200276400ustar00rootroot00000000000000FROM busybox as base podman-compose-1.0.6/tests/interpolation/000077500000000000000000000000001441451546200204765ustar00rootroot00000000000000podman-compose-1.0.6/tests/interpolation/.env000066400000000000000000000000621441451546200212650ustar00rootroot00000000000000DOT_ENV_VARIABLE=This value is from the .env file podman-compose-1.0.6/tests/interpolation/docker-compose-colon-question-error.yml000066400000000000000000000003521441451546200302370ustar00rootroot00000000000000version: "3.7" services: variables: image: busybox command: ["/bin/busybox", "sh", "-c", "export | grep EXAMPLE"] environment: EXAMPLE_COLON_QUESTION_ERROR: ${NOT_A_VARIABLE:?Missing variable} podman-compose-1.0.6/tests/interpolation/docker-compose-question-error.yml000066400000000000000000000003431441451546200271270ustar00rootroot00000000000000version: "3.7" services: variables: image: busybox command: ["/bin/busybox", "sh", "-c", "export | grep EXAMPLE"] environment: EXAMPLE_QUESTION_ERROR: ${NOT_A_VARIABLE?Missing variable} podman-compose-1.0.6/tests/interpolation/docker-compose.yml000066400000000000000000000010241441451546200241300ustar00rootroot00000000000000version: "3.7" services: variables: image: busybox command: ["/bin/busybox", "sh", "-c", "export | grep EXAMPLE"] environment: EXAMPLE_VARIABLE: "Host user: $USER" EXAMPLE_BRACES: "Host user: ${USER}" EXAMPLE_COLON_DASH_DEFAULT: ${NOT_A_VARIABLE:-My default} EXAMPLE_DASH_DEFAULT: ${NOT_A_VARIABLE-My other default} EXAMPLE_DOT_ENV: $DOT_ENV_VARIABLE EXAMPLE_LITERAL: This is a $$literal EXAMPLE_EMPTY: $NOT_A_VARIABLE podman-compose-1.0.6/tests/multicompose/000077500000000000000000000000001441451546200203275ustar00rootroot00000000000000podman-compose-1.0.6/tests/multicompose/README.md000066400000000000000000000011331441451546200216040ustar00rootroot00000000000000# Multiple compose files to make sure we get results similar to ``` docker-compose -f d1/docker-compose.yml -f 
d2/docker-compose.yml up -d docker exec -ti d1_web1_1 sh -c 'set' docker exec -ti d1_web2_1 sh -c 'set' curl http://${d1_web1_1}:8001/index.txt curl http://${d1_web1_1}:8002/index.txt ``` we need to verify - project base directory and project name is `d1` - `var12='d1/12.env'` which means `enf_file` was appended not replaced (which means that we normalize to array before merge) - `var2='d1/2.env'` which means that paths inside `d2/docker-compose.yml` directory are relative to `d1` podman-compose-1.0.6/tests/multicompose/d1/000077500000000000000000000000001441451546200206335ustar00rootroot00000000000000podman-compose-1.0.6/tests/multicompose/d1/1.env000066400000000000000000000000161441451546200215020ustar00rootroot00000000000000var1=d1/1.env podman-compose-1.0.6/tests/multicompose/d1/12.env000066400000000000000000000000201441451546200215570ustar00rootroot00000000000000var12=d1/12.env podman-compose-1.0.6/tests/multicompose/d1/2.env000066400000000000000000000000161441451546200215030ustar00rootroot00000000000000var2=d1/2.env podman-compose-1.0.6/tests/multicompose/d1/docker-compose.yml000066400000000000000000000003661441451546200242750ustar00rootroot00000000000000version: '3' services: web1: image: busybox command: busybox httpd -h /var/www/html/ -f -p 8001 volumes: - ./1.env:/var/www/html/index.txt:z env_file: ./1.env labels: l1: v1 environment: - mykey1=myval1 podman-compose-1.0.6/tests/multicompose/d2/000077500000000000000000000000001441451546200206345ustar00rootroot00000000000000podman-compose-1.0.6/tests/multicompose/d2/12.env000066400000000000000000000000201441451546200215600ustar00rootroot00000000000000var12=d2/12.env podman-compose-1.0.6/tests/multicompose/d2/2.env000066400000000000000000000000161441451546200215040ustar00rootroot00000000000000var2=d2/2.env podman-compose-1.0.6/tests/multicompose/d2/docker-compose.yml000066400000000000000000000005221441451546200242700ustar00rootroot00000000000000version: '3' services: web1: image: busybox env_file: ./12.env labels: - l1=v2 - l2=v2 environment: mykey1: myval2 mykey2: myval2 web2: image: busybox command: busybox httpd -h /var/www/html/ -f -p 8002 volumes: - ./2.env:/var/www/html/index.txt:z env_file: ./2.env podman-compose-1.0.6/tests/nethost/000077500000000000000000000000001441451546200172735ustar00rootroot00000000000000podman-compose-1.0.6/tests/nethost/docker-compose.yaml000066400000000000000000000001601441451546200230660ustar00rootroot00000000000000version: '3' services: web: image: busybox command: httpd -f -p 8123 -h /etc/ network_mode: host podman-compose-1.0.6/tests/netprio/000077500000000000000000000000001441451546200172675ustar00rootroot00000000000000podman-compose-1.0.6/tests/netprio/docker-compose.yaml000066400000000000000000000004441441451546200230670ustar00rootroot00000000000000--- # https://github.com/compose-spec/compose-spec/blob/master/spec.md#priority services: app: image: busybox command: top networks: app_net_1: app_net_2: priority: 1000 app_net_3: priority: 100 networks: app_net_1: app_net_2: app_net_3: podman-compose-1.0.6/tests/nets_test1/000077500000000000000000000000001441451546200177005ustar00rootroot00000000000000podman-compose-1.0.6/tests/nets_test1/docker-compose.yml000066400000000000000000000010601441451546200233320ustar00rootroot00000000000000version: "3" services: web1: image: busybox hostname: web1 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html ports: - 8001:8001 volumes: - ./test1.txt:/var/www/html/index.txt:ro,z web2: image: busybox hostname: 
web2 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html ports: - 8002:8001 volumes: - ./test2.txt:/var/www/html/index.txt:ro,z podman-compose-1.0.6/tests/nets_test1/test1.txt000066400000000000000000000000061441451546200214750ustar00rootroot00000000000000test1 podman-compose-1.0.6/tests/nets_test1/test2.txt000066400000000000000000000000061441451546200214760ustar00rootroot00000000000000test2 podman-compose-1.0.6/tests/nets_test2/000077500000000000000000000000001441451546200177015ustar00rootroot00000000000000podman-compose-1.0.6/tests/nets_test2/docker-compose.yml000066400000000000000000000011071441451546200233350ustar00rootroot00000000000000version: "3" networks: mystack: services: web1: image: busybox hostname: web1 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html ports: - 8001:8001 volumes: - ./test1.txt:/var/www/html/index.txt:ro,z web2: image: busybox hostname: web2 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html ports: - 8002:8001 volumes: - ./test2.txt:/var/www/html/index.txt:ro,z podman-compose-1.0.6/tests/nets_test2/test1.txt000066400000000000000000000000061441451546200214760ustar00rootroot00000000000000test1 podman-compose-1.0.6/tests/nets_test2/test2.txt000066400000000000000000000000061441451546200214770ustar00rootroot00000000000000test2 podman-compose-1.0.6/tests/nets_test3/000077500000000000000000000000001441451546200177025ustar00rootroot00000000000000podman-compose-1.0.6/tests/nets_test3/docker-compose.yml000066400000000000000000000020611441451546200233360ustar00rootroot00000000000000version: "3" networks: net1: net2: services: web1: image: busybox #container_name: web1 hostname: web1 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html networks: - net1 ports: - 8001:8001 volumes: - ./test1.txt:/var/www/html/index.txt:ro,z web2: image: busybox #container_name: web2 hostname: web2 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html networks: - net1 - net2 ports: - 8002:8001 volumes: - ./test2.txt:/var/www/html/index.txt:ro,z web3: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html networks: net1: aliases: - alias11 - alias12 net2: aliases: - alias21 volumes: - ./test2.txt:/var/www/html/index.txt:ro,z podman-compose-1.0.6/tests/nets_test3/test1.txt000066400000000000000000000000061441451546200214770ustar00rootroot00000000000000test1 podman-compose-1.0.6/tests/nets_test3/test2.txt000066400000000000000000000000061441451546200215000ustar00rootroot00000000000000test2 podman-compose-1.0.6/tests/no_services/000077500000000000000000000000001441451546200201265ustar00rootroot00000000000000podman-compose-1.0.6/tests/no_services/docker-compose.yaml000066400000000000000000000001661441451546200237270ustar00rootroot00000000000000version: '3' networks: shared-network: driver: bridge ipam: config: - subnet: 172.19.0.0/24 podman-compose-1.0.6/tests/ports/000077500000000000000000000000001441451546200167565ustar00rootroot00000000000000podman-compose-1.0.6/tests/ports/docker-compose.yml000066400000000000000000000016201441451546200224120ustar00rootroot00000000000000version: "3" services: web1: image: busybox hostname: web1 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html ports: - 8001:8001 volumes: - 
./test1.txt:/var/www/html/index.txt:ro,z web2: image: busybox hostname: web2 command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] working_dir: /var/www/html ports: - 8002:8002 - target: 8003 host_ip: 127.0.0.1 published: 8003 protocol: udp - target: 8004 host_ip: 127.0.0.1 published: 8004 protocol: tcp - target: 8005 published: 8005 - target: 8006 protocol: udp - target: 8007 host_ip: 127.0.0.1 volumes: - ./test2.txt:/var/www/html/index.txt:ro,z podman-compose-1.0.6/tests/ports/test1.txt000066400000000000000000000000061441451546200205530ustar00rootroot00000000000000test1 podman-compose-1.0.6/tests/ports/test2.txt000066400000000000000000000000061441451546200205540ustar00rootroot00000000000000test2 podman-compose-1.0.6/tests/seccomp/000077500000000000000000000000001441451546200172405ustar00rootroot00000000000000podman-compose-1.0.6/tests/seccomp/docker-compose.yml000066400000000000000000000003531441451546200226760ustar00rootroot00000000000000version: "3" services: web1: image: busybox command: httpd -f -p 80 -h /var/www/html volumes: - ./docker-compose.yml:/var/www/html/index.html ports: - "8080:80" security_opt: - seccomp:unconfined podman-compose-1.0.6/tests/secrets/000077500000000000000000000000001441451546200172575ustar00rootroot00000000000000podman-compose-1.0.6/tests/secrets/bad_external_name/000077500000000000000000000000001441451546200227075ustar00rootroot00000000000000podman-compose-1.0.6/tests/secrets/bad_external_name/docker-compose.yaml000066400000000000000000000004061441451546200265050ustar00rootroot00000000000000version: "3.8" services: test: image: busybox command: - cat - /run/secrets/new_secret tmpfs: - /run - /tmp secrets: - new_secret secrets: new_secret: external: true name: my_secret podman-compose-1.0.6/tests/secrets/bad_external_target/000077500000000000000000000000001441451546200232555ustar00rootroot00000000000000podman-compose-1.0.6/tests/secrets/bad_external_target/docker-compose.yaml000066400000000000000000000004261441451546200270550ustar00rootroot00000000000000version: "3.8" services: test: image: busybox command: - cat - /run/secrets/my_secret_2 tmpfs: - /run - /tmp secrets: - source: my_secret target: new_secret secrets: my_secret: external: true podman-compose-1.0.6/tests/secrets/docker-compose.yaml000066400000000000000000000017761441451546200230700ustar00rootroot00000000000000--- # echo "sec" | podman secret create my_secret - # echo "sec2" | podman secret create my_secret_2 - # echo "sec3" | podman secret create my_secret_3 - version: "3.8" services: test: image: busybox command: - /tmp/print_secrets.sh tmpfs: - /run - /tmp volumes: - ./print_secrets.sh:/tmp/print_secrets.sh:z secrets: - my_secret - my_secret_2 - source: my_secret_3 target: my_secret_3 uid: '103' gid: '103' mode: 400 - file_secret - source: file_secret target: custom_name - source: file_secret target: /etc/custom_location - source: file_secret target: unused_params_warning uid: '103' gid: '103' mode: 400 secrets: my_secret: external: true my_secret_2: external: true name: my_secret_2 my_secret_3: external: true name: my_secret_3 file_secret: file: ./my_secret podman-compose-1.0.6/tests/secrets/my_secret000066400000000000000000000000361441451546200211730ustar00rootroot00000000000000important-secret-is-important podman-compose-1.0.6/tests/secrets/print_secrets.sh000077500000000000000000000001511441451546200224770ustar00rootroot00000000000000#!/bin/sh ls -la /run/secrets/* ls -la /etc/custom_location cat /run/secrets/* cat /etc/custom_location 
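The web2 service in tests/ports/docker-compose.yml above exercises the long port syntax (target, published, host_ip, protocol). As an illustrative sketch only, not a file shipped in this repository, and assuming the usual compose port semantics, those long-form entries correspond to short-form strings roughly as follows:
```
# hypothetical short-syntax equivalents of web2's long-form ports
ports:
  - 8002:8002                 # plain HOST:CONTAINER, tcp by default
  - 127.0.0.1:8003:8003/udp   # target + published + host_ip, protocol: udp
  - 127.0.0.1:8004:8004       # protocol: tcp is the default, so it can be omitted
  - 8005:8005                 # published + target without host_ip binds all interfaces
  - 8006/udp                  # no published port: an ephemeral host port is chosen
  - 127.0.0.1::8007           # host_ip only: ephemeral host port bound to 127.0.0.1
```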
podman-compose-1.0.6/tests/short/000077500000000000000000000000001441451546200167465ustar00rootroot00000000000000podman-compose-1.0.6/tests/short/data/000077500000000000000000000000001441451546200176575ustar00rootroot00000000000000podman-compose-1.0.6/tests/short/data/redis/000077500000000000000000000000001441451546200207655ustar00rootroot00000000000000podman-compose-1.0.6/tests/short/data/redis/.keep000066400000000000000000000000001441451546200217000ustar00rootroot00000000000000podman-compose-1.0.6/tests/short/data/web/000077500000000000000000000000001441451546200204345ustar00rootroot00000000000000podman-compose-1.0.6/tests/short/data/web/.keep000066400000000000000000000000001441451546200213470ustar00rootroot00000000000000podman-compose-1.0.6/tests/short/docker-compose.yaml000066400000000000000000000022031441451546200225410ustar00rootroot00000000000000version: "3" services: redis: image: redis:alpine command: ["redis-server", "--appendonly yes", "--notify-keyspace-events", "Ex"] volumes: - ./data/redis:/data:z tmpfs: /run1 ports: - "6379" environment: - SECRET_KEY=aabbcc - ENV_IS_SET web: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8000"] working_dir: /var/www/html volumes: - /var/www/html tmpfs: - /run - /tmp web1: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] working_dir: /var/www/html volumes: - ./data/web:/var/www/html:ro,z web2: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] working_dir: /var/www/html volumes: - ~/Downloads/www:/var/www/html:ro,z web3: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8003"] working_dir: /var/www/html volumes: - /var/www/html:/var/www/html:ro,z podman-compose-1.0.6/tests/test_podman_compose.py000066400000000000000000000030241441451546200222220ustar00rootroot00000000000000from pathlib import Path import subprocess def capture(command): proc = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = proc.communicate() return out, err, proc.returncode def test_podman_compose_extends_w_file_subdir(): """ Test that podman-compose can execute podman-compose -f up with extended File which includes a build context :return: """ main_path = Path(__file__).parent.parent command_up = [ "python3", str(main_path.joinpath("podman_compose.py")), "-f", str(main_path.joinpath("tests", "extends_w_file_subdir", "docker-compose.yml")), "up", "-d", ] command_check_container = [ "podman", "container", "ps", "--all", "--format", '"{{.Image}}"', ] command_down = [ "podman", "rmi", "--force", "localhost/subdir_test:me", "docker.io/library/busybox", ] out, err, returncode = capture(command_up) assert 0 == returncode # check container was created and exists out, err, returncode = capture(command_check_container) assert 0 == returncode assert out == b'"localhost/subdir_test:me"\n' out, err, returncode = capture(command_down) # cleanup test image(tags) assert 0 == returncode # check container did not exists anymore out, err, returncode = capture(command_check_container) assert 0 == returncode assert out == b"" podman-compose-1.0.6/tests/testlogs/000077500000000000000000000000001441451546200174535ustar00rootroot00000000000000podman-compose-1.0.6/tests/testlogs/docker-compose.yml000066400000000000000000000004341441451546200231110ustar00rootroot00000000000000version: "3" services: loop1: image: busybox command: ["/bin/sh", "-c", "for i in `seq 1 10000`; do echo \"loop1: $$i\"; sleep 
1; done"] loop2: image: busybox command: ["/bin/sh", "-c", "for i in `seq 1 10000`; do echo \"loop2: $$i\"; sleep 3; done"] podman-compose-1.0.6/tests/ulimit/000077500000000000000000000000001441451546200171125ustar00rootroot00000000000000podman-compose-1.0.6/tests/ulimit/Dockerfile000066400000000000000000000000561441451546200211050ustar00rootroot00000000000000FROM busybox COPY ./ulimit.sh /bin/ulimit.sh podman-compose-1.0.6/tests/ulimit/docker-compose.yaml000066400000000000000000000011311441451546200227040ustar00rootroot00000000000000version: "3" services: ulimit1: image: ulimit_test command: ["ulimit.sh" ] ulimits: nofile=1001 build: context: ./ dockerfile: Dockerfile ulimit2: image: ulimit_test command: ["ulimit.sh" ] ulimits: - nproc=1002:2002 - nofile=1002 build: context: ./ dockerfile: Dockerfile ulimit3: image: ulimit_test command: [ "ulimit.sh" ] ulimits: nofile: 1003 nproc: soft: 1003 hard: 2003 build: context: ./ dockerfile: Dockerfile podman-compose-1.0.6/tests/ulimit/ulimit.sh000077500000000000000000000002451441451546200207550ustar00rootroot00000000000000#!/bin/sh echo "soft process limit" ulimit -S -u echo "hard process limit" ulimit -H -u echo "soft nofile limit" ulimit -S -n echo "hard nofile limit" ulimit -H -n podman-compose-1.0.6/tests/vol/000077500000000000000000000000001441451546200164075ustar00rootroot00000000000000podman-compose-1.0.6/tests/vol/README.md000066400000000000000000000002231441451546200176630ustar00rootroot00000000000000# to test create the two external volumes ``` podman volume create my-app-data podman volume create actual-name-of-volume podman-compose up ``` podman-compose-1.0.6/tests/vol/docker-compose.yaml000066400000000000000000000022621441451546200222070ustar00rootroot00000000000000version: "3" services: web: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8000"] working_dir: /var/www/html restart: always volumes: - /var/www/html tmpfs: - /run - /tmp web1: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8001"] restart: unless-stopped working_dir: /var/www/html volumes: - myvol1:/var/www/html:ro,z web2: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8002"] working_dir: /var/www/html volumes: - myvol2:/var/www/html:ro web3: image: busybox command: ["/bin/busybox", "httpd", "-f", "-h", "/var/www/html", "-p", "8003"] working_dir: /var/www/html volumes: - myvol2:/var/www/html - data:/var/www/html_data - data2:/var/www/html_data2 - data3:/var/www/html_data3 volumes: myvol1: myvol2: labels: mylabel: myval data: name: my-app-data external: true data2: external: name: actual-name-of-volume data3: name: my-app-data3 podman-compose-1.0.6/tests/yamlmagic/000077500000000000000000000000001441451546200175525ustar00rootroot00000000000000podman-compose-1.0.6/tests/yamlmagic/docker-compose.yml000066400000000000000000000007771441451546200232220ustar00rootroot00000000000000version: '3.6' x-deploy-base: &deploy-base restart_policy: delay: 2s x-common: &common network: host deploy: <<: *deploy-base networks: hostnet: {} networks: hostnet: external: true name: host volumes: node-red_data: services: node-red: <<: *common image: busybox command: busybox httpd -h /data -f -p 8080 deploy: <<: *deploy-base resources: limits: cpus: '0.5' memory: 32M volumes: - node-red_data:/data
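The yamlmagic compose file above relies on YAML anchors and merge keys. As a sketch, assuming standard YAML merge-key (`<<`) semantics rather than anything specific to podman-compose, the node-red service resolves roughly to the hand-written form below; note that the merge is shallow, so the service's own deploy mapping replaces the one inherited from &common instead of being deep-merged with it:
```
# hypothetical resolved form of the node-red service after the anchors are expanded
services:
  node-red:
    network: host              # merged in from &common
    networks:
      hostnet: {}              # merged in from &common
    image: busybox
    command: busybox httpd -h /data -f -p 8080
    deploy:                    # local key wins over the deploy merged from &common
      restart_policy:
        delay: 2s              # merged in from &deploy-base
      resources:
        limits:
          cpus: '0.5'
          memory: 32M
    volumes:
      - node-red_data:/data
```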