==> pax_global_header <==
comment=54c625c380ef5500f17460981a3c67b109b6a847

==> mkosi-25.3/.codespellrc <==
[codespell]
skip = ./.git,./docs/style.css

==> mkosi-25.3/.dir-locals.el <==
; Sets emacs variables based on mode.
; A list of (major-mode . ((var1 . value1) (var2 . value2)))
; Mode can be nil, which gives default values.
; Note that we set a wider line width for source files, but for everything else we
; stick to a more conservative 79 characters.
; NOTE: Keep this file in sync with .editorconfig.

((python-mode . ((indent-tabs-mode . nil)
                 (tab-width . 4)
                 (fill-column . 109)
                 (python-indent-def-block-scale . 1)))
 (python-ts-mode . ((indent-tabs-mode . nil)
                    (tab-width . 4)
                    (fill-column . 109)
                    (python-indent-def-block-scale . 1)))
 (sh-mode . ((sh-basic-offset . 4)
             (sh-indentation . 4)))
 (markdown-mode . ((fill-column . 109)))
 (nil . ((indent-tabs-mode . nil)
         (tab-width . 4)
         (fill-column . 79))))

==> mkosi-25.3/.editorconfig <==
root = true

[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
indent_style = space
indent_size = 4

[*.{py,md}]
max_line_length = 109

[*.{yaml,yml}]
indent_size = 2

==> mkosi-25.3/.github/ISSUE_TEMPLATE/bug_report.yml <==
name: Bug Report
description: A report of an error in mkosi
labels: ["bug"]
body:
  - type: markdown
    attributes:
      value: Thanks for taking the time to fill out this bug report!
  - type: input
    id: version
    attributes:
      label: mkosi commit the issue has been seen with
      description: |
        Please do not submit bug reports against older releases, but use your distribution bug tracker.
        Please also test whether your bug has already been resolved on the current git main.
      placeholder: 'main'
    validations:
      required: true
  - type: input
    id: hostdistro
    attributes:
      label: Used host distribution
      description: Used distribution on the host (or in the tools tree) and its version
      placeholder: Fedora 39
    validations:
      required: false
  - type: input
    id: targetdistro
    attributes:
      label: Used target distribution
      description: Used distribution for the image and its version
      placeholder: Fedora 39
    validations:
      required: false
  - type: input
    id: kernel
    attributes:
      label: Linux kernel version used
      description: |
        Please use `uname -r` to get the linux kernel version.
      placeholder: kernel-6.6.8-200.fc39.x86_64
    validations:
      required: false
  - type: dropdown
    id: architecture
    attributes:
      label: CPU architectures issue was seen on
      options:
        - aarch64
        - alpha
        - arm
        - i686
        - ia64
        - loongarch
        - mips
        - parisc
        - ppc (big endian)
        - ppc64 (big endian)
        - ppc64le
        - riscv64
        - s390x
        - sparc
        - sparc64
        - x86_64
        - other
    validations:
      required: false
  - type: textarea
    id: unexpected-behaviour
    attributes:
      label: Unexpected behaviour you saw
    validations:
      required: false
  - type: textarea
    id: config
    attributes:
      label: Used mkosi config
      description: |
        Please add a, preferably minimised, mkosi config to reproduce the issue here.
      placeholder: This will be automatically formatted into code, so no need for backticks.
      render: ini
    validations:
      required: false
  - type: textarea
    id: logs
    attributes:
      label: mkosi output
      description: |
        Please paste the full mkosi debug output here.
      placeholder: This will be automatically formatted into code, so no need for backticks.
      render: sh
    validations:
      required: false

==> mkosi-25.3/.github/ISSUE_TEMPLATE/config.yml <==
---
# vi: ts=2 sw=2 et:
# SPDX-License-Identifier: LGPL-2.1-or-later

blank_issues_enabled: true

contact_links:
  - name: mkosi Matrix room
    url: https://matrix.to/#/#mkosi:matrix.org
    about: Please ask (and answer) questions here, use the issue tracker only for issues.

==> mkosi-25.3/.github/dependabot.yml <==
# SPDX-License-Identifier: LGPL-2.1-or-later

version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "monthly"
    open-pull-requests-limit: 2

==> mkosi-25.3/.github/workflows/ci.yml <==
name: CI

on:
  pull_request:
    branches:
      - main

jobs:
  unit-test:
    runs-on: ubuntu-24.04
    concurrency:
      group: ${{ github.workflow }}-${{ github.ref }}
      cancel-in-progress: true

    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

      - name: Install
        run: |
          sudo apt-get install --assume-yes --no-install-recommends pandoc python3-pytest shellcheck
          python3 -m pip install --break-system-packages --upgrade setuptools wheel pip
          python3 -m pip install --break-system-packages codespell mypy reuse ruff
          npm install -g pyright

      - name: Run ruff check
        run: |
          ruff --version
          ruff check mkosi/ tests/ kernel-install/*.install

      - name: Run ruff format
        run: |
          ruff --version
          if ! ruff format --check --quiet mkosi/ tests/ kernel-install/*.install
          then
            echo "Please run 'ruff format' on the above files or apply the diffs below manually"
            ruff format --check --quiet --diff mkosi/ tests/ kernel-install/*.install
          fi

      - name: Check that tabs are not used in code
        run: sh -c '! git grep -P "\\t" "*.py"'

      - name: Spell Checking (codespell)
        run: |
          codespell --version
          codespell

      - name: License Checking (reuse)
        run: |
          reuse --version
          reuse lint

      - name: Type Checking (mypy)
        run: |
          python3 -m mypy --version
          python3 -m mypy mkosi/ tests/ kernel-install/*.install

      - name: Type Checking (pyright)
        run: |
          pyright --version
          pyright mkosi/ tests/ kernel-install/*.install

      - name: Unit Tests
        run: |
          python3 -m pytest --version
          python3 -m pytest -sv tests/

      - name: Test execution from current working directory
        run: python3 -m mkosi -h

      - name: Test execution from current working directory (sudo call)
        run: sudo python3 -m mkosi -h

      - name: Test venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install .
          testvenv/bin/mkosi -h
          rm -rf testvenv

      - name: Test editable venv installation
        run: |
          python3 -m venv testvenv
          testvenv/bin/python3 -m pip install --upgrade setuptools wheel pip
          testvenv/bin/python3 -m pip install --editable .
          testvenv/bin/mkosi -h
          rm -rf testvenv

      - name: Test zipapp creation
        run: |
          ./tools/generate-zipapp.sh
          ./builddir/mkosi -h
          ./builddir/mkosi documentation

      - name: Run shellcheck on scripts
        run: |
          bash -c 'shopt -s globstar; shellcheck bin/mkosi tools/*.sh'
          bin/mkosi completion bash | shellcheck -

      - name: Test man page generation
        run: tools/make-man-page.sh

  integration-test:
    runs-on: ubuntu-24.04
    needs: unit-test
    concurrency:
      group: ${{ github.workflow }}-${{ matrix.distro }}-${{ matrix.tools }}-${{ github.ref }}
      cancel-in-progress: true

    strategy:
      fail-fast: false
      matrix:
        distro:
          - arch
          - centos
          - debian
          - fedora
          - opensuse
          - ubuntu
        tools:
          - arch
          - centos
          - debian
          - fedora
          - opensuse
          - ubuntu
        exclude:
          # pacman is not packaged in EPEL.
          - distro: arch
            tools: centos
          # apt and debian-keyring are not packaged in EPEL.
          - distro: debian
            tools: centos
          - distro: ubuntu
            tools: centos
          # pacman is not packaged in openSUSE.
          - distro: arch
            tools: opensuse
          # apt, debian-keyring and ubuntu-keyring are not packaged in openSUSE.
          - distro: debian
            tools: opensuse
          - distro: ubuntu
            tools: opensuse

    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
      - uses: ./

      # Freeing up disk space with rm -rf can take multiple minutes. Since we don't need the extra free space
      # immediately, we remove the files in the background. However, we first move them to a different location so that
      # nothing tries to use anything in these directories anymore while we're busy deleting them.
      - name: Free disk space
        run: |
          sudo mv /usr/local /usr/local.trash
          sudo mv /opt/hostedtoolcache /opt/hostedtoolcache.trash
          sudo systemd-run rm -rf /usr/local.trash /opt/hostedtoolcache.trash

      # Make sure the latest changes from the pull request are used.
      - name: Install
        run: sudo ln -svf $PWD/bin/mkosi /usr/bin/mkosi
        working-directory: ./

      - name: Configure
        run: |
          tee mkosi.local.conf <<EOF
          …
          EOF

          tee mkosi.configure <<EOF
          … >&2
          cat
          EOF
          chmod +x mkosi.configure

          # prepare and postinst are already used in CI
          for script in sync build finalize postoutput clean
          do
              [[ -f "mkosi.${script}" ]] && exit 1

              tee "mkosi.${script}" <<TOK
          … >&2
          TOK
              chmod +x "mkosi.${script}"
          done

      - name: Build tools tree
        run: sudo mkosi -f sandbox true

      - name: Build image
        run: sudo mkosi --distribution ${{ matrix.distro }} -f

      - name: Run integration tests
        run: |
          sudo mkosi sandbox \
              timeout -k 30 1h \
              python3 -m pytest \
              --tb=no \
              --capture=no \
              --verbose \
              -m integration \
              --distribution ${{ matrix.distro }} \
              tests/

==> mkosi-25.3/.github/workflows/codeql.yml <==
---
# vi: ts=2 sw=2 et:
#
name: "CodeQL"

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-24.04
    concurrency:
      group: ${{ github.workflow }}-${{ matrix.language }}-${{ github.ref }}
      cancel-in-progress: true
    permissions:
      actions: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: ['python']

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

      - name: Initialize CodeQL
        uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169
        with:
          languages: ${{ matrix.language }}
          queries: +security-extended,security-and-quality

      - name: Autobuild
        uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169
==> mkosi-25.3/.github/workflows/differential-shellcheck.yml <==
---
# https://github.com/redhat-plumbers-in-action/differential-shellcheck#readme

name: Differential ShellCheck

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-24.04

    permissions:
      security-events: write

    steps:
      - name: Repository checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0

      - name: Differential ShellCheck
        uses: redhat-plumbers-in-action/differential-shellcheck@cc6721c45a8800cc666de45493545a07a638d121
        with:
          token: ${{ secrets.GITHUB_TOKEN }}

==> mkosi-25.3/.gitignore <==
.venv
*.cache-pre-dev
*.cache-pre-inst
.cache
.mkosi.1
.mkosi-addon.1
.mkosi-initrd.1
.mkosi-sandbox.1
.mypy_cache/
.project
.pydevproject
.pytest_cache/
/.mkosi-*
/SHA256SUMS
/SHA256SUMS.gpg
/build
/dist
/mkosi.build
/mkosi.egg-info
/mkosi.cache
/mkosi.output
/mkosi.nspawn
/mkosi.rootpw
mkosi.local
mkosi.local.conf
mkosi.tools
/mkosi.key
/mkosi.crt
__pycache__
blog/output
blog/pelicanconf.py
blog/publishconf.py

==> mkosi-25.3/.mailmap <==
Jörg Behrmann
Neal Gompa (ニール・ゴンパ)

==> mkosi-25.3/LICENSES/GPL-2.0-only.txt <==
[Verbatim text of the GNU General Public License, version 2 (June 1991), as published by the Free Software Foundation.]
==> mkosi-25.3/LICENSES/LGPL-2.1-or-later.txt <==
[Verbatim text of the GNU Lesser General Public License, version 2.1 (February 1999), as published by the Free Software Foundation.]
==> mkosi-25.3/LICENSES/OFL-1.1.txt <==
Copyright 2014 The Heebo Project Authors (https://github.com/OdedEzer/heebo)

[Verbatim text of the SIL Open Font License, version 1.1 (26 February 2007), under which the Heebo font is licensed.]

==> mkosi-25.3/LICENSES/PSF-2.0.txt <==
[Verbatim text of the Python Software Foundation License, version 2.]
mkosi-25.3/MANIFEST.in000066400000000000000000000000201474711424400143230ustar00rootroot00000000000000include LICENSE
mkosi-25.3/NEWS.md000077700000000000000000000000001474711424400223612mkosi/resources/man/mkosi.news.7.mdustar00rootroot00000000000000
mkosi-25.3/README.md000066400000000000000000000135131474711424400140570ustar00rootroot00000000000000# mkosi — Build Bespoke OS Images

A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that generates customized disk images with a number of bells and whistles.

For a longer description and available features and options, see the [man page](mkosi/resources/man/mkosi.1.md).

# Installation

You can install mkosi from your distribution using its package manager or install the development version from git. If you install mkosi using your distribution's package manager, make sure it installs at least mkosi v16 or newer (use `mkosi --version` to check). If your distribution only packages an older version of mkosi, it is recommended to install mkosi using one of the alternative installation methods listed below instead.

## Running mkosi from the repository

To run mkosi straight from its git repository, you can invoke the shim `bin/mkosi`. The `MKOSI_INTERPRETER` environment variable can be set when using the `bin/mkosi` shim to configure the Python interpreter used to execute mkosi.

The shim can be symlinked to e.g. `~/.local/bin` to make it accessible from the `PATH`. Note that to make this work you might have to add `~/.local/bin` to your user's `PATH`.

```shell
git clone https://github.com/systemd/mkosi
ln -s $PWD/mkosi/bin/mkosi ~/.local/bin/mkosi
mkosi --version
```

## Python installation methods

mkosi can also be installed straight from the git repository URL using `pipx`:

```shell
pipx install git+https://github.com/systemd/mkosi.git
mkosi --version
```

which will transparently install mkosi into a Python virtual environment and a mkosi binary to `~/.local/bin`. This is, up to the path of the virtual environment and the mkosi binary, equivalent to

```shell
python3 -m venv mkosivenv
mkosivenv/bin/pip install git+https://github.com/systemd/mkosi.git
mkosivenv/bin/mkosi --version
```

You can also package mkosi as a [zipapp](https://docs.python.org/3/library/zipapp.html) that you can deploy anywhere in your `PATH`. Running this will leave a `mkosi` binary in `builddir/`:

```shell
git clone https://github.com/systemd/mkosi
cd mkosi
tools/generate-zipapp.sh
builddir/mkosi --version
```

Besides the mkosi binary, you can also call mkosi via

```shell
python3 -m mkosi
```

when not installed as a zipapp.

Please note that the Python module exists solely for the usage of the mkosi binary and is not to be considered a public API.

## kernel-install plugins

mkosi can also be used as a kernel-install plugin to build initrds and addons. It is recommended to use only one of these two plugins at a given time.

### UKI plugin

To enable this feature, install `kernel-install/50-mkosi.install` into `/usr/lib/kernel/install.d`. Extra distro configuration for the initrd can be configured in `/usr/lib/mkosi-initrd`. Users can add their own customizations in `/etc/mkosi-initrd`. A full self-contained UKI will be built and installed.

Once installed, the mkosi plugin can be enabled by writing `initrd_generator=mkosi-initrd` and `layout=uki` to `/usr/lib/kernel/install.conf` or to `/etc/kernel/install.conf`.

### Addon plugin

To enable this feature, install `kernel-install/51-mkosi-addon.install` into `/usr/lib/kernel/install.d`.
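As a sketch (run from the root of a mkosi checkout; the destination directory is the one named above):

```shell
sudo install -D -m 0755 kernel-install/51-mkosi-addon.install \
    /usr/lib/kernel/install.d/51-mkosi-addon.install
```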
Extra distro configuration for the addon can be configured in `/usr/lib/mkosi-addon`. Users can add their own customizations in `/etc/mkosi-addon` and `/run/mkosi-addon`. Note that unless at least one of the last two directories is present, the plugin will not operate.

This plugin is useful to enhance a vendor-provided UKI with local-only modifications.

# Hacking on mkosi

To hack on mkosi itself you will also need [mypy](https://github.com/python/mypy), for type checking, and [pytest](https://github.com/pytest-dev/pytest), to run tests. We check tests and typing in CI (see `.github/workflows`), but you can run the tests locally as well.

# References

* [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/)
* [A re-introduction to mkosi — A Tool for Generating OS Images](https://0pointer.net/blog/a-re-introduction-to-mkosi-a-tool-for-generating-os-images.html)
* [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN (2017)
* [systemd-repart: Building Discoverable Disk Images](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images) and [mkosi: Building Bespoke Operating System Images](https://media.ccc.de/v/all-systems-go-2023-190-mkosi-building-bespoke-operating-system-images) talks at All Systems Go! 2023
* [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/), an article in Fedora Magazine (2023)
* [Building USIs with mkosi](https://overhead.neocities.org/blog/build-usi-mkosi/)
* [Constellation 💖 mkosi — Minimal TCB, tailor-made for measured boot](https://www.edgeless.systems/blog/constellation-mkosi-minimal-tcb-tailor-made-for-measured-boot/)
* [Streamlining kernel hacking with mkosi-kernel](https://video.fosdem.org/2024/ub5132/fosdem-2024-2209-streamlining-kernel-hacking-with-mkosi-kernel.av1.webm)
* [mkosi-initrd: Building initrds out of distribution packages](https://video.fosdem.org/2024/ua2118/fosdem-2024-2888-mkosi-initrd-building-initrds-out-of-distribution-packages.av1.webm)
* [Running systemd integration tests with mkosi](https://video.fosdem.org/2024/ud2208/fosdem-2024-3431-running-systemd-integration-tests-with-mkosi.av1.webm)
* [Arch Linux rescue image with mkosi](https://swsnr.de/archlinux-rescue-image-with-mkosi)
* [Building vagrant images with mkosi](https://vdwaa.nl/mkosi-vagrant-images.html#mkosi-vagrant-images)

## Community

Find us on Matrix at [#mkosi:matrix.org](https://matrix.to/#/#mkosi:matrix.org).

mkosi-25.3/REUSE.toml000066400000000000000000000031161474711424400143560ustar00rootroot00000000000000# NOTE: This project does not attribute contributors individually. Instead refer to `git log --format="%an <%aE>" | sort -u` for a list of individual contributors.
version = 1 SPDX-PackageName = "mkosi" SPDX-PackageSupplier = "systemd" SPDX-PackageDownloadLocation = "https://github.com/systemd/mkosi" [[annotations]] path = [ ".codespellrc", ".dir-locals.el", ".editorconfig", "bin/mkosi", "docs/CNAME", "**.gitignore", "**.bash", "**.chroot", "**.conf", "**.css", "**.html", "**.in", "**.install", "**.json", "**.mailmap", "**.md", "**.png", "**.postinst", "**.prepare", "**.preset", "**.py", "**.service", "**.sources", "**.sh", "**.svg", "**.toml", "**.yaml", "**.yml", "**.zsh", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "LGPL-2.1-or-later" [[annotations]] path = [ "mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-md.rules", "mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-dm.rules", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "GPL-2.0-only" [[annotations]] path = [ "mkosi/backport.py", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "PSF-2.0" [[annotations]] path = [ "docs/fonts/heebo-bold.woff", "docs/fonts/heebo-regular.woff", ] precedence = "aggregate" SPDX-FileCopyrightText = "Mkosi Contributors" SPDX-License-Identifier = "OFL-1.1" mkosi-25.3/action.yaml000066400000000000000000000061301474711424400147360ustar00rootroot00000000000000name: setup-mkosi description: Install mkosi runs: using: composite steps: - name: Permit unprivileged access to kvm, vhost-vsock and vhost-net devices shell: bash run: | sudo mkdir -p /etc/tmpfiles.d sudo cp /usr/lib/tmpfiles.d/static-nodes-permissions.conf /etc/tmpfiles.d/ sudo sed -i '/kvm/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo sed -i '/vhost/s/0660/0666/g' /etc/tmpfiles.d/static-nodes-permissions.conf sudo tee /etc/udev/rules.d/99-kvm4all.rules <<- EOF KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm" KERNEL=="vhost-vsock", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-vsock" KERNEL=="vhost-net", GROUP="kvm", MODE="0666", OPTIONS+="static_node=vhost-net" EOF sudo udevadm control --reload-rules sudo modprobe kvm sudo modprobe vhost_vsock sudo modprobe vhost_net [[ -e /dev/kvm ]] && sudo udevadm trigger --name-match=kvm sudo udevadm trigger --name-match=vhost-vsock sudo udevadm trigger --name-match=vhost-net [[ -e /dev/kvm ]] && sudo chmod 666 /dev/kvm sudo chmod 666 /dev/vhost-vsock sudo chmod 666 /dev/vhost-net lsmod [[ -e /dev/kvm ]] && ls -l /dev/kvm ls -l /dev/vhost-* id - name: Check clock source shell: bash run: cat /sys/devices/system/clocksource/clocksource0/current_clocksource - name: Show environment shell: bash run: env - name: Show CPU shell: bash run: lscpu - name: Show memory shell: bash run: lsmem - name: Enable unprivileged user namespaces shell: bash run: | sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_unconfined=0 sudo sysctl --ignore --write kernel.apparmor_restrict_unprivileged_userns=0 - name: Create missing mountpoints shell: bash run: | for p in /etc/pki /etc/ssl /etc/ca-certificates /var/lib/ca-certificates /etc/crypto-policies; do if [[ ! -e "$p" ]]; then sudo mkdir -p "$p" fi done # Both the unix-chkpwd and swtpm profiles are broken (https://gitlab.com/apparmor/apparmor/-/issues/402) so let's # just disable and remove apparmor completely. It's not relevant in this context anyway. # TODO: Remove if https://github.com/actions/runner-images/issues/10015 is ever fixed. 
- name: Disable and mask apparmor service shell: bash run: | # This command fails with a non-zero error code even though it unloads the apparmor profiles. # https://gitlab.com/apparmor/apparmor/-/issues/403 sudo aa-teardown || true sudo apt-get remove apparmor - name: Install shell: bash run: sudo ln -svf ${{ github.action_path }}/bin/mkosi /usr/bin/mkosi - name: Dependencies shell: bash run: | sudo apt-get install --assume-yes --no-install-recommends \ debian-archive-keyring \ dnf \ makepkg \ pacman-package-manager \ zypper mkosi-25.3/bin/000077500000000000000000000000001474711424400133455ustar00rootroot00000000000000mkosi-25.3/bin/mkosi000077500000000000000000000013531474711424400144170ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: LGPL-2.1-or-later set -e PYTHONPATH="$(dirname "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")")" export PYTHONPATH command="$(basename "${BASH_SOURCE[0]//-/.}")" if [ -z "$MKOSI_INTERPRETER" ]; then # Note the check seems to be inverted here because the if branch is # executed when the exit status is 0 which is equal to False in Python. if python3 -c "import sys; sys.exit(sys.version_info < (3, 9))"; then MKOSI_INTERPRETER=python3 elif command -v python3.9 >/dev/null; then MKOSI_INTERPRETER=python3.9 else echo "mkosi needs python 3.9 or newer (found $(python3 --version))" exit 1 fi fi exec "$MKOSI_INTERPRETER" -B -m "$command" "$@" mkosi-25.3/bin/mkosi-addon000077700000000000000000000000001474711424400165352mkosiustar00rootroot00000000000000mkosi-25.3/bin/mkosi-initrd000077700000000000000000000000001474711424400167412mkosiustar00rootroot00000000000000mkosi-25.3/bin/mkosi-sandbox000077700000000000000000000000001474711424400171062mkosiustar00rootroot00000000000000mkosi-25.3/blog/000077500000000000000000000000001474711424400135205ustar00rootroot00000000000000mkosi-25.3/blog/content/000077500000000000000000000000001474711424400151725ustar00rootroot00000000000000mkosi-25.3/blog/content/a-reintroduction-to-mkosi.md000066400000000000000000000366561474711424400225620ustar00rootroot00000000000000Title: A re-introduction to mkosi -- A Tool for Generating OS Images Date: 2024-01-10 > This is a guest post written by Daan De Meyer, systemd and mkosi > maintainer Almost 7 years ago, Lennart first [wrote](https://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) about `mkosi` on this blog. Some years ago, I took over development and there's been a huge amount of changes and improvements since then. So I figure this is a good time to re-introduce `mkosi`. [`mkosi`](https://github.com/systemd/mkosi) stands for *Make Operating System Image*. It generates OS images that can be used for a variety of purposes. If you prefer watching a video over reading a blog post, you can also watch my [presentation](https://www.youtube.com/watch?v=6EelcbjbUa8) on `mkosi` at All Systems Go 2023. ## What is mkosi? `mkosi` was originally written as a tool to simplify hacking on systemd and for experimenting with images using many of the new concepts being introduced in systemd at the time. In the meantime, it has evolved into a general purpose image builder that can be used in a multitude of scenarios. Instructions to install `mkosi` can be found in its [readme](https://github.com/systemd/mkosi/blob/main/README.md). We recommend running the latest version to take advantage of all the latest features and bug fixes. You'll also need `bubblewrap` and the package manager of your favorite distribution to get started. 
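For example, on a Fedora host that amounts to a single extra package (a sketch; the package there is simply named `bubblewrap`):

```sh
$ sudo dnf install bubblewrap
```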
At its core, the workflow of `mkosi` can be divided into 3 steps:

1. Generate an OS tree for some distribution by installing a set of packages.
2. Package up that OS tree in a variety of output formats.
3. (Optionally) Boot the resulting image in `qemu` or `systemd-nspawn`.

Images can be built for any of the following distributions:

- Fedora Linux
- Ubuntu
- OpenSUSE
- Debian
- Arch Linux
- CentOS Stream
- RHEL
- Rocky Linux
- Alma Linux

And the following output formats are supported:

- GPT disk images built with `systemd-repart`
- Tar archives
- CPIO archives (for building initramfs images)
- USIs (Unified System Images which are full OS images packed in a UKI)
- Sysext, confext and portable images
- Directory trees

For example, to build an Arch Linux GPT disk image and boot it in `qemu`, you can run the following command:

```sh
$ mkosi -d arch -p systemd -p udev -p linux -t disk qemu
```

To instead boot the image in systemd-nspawn, replace `qemu` with `boot`:

```sh
$ mkosi -d arch -p systemd -p udev -p linux -t disk boot
```

The actual image can be found in the current working directory named `image.raw`. However, using a separate output directory is recommended, which is as simple as running `mkdir mkosi.output`.

To rebuild the image after it's already been built once, add `-f` to the command line before the verb. Any arguments passed after the verb are forwarded to either `systemd-nspawn` or `qemu` itself. To build the image without booting it, pass `build` instead of `boot` or `qemu` or don't pass a verb at all.

By default, the disk image will have an appropriately sized root partition and an ESP partition, but the partition layout and contents can be fully customized using `systemd-repart` by creating partition definition files in `mkosi.repart/`. This allows you to customize the partitions as you see fit:

- The root partition can be encrypted.
- Partition sizes can be customized.
- Partitions can be protected with signed dm-verity.
- You can opt out of having a root partition and only have a /usr partition instead.
- You can add various other partitions, e.g. an XBOOTLDR partition or a swap partition.
- ...

As part of building the image, we'll run various tools such as `systemd-sysusers`, `systemd-firstboot`, `depmod`, `systemd-hwdb` and more to make sure the image is set up correctly.

## Configuring mkosi image builds

Naturally, with extended use you don't want to specify all settings on the command line every time, so `mkosi` supports configuration files where the same settings that can be specified on the command line can be written down.

For example, the command we used above can be written down in a configuration file `mkosi.conf`:

```conf
[Distribution]
Distribution=arch

[Output]
Format=disk

[Content]
Packages=
        systemd
        udev
        linux
```

Like systemd, `mkosi` uses INI configuration files. We also support dropins which can be placed in `mkosi.conf.d`. Configuration files can also be conditionalized using the `[Match]` section. For example, to only install a specific package on Arch Linux, you can write the following to `mkosi.conf.d/10-arch.conf`:

```conf
[Match]
Distribution=arch

[Content]
Packages=pacman
```

Because not everything you need will be supported in `mkosi`, we support running scripts at various points during the image build process where all extra image customization can be done. For example, if it is found, `mkosi.postinst` is called after packages have been installed.
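As a hypothetical illustration (the file dropped into the image is just an example), a `mkosi.postinst` that adds an extra file to the image could look like this:

```sh
#!/bin/sh
set -e

# $BUILDROOT points at the root of the image being built when the
# script runs on the host.
mkdir -p "$BUILDROOT/etc/issue.d"
echo "Built with mkosi" >"$BUILDROOT/etc/issue.d/50-mkosi.issue"
```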
Scripts are executed on the host system by default (in a sandbox), but can be executed inside the image by suffixing the script with `.chroot`, so if `mkosi.postinst.chroot` is found it will be executed inside the image.

To add extra files to the image, you can place them in `mkosi.extra` in the source directory and they will be automatically copied into the image after packages have been installed.

## Bootable images

If the necessary packages are installed, `mkosi` will automatically generate a UEFI/BIOS bootable image. As `mkosi` is a systemd project, it will always build [UKIs](https://uapi-group.org/specifications/specs/unified_kernel_image/) (Unified Kernel Images), except if the image is BIOS-only (since UKIs cannot be used on BIOS).

The initramfs is built like a regular image by installing distribution packages and packaging them up in a CPIO archive instead of a disk image. Specifically, we do not use `dracut`, `mkinitcpio` or `initramfs-tools` to generate the initramfs from the host system. `ukify` is used to assemble all the individual components into a UKI.

If you don't want `mkosi` to generate a bootable image, you can set `Bootable=no` to explicitly disable this logic.

## Using mkosi for development

The main requirement to use `mkosi` for development is that we can build our source code against the image we're building and install it into the image we're building.

`mkosi` supports this via build scripts. If a script named `mkosi.build` (or `mkosi.build.chroot`) is found, we'll execute it as part of the build. Any files put by the build script into `$DESTDIR` will be installed into the image. Required build dependencies can be installed using the `BuildPackages=` setting. These packages are installed into an overlay which is put on top of the image when running the build script, so they are available while the build script runs but don't end up in the final image.

An example `mkosi.build.chroot` script for a project using `meson` could look as follows:

```sh
#!/bin/sh
meson setup "$BUILDDIR" "$SRCDIR"
ninja -C "$BUILDDIR"
if [ "$WITH_TESTS" != "0" ]; then
    meson test -C "$BUILDDIR"
fi
meson install -C "$BUILDDIR"
```

Now, every time the image is built, the build script will be executed and the results will be installed into the image. The `$BUILDDIR` environment variable points to a directory that can be used as the build directory for build artifacts to allow for incremental builds if the build system supports it.

Of course, downloading all packages from scratch every time and re-installing them every time the image is built is rather slow, so `mkosi` supports two modes of caching to speed things up.

The first caching mode caches all downloaded packages so they don't have to be downloaded again on subsequent builds. Enabling this is as simple as running `mkdir mkosi.cache`.

The second mode of caching caches the image after all packages have been installed but before running the build script. On subsequent builds, `mkosi` will copy the cache instead of reinstalling all packages from scratch. This mode can be enabled using the `Incremental=` setting. While there is some rudimentary cache invalidation, the cache can also forcibly be rebuilt by specifying `-ff` on the command line instead of `-f`.

Note that when running on a btrfs filesystem, `mkosi` will automatically use subvolumes for the cached images which can be snapshotted on subsequent builds for even faster rebuilds. We'll also use reflinks to do copy-on-write copies where possible.
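Both caching mechanisms can also be enabled from configuration; a minimal sketch (the section that `Incremental=` lives in may differ between mkosi versions):

```conf
[Output]
CacheDirectory=mkosi.cache
Incremental=yes
```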
With this setup, by running `mkosi -f qemu` in the systemd repository, it takes about 40 seconds to go from a source code change to a root shell in a virtual machine running the latest systemd with your change applied. This makes it very easy to test changes to systemd in a safe environment without risk of breaking your host system.

Of course, while 40 seconds is not a very long time, it's still more than we'd like, especially if all we're doing is modifying the kernel command line. That's why we have the `KernelCommandLineExtra=` option to configure kernel command line options that are passed to the container or virtual machine at runtime instead of being embedded into the image. These extra kernel command line options are picked up when the image is booted with qemu's direct kernel boot (using `-append`), but also when booting a disk image in UEFI mode (using SMBIOS). The same applies to systemd credentials (using the `Credentials=` setting). These settings allow configuring the image without having to rebuild it, which means that you only have to run `mkosi qemu` or `mkosi boot` again afterwards to apply the new settings.

## Building images without root privileges and loop devices

By using `newuidmap`/`newgidmap` and `systemd-repart`, `mkosi` is able to build images without needing root privileges. As long as proper subuid and subgid mappings are set up for your user in `/etc/subuid` and `/etc/subgid`, you can run `mkosi` as your regular user without having to switch to `root`.

Note that as of the writing of this blog post this only applies to the `build` and `qemu` verbs. Booting the image in a `systemd-nspawn` container with `mkosi boot` still needs root privileges. We're hoping to fix this in a future systemd release.

Regardless of whether you're running `mkosi` with root or without root, almost every tool we execute is invoked in a sandbox to isolate as much of the build process from the host as possible. For example, `/etc` and `/var` from the host are not available in this sandbox, to avoid host configuration inadvertently affecting the build.

Because `systemd-repart` can build disk images without loop devices, `mkosi` can run from almost any environment, including containers. All that's needed is a UID range with 65536 UIDs available, either via running as the root user or via `/etc/subuid` and `newuidmap`. In a future systemd release, we're hoping to provide an alternative to `newuidmap` and `/etc/subuid` to allow running `mkosi` from all containers, even those with only a single UID available.

## Supporting older distributions

mkosi depends on very recent versions of various systemd tools (v254 or newer). To support older distributions, we implemented so-called tools trees. In short, `mkosi` can first build a tools image for you that contains all required tools to build the actual image. This can be enabled by adding `ToolsTree=default` to your mkosi configuration. Building a tools image does not require a recent version of systemd.

In the systemd mkosi configuration, we automatically use a tools tree if we detect your distribution does not have the minimum required systemd version installed.

## Configuring variants of the same image using profiles

Profiles can be defined in the `mkosi.profiles/` directory. The profile to use can be selected using the `Profile=` setting (or `--profile=`) on the command line. A profile allows you to bundle various settings behind a single recognizable name.
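As a minimal sketch (the profile name and package list here are hypothetical), a profile is just a directory of extra configuration that is only applied when that profile is selected:

```conf
# mkosi.profiles/debug/mkosi.conf
[Content]
Packages=
        gdb
        strace
```

which would then be picked up by running `mkosi --profile debug`.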
Profiles can also be matched on if you want to apply some settings only to a few profiles. For example, you could have a `bootable` profile that sets `Bootable=yes`, adds the `linux` and `systemd-boot` packages and configures `Format=disk` to end up with a bootable disk image when passing `--profile bootable` on the mkosi command line.

## Building system extension images

[System extension](https://uapi-group.org/specifications/specs/extension_image/) images may, dynamically at runtime, extend the base system with an overlay containing additional files.

To build system extensions with `mkosi`, we need a base image on top of which we can build our extension.

To keep things manageable, we'll make use of `mkosi`'s support for building multiple images so that we can build our base image and system extension in one go.

We start by creating a temporary directory with a base configuration file `mkosi.conf` with some shared settings:

```conf
[Output]
OutputDirectory=mkosi.output
CacheDirectory=mkosi.cache
```

Now let's continue with the base image definition by writing the following to `mkosi.images/base/mkosi.conf`:

```conf
[Output]
Format=directory

[Content]
CleanPackageMetadata=no
Packages=systemd
         udev
```

We use the `directory` output format here instead of the `disk` output so that we can build our extension without needing root privileges.

Now that we have our base image, we can define a sysext that builds on top of it by writing the following to `mkosi.images/btrfs/mkosi.conf`:

```conf
[Config]
Dependencies=base

[Output]
Format=sysext
Overlay=yes

[Content]
BaseTrees=%O/base
Packages=btrfs-progs
```

`BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to only package the files added on top of the base tree.

We can't sign the extension image without a key. We can generate one by running `mkosi genkey` which will generate files that are automatically picked up when building the image.

Finally, you can build the base image and the extensions by running `mkosi -f`. You'll find `btrfs.raw` in `mkosi.output` which is the extension image.

## Various other interesting features

- To sign any generated UKIs for secure boot, put your secure boot key and certificate in `mkosi.key` and `mkosi.crt` and enable the `SecureBoot=` setting. You can also run `mkosi genkey` to have `mkosi` generate a key and certificate itself.
- The `Ephemeral=` setting can be enabled to boot the image in an ephemeral copy that is thrown away when the container or virtual machine exits.
- `ShimBootloader=` and `BiosBootloader=` settings are available to configure shim and grub installation if needed.
- `mkosi` can boot directory trees in a virtual machine using `virtiofsd`. This is very useful for quickly rebuilding an image and booting it as the image does not have to be packed up as a disk image.
- ...

There are many more features that we won't go over in detail here in this blog post. Learn more about those by reading the [documentation](https://github.com/systemd/mkosi/blob/main/mkosi/resources/man/mkosi.1.md).
## Conclusion I'll finish with a bunch of links to more information about `mkosi` and related tooling: - [Github repository](https://github.com/systemd/mkosi) - [Building RHEL and RHEL UBI images with mkosi](https://fedoramagazine.org/create-images-directly-from-rhel-and-rhel-ubi-package-using-mkosi/) - [My presentation on systemd-repart at ASG 2023](https://media.ccc.de/v/all-systems-go-2023-191-systemd-repart-building-discoverable-disk-images) - [mkosi's Matrix channel](https://matrix.to/#/#mkosi:matrix.org). - [systemd's mkosi configuration](https://raw.githubusercontent.com/systemd/systemd/main/mkosi.conf) - [mkosi's mkosi configuration](https://github.com/systemd/systemd/tree/main/mkosi.conf.d) mkosi-25.3/docs/000077500000000000000000000000001474711424400135255ustar00rootroot00000000000000mkosi-25.3/docs/CNAME000066400000000000000000000000201474711424400142630ustar00rootroot00000000000000mkosi.systemd.iomkosi-25.3/docs/_data/000077500000000000000000000000001474711424400145755ustar00rootroot00000000000000mkosi-25.3/docs/_data/documentation_page.json000066400000000000000000000002761474711424400213420ustar00rootroot00000000000000[ { "category": "Documentation", "title": "A longer description and available features and options", "url": "https://github.com/systemd/mkosi/blob/main/mkosi/resources/man/mkosi.1.md" } ] mkosi-25.3/docs/_data/project_pages.json000066400000000000000000000007701474711424400203210ustar00rootroot00000000000000[ { "category": "Project", "title": "Brand", "url": "https://brand.systemd.io/" }, { "category": "Project", "title": "Releases", "url": "https://github.com/systemd/mkosi/releases" }, { "category": "Project", "title": "GitHub Project Page", "url": "https://github.com/systemd/mkosi" }, { "category": "Project", "title": "Issues", "url": "https://github.com/systemd/mkosi/issues" }, { "category": "Project", "title": "Pull Requests", "url": "https://github.com/systemd/mkosi/pulls" } ] mkosi-25.3/docs/_includes/000077500000000000000000000000001474711424400154725ustar00rootroot00000000000000mkosi-25.3/docs/_includes/footer.html000066400000000000000000000003211474711424400176520ustar00rootroot00000000000000 mkosi-25.3/docs/_includes/head.html000066400000000000000000000011441474711424400172610ustar00rootroot00000000000000 {% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %} mkosi-25.3/docs/_includes/header.html000066400000000000000000000004741474711424400176150ustar00rootroot00000000000000 mkosi-25.3/docs/_layouts/000077500000000000000000000000001474711424400153645ustar00rootroot00000000000000mkosi-25.3/docs/_layouts/default.html000066400000000000000000000004011474711424400176710ustar00rootroot00000000000000 {% include head.html %} {% include header.html %}
{{ content }}
{% include footer.html %} mkosi-25.3/docs/_layouts/forward.html000066400000000000000000000016041474711424400177170ustar00rootroot00000000000000 Redirecting to {{ page.target }} {% include header.html %}

This document has moved.
Redirecting to {{ page.target }}.

mkosi-25.3/docs/assets/000077500000000000000000000000001474711424400150275ustar00rootroot00000000000000
mkosi-25.3/docs/assets/systemd-logo.svg000066400000000000000000000060601474711424400202000ustar00rootroot00000000000000
mkosi-25.3/docs/building-rpms-from-source.md000066400000000000000000000154341474711424400210710ustar00rootroot00000000000000---
title: Building RPMs from source with mkosi
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Building RPMs from source with mkosi

If you want to build an RPM from source and install it within a mkosi image, you can do that with mkosi itself without using `mock`. The steps required are as follows:

1. Install `BuildRequires` dependencies in the build overlay
1. Install dynamic `BuildRequires` dependencies in the build overlay
1. Build the RPM with `rpmbuild`
1. Install the built rpms in the image

In the following examples, we'll use mkosi itself and its Fedora RPM spec as an example. To keep things snappy, we execute the first 3 steps in a prepare script so that they're cached on subsequent runs of mkosi if the `Incremental=` setting is enabled.

First, we need access to the upstream sources and the RPM spec and related files. These can be mounted into the current working directory when running mkosi scripts by using the `BuildSources=` setting. For example, in `mkosi.local.conf`, we could have the following settings:

```conf
[Build]
BuildSources=../mkosi:mkosi ../fedora/mkosi:mkosi/rpm
BuildSourcesEphemeral=yes
```

Which instructs mkosi to mount the local version of the mkosi upstream repository at `../mkosi` to `mkosi` in the current working directory when running mkosi. The Fedora RPM spec is mounted at `mkosi/rpm`. We enable the `BuildSourcesEphemeral=` option as `rpmbuild` will write quite a few files to the source directory as part of building the rpm which we don't want to remain there after the build finishes.

We use `rpmspec` and `rpmbuild`, but these do not really support running from outside of the image that the RPM is being built in, so we have to make sure they're available inside the image by adding the following to `mkosi.conf`:

```conf
[Content]
Packages=rpm-build
# If you don't want rpm-build in the final image.
RemovePackages=rpm-build
```

The prepare script `mkosi.prepare` then looks as follows:

```shell
#!/bin/sh
set -e

if [ "$1" = "final" ]; then
    exit 0
fi

mkosi-chroot \
    env --chdir=mkosi \
    rpmspec \
    --query \
    --buildrequires \
    --define "_topdir /var/tmp" \
    --define "_sourcedir $PWD/mkosi/rpm" \
    rpm/mkosi.spec |
    sort --unique |
    tee /tmp/buildrequires |
    xargs --delimiter '\n' mkosi-install

until mkosi-chroot \
    env --chdir=mkosi \
    rpmbuild \
    -bd \
    --noprep \
    --build-in-place \
    --define "_topdir /var/tmp" \
    --define "_sourcedir $PWD/mkosi/rpm" \
    --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \
    rpm/mkosi.spec
do
    EXIT_STATUS=$?
    if [ $EXIT_STATUS -ne 11 ]; then
        exit $EXIT_STATUS
    fi

    mkosi-chroot \
        rpm \
        --query \
        --package \
        --requires \
        /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm |
        grep --invert-match '^rpmlib(' |
        sort --unique >/tmp/dynamic-buildrequires

    sort /tmp/buildrequires /tmp/dynamic-buildrequires |
        uniq --unique |
        tee --append /tmp/buildrequires |
        xargs --delimiter '\n' mkosi-install
done
```

To install non-dynamic dependencies, we use `rpmspec`. What's important is to set `_sourcedir` to the directory containing the RPM sources for the RPM spec that we want to build. We run `rpmspec` inside the image to make sure all the RPM macros have their expected values and then run `mkosi-install` outside the image to install the required dependencies. `mkosi-install` will invoke the package manager that's being used to build the image to install the given packages.

We always set `_topdir` to `/var/tmp` to avoid polluting the image with `rpmbuild` artifacts.

After installing non-dynamic `BuildRequires` dependencies, we have to install the dynamic `BuildRequires` dependencies by running `rpmbuild -bd` until it succeeds or fails with an exit code that's not `11`. After each run of `rpmbuild -bd` that exits with exit code `11`, there will be an SRPM in the `SRPMS` subdirectory of the rpm working directory (`_topdir`) of which the `BuildRequires` dependencies have to be installed.

We retrieve the list of `BuildRequires` dependencies with `rpm` this time (because we're operating on a package instead of a spec), remove all `rpmlib` style dependencies which can't be installed and store them in a temporary file after filtering duplicates. Because the `BuildRequires` dependencies from the SRPM will also contain the non-dynamic `BuildRequires` dependencies, we have to filter those out as well.

Now we have an image and build overlay with all the necessary dependencies installed to be able to build the RPM.

Next is the build script. We suffix the build script with `.chroot` so that mkosi runs it entirely inside the image. In the build script, we invoke `rpmbuild -bb --build-in-place` to have `rpmbuild` build the RPM in place from the upstream sources. Because `--build-in-place` configures `_builddir` to the current working directory, we change directory to the upstream sources before invoking `rpmbuild`. Again, `_sourcedir` has to point to the RPM spec sources. We also have to override `_rpmdir` to point to the mkosi output directory (stored in `$OUTPUTDIR`). The build script `mkosi.build.chroot` then looks as follows:

```shell
#!/bin/sh
set -e

env --chdir=mkosi \
    rpmbuild \
    -bb \
    --noprep \
    --build-in-place \
    $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \
    --define "_topdir /var/tmp" \
    --define "_sourcedir $PWD/mkosi/rpm" \
    --define "_rpmdir $OUTPUTDIR" \
    ${BUILDDIR:+--define} \
    ${BUILDDIR:+"_vpath_builddir $BUILDDIR"} \
    --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \
    --define "_binary_payload w.ufdio" \
    --define "debug_package %{nil}" \
    --define "__brp_strip %{nil}" \
    --define "__brp_compress %{nil}" \
    --define "__brp_mangle_shebangs %{nil}" \
    --define "__brp_strip_comment_note %{nil}" \
    --define "__brp_strip_static_archive %{nil}" \
    rpm/mkosi.spec
```

The `_vpath_builddir` directory will be used to store out-of-tree build artifacts for build systems that support out-of-tree builds (CMake, Meson) so we set it to mkosi's out-of-tree build directory in `$BUILDDIR` if one is provided. This will make subsequent RPM builds much faster as CMake or Meson will be able to do an incremental build.

Setting `_binary_payload` to `w.ufdio` disables compression to speed up the build. We also disable debug package generation using `debug_package` and various rpm build root policy scripts to speed up the build. Note that the build root policy macros we use here are CentOS/Fedora specific.

After the build script finishes, the produced rpms will be located in `$OUTPUTDIR`.
We can now install them from the `mkosi.postinst` post-installation script:

```shell
#!/bin/sh
set -e
rpm --install "$OUTPUTDIR"/*mkosi*.rpm
```

mkosi-25.3/docs/distribution-policy.md000066400000000000000000000055401474711424400200670ustar00rootroot00000000000000---
title: Adding new distributions
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Adding new distributions

Merging support for a new distribution in mkosi depends on a few factors. Not all of these are required, but depending on how many of these requirements are satisfied, the chances of us merging support for your distribution will improve:

1. Is the distribution somewhat popular? mkosi's goal is not to support every distribution under the sun; the distribution should have a substantial number of users.
2. Does the distribution differentiate itself somehow from the distributions that are already supported? We're generally not interested in supporting distributions that only consist of minimal configuration changes to another distribution.
3. Is there a long-term maintainer for the distribution in mkosi? When proposing support for a new distribution, we expect you to be the maintainer for the distribution and to respond when pinged for support on distribution-specific issues.
4. Does the distribution use a custom package manager or one of the already supported ones (apt, dnf, pacman, zypper)? Supporting new package managers in mkosi is generally a lot of work. We can support new ones if needed for a new distribution, but we will insist on the package manager having a somewhat sane design, with official support for building in a chroot and running unprivileged in a user namespace being the bare minimum features we expect from any new package manager.

We will only consider new distributions that satisfy all or most of these requirements. However, you can still use mkosi with the distribution by setting the `Distribution` setting to `custom` and providing the rootfs yourself, either via a skeleton tree or base tree, or via a prepare script (see the sketch at the end of this page).

# Implementing new distributions

To actually implement a new distribution, the following checklist can be used:

- Add the distribution to the `Distribution` enum
- Add the implementation of the distribution in `mkosi/distributions`. If the distribution is a variant of an existing distribution, inherit from the existing distribution's installer class and only override the necessary methods.
- Update any relevant methods on the `Distribution` enum to take the new distribution into account.
- Update the documentation in `mkosi/resources/man/mkosi.1.md`
- Update the default initrd, tools and default image configurations in `mkosi/resources/mkosi-initrd`, `mkosi/resources/mkosi-tools` and `mkosi.conf.d` respectively. If the distribution is a variant of another existing distribution, update the `[Match]` blocks for the existing distribution to also match against the new distribution.

To test whether all necessary changes were made, you can run `mkosi -d <distribution> --tools-tree -t disk -f vm`.
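As a sketch of the `Distribution=custom` escape hatch mentioned above (the base tree path is hypothetical):

```conf
[Distribution]
Distribution=custom

[Content]
# Provide the rootfs yourself instead of having mkosi install packages.
BaseTrees=/path/to/prebuilt/rootfs
```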
mkosi-25.3/docs/favicon.png [binary PNG image data omitted]
mkosi-25.3/docs/fonts/ [directory]
mkosi-25.3/docs/fonts/heebo-bold.woff [binary WOFF font data omitted]
mkosi-25.3/docs/fonts/heebo-regular.woff [binary WOFF font data omitted]
7YM:Lmƒ(Yَ8DJ\.rh۳5weĀV-n2H8êܨ">}q˹#6Ui9;:A̢8{%L/{Fl1`=c=vw&^^&`g6uZ9γDžt`ȝ,t4ٹq`^=Bܞ uSߞʏqWXɹIJZr6AEuy5 SiJzUƚ>}O15.G-1䄐B};3,gO0KQ32ⵣ1u&']jͦ GOn? ]^br~~އ3~aG~lftoG֗wקt\݋l nguiCtk I敇:BsCOG k4' d O`PnR >5>q<;s}Q`VJBSRSR[ H w %9Ǵ.<;$P@9Oa7ӒT{pbg' Hvn&I]s& 9j6& ]?wnĵDLh5Gzr|jc1S{e<\{t.L rjT]t*b0 FªFWr]wC>F`. ˕4Tɂ hD{<K`}$B,̔bJEhu"])Od spX==9ArIp'K2 ,I7%p"6f3 9/DGTc8fUcҡTX\Ь /݇Lpy#3L3 ;t6zth4OC 9sE@R}| 7t.j^FPȩ;C踊΅l8YLVҔ<;ӯU*ET< /!𿖱/K|0y AλI9$`4"0I "rRD֛4qz61i1Q`yם 4Diu*::;;(!36 _h cT1.AQܝˤJFP4PI csKsz$Qi m0>>uPf-,94{6#\0hLK3&foߢE4wn -y]3XɆ49 #"9u@A% q,XHrB Ҋjܖd7>88828f4y'ɗjK/c~SECL^){apN'oD XWe.kFNS{|W#=`n2Eyhj} )?$hw 䬈rivM@a&ݶ@AZ);9rM+=tZ!Wl1yng_mP"۬W՞V^bg/ZcUDN'Iyg|&*f.%Y$! As@PtϝX,F顮DF Ojy\ ٧?۵kׯܸr 6T<3<OS?|v'-I#GBX,q,>0Q E9c]{#ީ<]0Xi.C{ $ &Xl[ep,G`ƠILyGLWr|Ԩ_o:_vƇ;^\\|э_;}=[ڂ +;H߻Ok5R3ȓNqfr]_=Dפ$EzJbOA$)ͮo/V6t#W_\V~YZ}uzFI_ W?_\dy߹ׯYzFsvN|j .@lh;>:v ZIt<`[* $$j49vݨ|+̆]-_Q4.+FζyDm^ɶꏗ:lŠ[FX4&~hdw_*Kbe[I ^Xrd14wH 5ugϥʫiY3 %QimysAsaOfZK5HYZ))+d$rR,f4(}vz`y^(vr| zA㭵 x<,MȤ9yzcBRϓxjG1D:sw)I w8paleq% hy{qnOiEaТs t@ʦN8LE:#/o.q T YҲ+))&&ɞcX 0~f3CmhIfhURG\ Dq(mjcJ s0!М 9&}R+]sDVO+]Gj򆼼ԅrpϞp0[@e֯=#7;q<ұ+WQ^~d#qtFN@<<^ˌn|n-+P;( ],iD&{(~^0bMvntފi=#Ms:^г#:n]#v06/3060 " ˢ"7pnF=5&16ѸF̢4iiK&6msߛa@0c޻{le+;{&Z»#?O^Y>? =H~[ 2Mk2 %lkF1՛PVK.mAe9l5aڣczR^+cJR2sb W1GO+6-_ghͅ*_X S/698&r(DCè=D'7X+C LJ'"lQ(1g_ݞPV3UQN;d-),)|6y&o-MM+LlA [j~yϠj>O^ b #xgu 4!,iZc@xF@a=D'ZV |㩚Mp IKmh0 R POCz^s\\s^zgURk~NVR7FG'$DG% &%NKdOD&UDkU,-X;c Ps=w,Cqo;%wC.ǵ0ՔG+Z|4SL-Z]M.)2`]yc'\ >&bxz"&!wXlBX=K"2rȤ82ki4[%gbHRL<.ʸzIDJ(A+zELVH*%(v=~N@h=Ê'$QAO3&Ē6qXzLi؇BOk!;q_3@KttsjĄ|&f' <+ &o4 YY*?LDX;\=%_%h_"+MJۉgx'1'S֏E?l$Ylb<#ºS/k9y;7By 4JMVfZe%vl#6vF<`K yVjA.{0}A&Ԁێl+5}wf&3%ƥaArb9K#sБ&LyVȷV\Wq2^=!TU 泥 =çE{xh edY)#2= )hom?AyÈ?u Np8!;`ԑU [l&mM|a1rPRBwy.l.=Sg(DNơ;)Dyo&((X09- is/>9uƚWZ?Ww-Z|G_<^@'9ӧ?%^>n_nj}ƟGb4>y^c~=a[ԛ}E ]yՊ+o-;WgnLx=ټ!PHKs4f_S1,i)s|O | gIqꢁU[V,]ޖޜ8:dŦZ:yK vϋ/3sN]kxSYyMh~im&>`/QXk,Onŋ:cEߓڐt6G:now ؀#9XP28`brȿ.cٹٯlnê9 bmOcjN/8\uuS^XS<+.ʃ 𛇚D;2I+xDzDdw\.=;=85vsa`NޠsU{Zzw0n{`,>*ޡ(omDXϡJ8I`J3X1?hX( |*Ɵ:OJwo<D>KtǴ_YHlm~lB>C5cX3raG׆ڲ$ŽLbcRLѱid] cp)*&$Ȫ|2;6Zh'pk{`0[Ȁ O27`0I\Ҷe5i;ZLͩhj2ƅOJtwyCoܔDh?^9Gʍ^Qa L~%AAX b,bAJf,[g JG<Αؐ&bp'+icC] }r z*91DDkORP@KyBTMϮN^5{Jlj/)߷*7vi ]FI`ӞJ`ϑĤœirRhCePd(`9:Ѷf}6"`o$r)\@ ~(U ;B)r2 3+mCnA8Rs.4#P59A- 1k*0,iB }JzwT;Ctw L&@>ʽ@5ه. VvDejiDNʼn B_~{?]_cNf>"L$Vy̼Dk*a۫Q v` K)J YΜ'k̗8Yf܏d~p7nNuHa 2D07B5p5>s<qZwuq5G;[G$`t|3cC1= +[c^I]ǜ螤&\Īy1їQ=ܜ]'i\euk73|a`eaX99Aų+$#׸hf9D\ ^Α^Ycuh9~vɵ. jty{;#7sKq2İj'd%oxl:K`}ྦl-ytW gd. %ԙ-*uf"wn%2$4U")&h&<չ94-r/ 6.!\)UFꡤBc`FK/^5ϕMy\Kib9K*/"pwgJ妜*>/!IrZ{(r AH8hOb}ꢫՕd4j֨zkOַo9PP,yw奰=Jub((e ܃Ao◺?)j${V_/F[_?~/qo'kwu('ؽHLt @j)TjZڨ.T^<…*_\e/_|e"[{Ã8id+,L2fޏ& &HuslK+ L{I$\-Uб&doS *z RNqY +:f:PN"!VGzļtKM_}祐>#gs{}hn\w3?rSn4KKbe_8rΐ(P U-Qy`=NXS-Gl]Ýak >d@Ӂ.3MO>jO)ouZO{՟vi;ސnƒ;7T愑NVX9axӎ=`2RKZua^|01 MHBc=`tq]in(Znΐlص  v]]jWEv!\i}]hlRܿqwbjjҞSyC{{y_AŰ A*$&{ApsP; 8d[S sJ1Mi&9P"5M+Jx}>U{nwK v۵f7+j˗;.mxzRC;l\`jWϵ?xܿ-+2j ":vAEs10lI#Cb(6@nG!1 tu PPElrxkx(%*CPK\tEёubk2ڡT-)=xÞi"v㛭we?Q(AV@H ~CJ6K$uXY).0T$R:ޚ"b+(rG#!?_E` Ʋd7 B(/tl/S+%{g-[)ab81J˳8V픐.T # !Kz]eʌхG뾒IJ(}1m)QT9H2yη 0b40,eYR&:8|k.^.n66g0lLFcJ2?iѩش/*d/y *Q9R8T ;a2_e2ÇÇ md>b pR1@G(Iz|W ͼB#_gsnŕY>.cA0=]>h9y*y$ $+( *i|ީ"(0 UB&{#HvYz+Dg);sk'9+"dDH_/Eť(./_R/_BSgA0[Ka`h"TH&lJ%ԺY`Q\aV a RQRcW f5q款>pZY>'ur77<%0/n_m*-\V4mSIɦiE{[[ o&`(M-~k-Z؊l{bfO, ˢ"! V9y8~?t-G +@hcIh^cvJ! 
Ki5%K1d`eȪOК)_a*zti[ %t?ջ$7n3Bi~xG60N@w3d[*Yp".pYD߱^olއe!#4ʊ ٢!0FRa,7&v`bm1vc^i)t*Gyr*rʧfhC!PǗUIԒwhP@{7 ZXBhn@F6|NH,>f)i43R4<HmmFKnRp6MmM3"nuyo5?5e3no}|N4eʳ= z$dL5i]Ys//-۸(i_Iie+W9<̏\TLC\W^~tUvH^Ľt7{.o1.Lt:"'9LSmvHg -[AXBiLuj$ I%v+)lzP#%P#c[ T#CP)%)AwupS,8TсLe,E`8=)z5~5~Pֵ+V=sr9R_<8PyO?Q]]7on}'7?C7~| _?~U?k{{FPfHSJq .n{J@Kв)dr2I.K.L UKHO0b"#C !7J#5=`[rF".X'3=T{OH:8s AmpLdʞtyɜO䇈,ɻ˱aA/baI)Q&ZT%)+ 0 8)Q-Fhl J JU4{G;m@ފ][|2+k>Z~];[ok(ڶqӕWsV%$ԛʲš,-TYsm:?.%kzzhC˪iQvEfʭ)+27SєKೖJM>V>.Ԍq0FP H,XnG4|x+3+3둬ř\Y83sqv'ߘXXht!3!h/߈5Vlprw-X-gΎ#Ab9` X8b]d<2JȨ]؀:d`2j7)s_+,;GVP9QU݌ՠ|'B={8=YFk ,ٷ|FBF#?F(cye1z@P#cHTos)o}7IKeS޾`)=x84 6J+  5j`IZ*'kUf5#25}O#Ƕmey:DRU $.^IR!# lLedt>c]bbu||ubb%+*&'''dtD=3!`]`X⊄SprX`f BR{DcCVh VZ^it+m$6td20_߰P?߰X.(CM4T4ֽjH Ձgxګ$Hj)d2Z'd'+ $$TBV;1`88רDH!2XN7|j,`4E @%P #G\$){ y^)|mHxAa ^tec@.K3ۿP}&)>jx:aEXO`'m/J $ĩ+1:@l:eH@1! +@=asV@JDZ n> !_%xٽ!\S ;ӬwGJeAԵqvI#Q]>&B:]3/寋ns3s!c qd h7Q#jE(B`Baԇވw-*0)5TZ.#.xOH파g}|C1L3C&8U\] o{BaC;5 b: (t1 S$^ uTSwJu$%:2PI* |'b3#.9Eq woKF7zc4Fa qaeL! }2)>&*~ H]h:w}4`9^Lx>\XPea+^~Mm YJ,'v6^-byrdA~]z_I@; 9qHw"ܥ A'q pzn.. 6k8Qc{PB~a[Amy=ԡH 'Q_2՗'nC`#Lj&umAF7p/m Ą PU3^rSN) > wK7>q7s;9\"/ݧ% \qx.M =yx\e-B=hx>dNU*P!VUt ?r-bӔc#@\N脎{^.~V-!6ӺCv ylq2Nv~3NS@#=N}}K\.51Kk3qBtP+ϕyoQcםqthM= ^wuݶt$ݮ*7UXF[{+[K5{W"2rUuܶ@ԞiVmjݢpwi_x#GWM :wB7?k~%7>xmxe@r Ni4.%(iITQQ[P{p{Op=z~o+|P/D+õ= hH][C#'4{a(sx/`o`P6//4܈`kC=}Ua|؃=YF?YJV! f!ÑC.8N`61 1Ec1M #p$`5͘o0͘YX81`|{p{7/q3n-%s+܆r;<rw`ĝ wnݹ^ʽY}`-4h a?b1ڌsG8c8XUlN4N ăyL[d+Slc;pb1Nf]9s9GHŅ

mkosi-25.3/docs/index.md
---
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# mkosi — Build Bespoke OS Images

A fancy wrapper around `dnf --installroot`, `apt`, `pacman` and `zypper` that
generates customized disk images with a number of bells and whistles.

---

{% assign tutorials = site.pages | group_by:"category" %}
{% assign project = site.data.project_pages | group_by:"category" %}
{% assign documentation = site.data.documentation_page | group_by:"category" %}
{% assign merged = documentation | concat: tutorials | concat: project %}

{% for pair in merged %}
{% if pair.name != "" %}
## {{ pair.name }}
{% assign sorted = pair.items | sort:"title" %}{% for page in sorted %}
* [{{ page.title }}]({{ page.url | relative_url }}){% endfor %}
{% endif %}
{% endfor %}

---

mkosi-25.3/docs/initrd.md
---
title: Building a custom initrd and using it in a mkosi image
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Building a custom initrd and using it in a mkosi image

Building an image with a mkosi-built initrd is a two-step process, because you
will build two images: the initrd and your distribution image.

1. Build an initrd image using the `cpio` output format with the same target
   distribution as you want to use for your distribution image. mkosi
   compresses the `cpio` output format by default.

   ```conf
   [Output]
   Format=cpio

   [Content]
   Packages=systemd udev kmod
   ```

2. Invoke `mkosi` passing the initrd image via the `--initrd` option, or add
   the `Initrds=` option to your mkosi config, when building your distribution
   image.

   ```bash
   mkosi --initrd=<path-to-initrd> ...
   ```

This will build an image using the provided initrd image. mkosi will add the
kernel modules found in the distribution image to this initrd.
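If you prefer the configuration route from step 2, a minimal sketch (the path
is a placeholder for wherever the initrd image from step 1 was written):

```conf
[Content]
Initrds=mkosi.output/initrd.cpio.zst
```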
mkosi-25.3/docs/root-verity.md
# Operating disk images with verity-protected root partition

First of all, to build a disk image with a verity-protected root partition, put
the following in mkosi.repart:

```conf
# mkosi.repart/00-esp.conf
[Partition]
Type=esp
Format=vfat
CopyFiles=/efi:/
CopyFiles=/boot:/
SizeMinBytes=1G
SizeMaxBytes=1G

# mkosi.repart/10-root-verity-sig.conf
[Partition]
Type=root-verity-sig
Label=%M_%A_verity_sig
Verity=signature
VerityMatchKey=root
SplitName=%t.%U

# mkosi.repart/11-root-verity.conf
[Partition]
Type=root-verity
Label=%M_%A_verity
Verity=hash
VerityMatchKey=root
SizeMinBytes=300M
SizeMaxBytes=300M
SplitName=%t.%U

# mkosi.repart/12-root.conf
[Partition]
Type=root
Format=erofs
Label=%M_%A_root
Verity=data
VerityMatchKey=root
CopyFiles=/
ExcludeFilesTarget=/var/
Minimize=yes
SplitName=%t.%U
```

Then, you'll need a dropin for systemd-repart in the initrd to make sure it
runs after the root partition has been mounted, so let's create an initrd with
`mkosi.images` where we customize systemd-repart to behave like this:

```conf
# mkosi.images/initrd/mkosi.conf
[Include]
Include=mkosi-initrd

# mkosi.images/initrd/mkosi.extra/usr/lib/systemd/system/systemd-repart.service.d/sysroot.conf
[Unit]
After=sysroot.mount
ConditionDirectoryNotEmpty=|/sysroot/usr/lib/repart.d
```

To use the initrd in the top-level image, add the following to mkosi.conf:

```conf
[Content]
Initrds=%O/initrd
```

Finally, we'll need some partition definitions in the image itself to create an
A/B update setup and an encrypted `/var`. This includes the definitions from
mkosi.repart in a reduced form solely for matching the existing partitions:

```conf
# mkosi.extra/usr/lib/repart.d/00-esp.conf
[Partition]
Type=esp

# mkosi.extra/usr/lib/repart.d/10-root-verity-sig.conf
[Partition]
Type=root-verity-sig
Label=%M_%A_verity_sig

# mkosi.extra/usr/lib/repart.d/11-root-verity.conf
[Partition]
Type=root-verity
Label=%M_%A_verity

# mkosi.extra/usr/lib/repart.d/12-root.conf
[Partition]
Type=root
Label=%M_%A
SizeMinBytes=2G
SizeMaxBytes=2G

# mkosi.extra/usr/lib/repart.d/20-root-verity-sig.conf
[Partition]
Type=root-verity-sig
Label=_empty

# mkosi.extra/usr/lib/repart.d/21-root-verity.conf
[Partition]
Type=root-verity
Label=_empty
SizeMinBytes=300M
SizeMaxBytes=300M

# mkosi.extra/usr/lib/repart.d/22-root.conf
[Partition]
Type=root
Label=_empty
SizeMinBytes=2G
SizeMaxBytes=2G

# mkosi.extra/usr/lib/repart.d/30-swap.conf
[Partition]
Type=swap
Format=swap
Encrypt=tpm2
SizeMinBytes=4G
SizeMaxBytes=4G

# mkosi.extra/usr/lib/repart.d/40-var.conf
[Partition]
Type=var
Format=ext4
Encrypt=tpm2
SizeMinBytes=2G
```

Because in this setup `/etc` is immutable, we have to embed the machine ID in
the image itself at build time, so let's generate a machine ID and persist it by
running `systemd-id128 new >mkosi.machine-id`. The machine ID is required as it
is embedded in the `/var` partition UUID and systemd will refuse to mount a
`/var` partition without the machine ID embedded in its UUID.

You'll then also need some `systemd-sysupdate` definitions in
`/usr/lib/sysupdate.d` which describe how to update the image. These will
differ depending on how the image is updated, but we list some example
definitions here.
These are all missing a `[Source]` section whose contents will depend on how
updates are deployed:

```conf
# /usr/lib/sysupdate.d/10-root-verity-sig.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=partition
Path=auto
MatchPattern=%M_@v_verity_sig
MatchPartitionType=root-verity-sig
PartitionFlags=0
ReadOnly=1

# /usr/lib/sysupdate.d/11-root-verity.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=partition
Path=auto
MatchPattern=%M_@v_verity
MatchPartitionType=root-verity
PartitionFlags=0
ReadOnly=1

# /usr/lib/sysupdate.d/12-root.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=partition
Path=auto
MatchPattern=%M_@v
MatchPartitionType=root
PartitionFlags=0
ReadOnly=1

# /usr/lib/sysupdate.d/20-uki.conf
[Transfer]
ProtectVersion=%A

[Target]
Type=regular-file
Path=/EFI/Linux
PathRelativeTo=boot
MatchPattern=%M_@v+@l-@d.efi \
             %M_@v+@l.efi \
             %M_@v.efi
Mode=0444
TriesLeft=3
TriesDone=0
InstancesMax=2
```
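As one possibility, if the update artifacts are published over plain HTTP, each
definition might gain a `[Source]` section along the following lines. This is
only a sketch: the URL is a placeholder and the `MatchPattern=` must agree with
the file names you actually publish for the split partitions produced via
`SplitName=`.

```conf
# e.g. appended to /usr/lib/sysupdate.d/12-root.conf
[Source]
Type=url-file
Path=https://example.com/updates/
MatchPattern=%M_@v.root.raw
```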
mkosi-25.3/docs/style.css
/* SPDX-License-Identifier: LGPL-2.1-or-later */

@font-face {
  font-family: 'Heebo';
  src: url('fonts/heebo-regular.woff');
  font-weight: 400;
}

@font-face {
  font-family: 'Heebo';
  src: url('fonts/heebo-bold.woff');
  font-weight: 600;
}

/* Variables */
:root {
  --sd-brand-black: hsl(270, 19%, 13%); /* #201A26; */
  --sd-brand-green: hsl(145, 66%, 51%); /* #30D475; */
  --sd-brand-white: #fff;
  --sd-black: hsl(270, 7%, 13%);
  --sd-green: hsl(145, 66%, 43%); /* #26b763 */
  --sd-gray-extralight: hsl(30, 10%, 96%); /* #f6f5f4 */
  --sd-gray-light: hsl(30, 10%, 92%);
  --sd-gray: hsl(30, 10%, 85%);
  --sd-gray-dark: hsl(257, 23%, 20%);
  --sd-gray-extradark: hsl(257, 23%, 16%); /* #241f31 */
  --sd-blue: hsl(200, 66%, 55%);
  --sd-highlight-bg-light: rgba(255, 255, 255, 1);
  --sd-highlight-bg-dark: rgba(0, 0, 0, .6);
  --sd-highlight-inline-bg-light: rgba(0, 0, 0, 0.07);
  --sd-highlight-inline-bg-dark: rgba(255, 255, 255, 0.1);
  --sd-font-weight-normal: 400;
  --sd-font-weight-bold: 600;

  /* Light mode variables */
  --sd-foreground-color: var(--sd-gray-extradark);
  --sd-background-color: var(--sd-gray-extralight);
  --sd-logo-color: var(--sd-brand-black);
  --sd-link-color: var(--sd-green);
  --sd-small-color: var(--sd-gray-dark);
  --sd-highlight-bg: var(--sd-highlight-bg-light);
  --sd-highlight-inline-bg: var(--sd-highlight-inline-bg-light);
  --sd-link-font-weight: var(--sd-font-weight-bold);
  --sd-table-row-bg: var(--sd-highlight-inline-bg-light);
  --sd-table-row-hover-bg: var(--sd-gray);
}

@media (prefers-color-scheme: dark) {
  :root {
    color-scheme: dark;
    --sd-foreground-color: var(--sd-gray);
    --sd-background-color: var(--sd-black);
    --sd-logo-color: var(--sd-brand-white);
    --sd-link-color: var(--sd-brand-green);
    --sd-small-color: var(--sd-gray);
    --sd-highlight-bg: var(--sd-highlight-bg-dark);
    --sd-highlight-inline-bg: var(--sd-highlight-inline-bg-dark);
    --sd-link-font-weight: var(--sd-font-weight-normal);
    --sd-table-row-bg: var(--sd-highlight-inline-bg-dark);
    --sd-table-row-hover-bg: var(--sd-highlight-bg-dark);
  }
}

/* Typography */
* { -moz-box-sizing: border-box; -webkit-box-sizing: border-box; box-sizing: border-box; }

html, body { margin: 0; padding: 0; font-size: 1rem; font-family: "Heebo", sans-serif; font-weight: 400; line-height: 1.6; }

body { color: var(--sd-foreground-color); background-color: var(--sd-background-color); }

h1, h2, h3, h4, h5, h6 { margin: 1rem 0 0.625rem; font-weight: 600; line-height: 1.25; }

h1 { text-align: center; font-size: 1.87rem; font-weight: 400; font-style: normal; margin-bottom: 2rem; }

@media screen
and (min-width: 650px) {
  img { margin-left: 10%; margin-right: 10%; }
  h1 { font-size: 2.375em; }
}

h2 { font-size: 1.25rem; margin-top: 2.5em; }
h3 { font-size: 1.15rem; }

a { font-weight: var(--sd-link-font-weight); text-decoration: none; color: var(--sd-link-color); cursor: pointer; }
a:hover { text-decoration: underline; }

b { font-weight: 600; }
small { color: var(--sd-small-color); }
hr { margin: 3rem auto 4rem; width: 40%; opacity: 40%; }

/* Layout */
.container { width: 80%; margin-left: auto; margin-right: auto; max-width: 720px; }

/* Singletons */
.page-logo { display: block; padding: 5rem 0 3rem; color: var(--sd-logo-color); }
.page-logo > svg { display: block; width: 12.625em; height: auto; margin: 0 auto; }

.color-green { color: var(--sd-brand-green); }
.color-blue { color: var(--sd-blue); }

.page-link::after { content: " ➜"; }

/* Footer */
footer { text-align: center; padding: 3em 0 3em; font-size: 1em; margin-top: 4rem; }

@media (prefers-color-scheme: light) {
  .highlight .cm { color: #999988; font-style: italic; }
  .highlight .cp { color: #999999; font-weight: bold; }
  .highlight .c1 { color: #999988; font-style: italic; }
  .highlight .cs { color: #999999; font-weight: bold; font-style: italic; }
  .highlight .c, .highlight .ch, .highlight .cd, .highlight .cpf { color: #999988; font-style: italic; }
  .highlight .err { color: #a61717; background-color: #e3d2d2; }
  .highlight .gd { color: #000000; background-color: #ffdddd; }
  .highlight .ge { color: #000000; font-style: italic; }
  .highlight .gr { color: #aa0000; }
  .highlight .gh { color: #999999; }
  .highlight .gi { color: #000000; background-color: #ddffdd; }
  .highlight .go { color: #888888; }
  .highlight .gp { color: #555555; }
  .highlight .gs { font-weight: bold; }
  .highlight .gu { color: #aaaaaa; }
  .highlight .gt { color: #aa0000; }
  .highlight .kc { color: #000000; font-weight: bold; }
  .highlight .kd { color: #000000; font-weight: bold; }
  .highlight .kn { color: #000000; font-weight: bold; }
  .highlight .kp { color: #000000; font-weight: bold; }
  .highlight .kr { color: #000000; font-weight: bold; }
  .highlight .kt { color: #445588; font-weight: bold; }
  .highlight .k, .highlight .kv { color: #000000; font-weight: bold; }
  .highlight .mf { color: #009999; }
  .highlight .mh { color: #009999; }
  .highlight .il { color: #009999; }
  .highlight .mi { color: #009999; }
  .highlight .mo { color: #009999; }
  .highlight .m, .highlight .mb, .highlight .mx { color: #009999; }
  .highlight .sa { color: #000000; font-weight: bold; }
  .highlight .sb { color: #d14; }
  .highlight .sc { color: #d14; }
  .highlight .sd { color: #d14; }
  .highlight .s2 { color: #d14; }
  .highlight .se { color: #d14; }
  .highlight .sh { color: #d14; }
  .highlight .si { color: #d14; }
  .highlight .sx { color: #d14; }
  .highlight .sr { color: #009926; }
  .highlight .s1 { color: #d14; }
  .highlight .ss { color: #990073; }
  .highlight .s, .highlight .dl { color: #d14; }
  .highlight .na { color: #008080; }
  .highlight .bp { color: #999999; }
  .highlight .nb { color: #0086B3; }
  .highlight .nc { color: #445588; font-weight: bold; }
  .highlight .no { color: #008080; }
  .highlight .nd { color: #3c5d5d; font-weight: bold; }
  .highlight .ni { color: #800080; }
  .highlight .ne { color: #990000; font-weight: bold; }
  .highlight .nf, .highlight .fm { color: #990000; font-weight: bold; }
  .highlight .nl { color: #990000; font-weight: bold; }
  .highlight .nn { color: #555555; }
  .highlight .nt { color: #000080; }
  .highlight .vc { color: #008080; }
  .highlight .vg { color: #008080; }
  .highlight .vi { color: #008080; }
  .highlight .nv, .highlight .vm { color: #008080; }
  .highlight .ow { color: #000000; font-weight: bold; }
  .highlight .o { color: #000000; font-weight: bold; }
  .highlight .w { color: #bbbbbb; }
}

@media (prefers-color-scheme: dark) {
  /* rouge "base16.dark" code highlight */
  /* generated with: rougify style base16.dark | sed '/background-color: #151515/d' */
  .highlight, .highlight .w { color: #d0d0d0; }
  .highlight .err { color: #151515; background-color: #ac4142; }
  .highlight .c, .highlight .ch, .highlight .cd, .highlight .cm, .highlight .cpf, .highlight .c1, .highlight .cs { color: #505050; }
  .highlight .cp { color: #f4bf75; }
  .highlight .nt { color: #f4bf75; }
  .highlight .o, .highlight .ow { color: #d0d0d0; }
  .highlight .p, .highlight .pi { color: #d0d0d0; }
  .highlight .gi { color: #90a959; }
  .highlight .gd { color: #ac4142; }
  .highlight .gh { color: #6a9fb5; font-weight: bold; }
  .highlight .k, .highlight .kn, .highlight .kp, .highlight .kr, .highlight .kv { color: #aa759f; }
  .highlight .kc { color: #d28445; }
  .highlight .kt { color: #d28445; }
  .highlight .kd { color: #d28445; }
  .highlight .s, .highlight .sb, .highlight .sc, .highlight .dl, .highlight .sd, .highlight .s2, .highlight .sh, .highlight .sx, .highlight .s1 { color: #90a959; }
  .highlight .sa { color: #aa759f; }
  .highlight .sr { color: #75b5aa; }
  .highlight .si { color: #8f5536; }
  .highlight .se { color: #8f5536; }
  .highlight .nn { color: #f4bf75; }
  .highlight .nc { color: #f4bf75; }
  .highlight .no { color: #f4bf75; }
  .highlight .na { color: #6a9fb5; }
  .highlight .m, .highlight .mb, .highlight .mf, .highlight .mh, .highlight .mi, .highlight .il, .highlight .mo, .highlight .mx { color: #90a959; }
  .highlight .ss { color: #90a959; }
}

/* Code Blocks */
.highlighter-rouge {
  padding: 2px 1rem;
  border-radius: 5px;
  color: var(--sd-foreground-color);
  background-color: var(--sd-highlight-bg);
  overflow: auto;
}

.highlighter-rouge .highlight .err {
  background: transparent !important;
  color: inherit !important;
}

/* Inline Code */
code.highlighter-rouge {
  padding: 2px 6px;
  background-color: var(--sd-highlight-inline-bg);
}

a code.highlighter-rouge {
  color: inherit;
}
mkosi-25.3/docs/sysext.md
---
title: Building system extensions with mkosi
category: Tutorials
layout: default
SPDX-License-Identifier: LGPL-2.1-or-later
---

# Building system extensions with mkosi

[System extension](https://uapi-group.org/specifications/specs/extension_image/)
images may – dynamically at runtime – extend the base system with an overlay
containing additional files.

To build system extensions with mkosi, we first have to create a base image on
top of which we can build our extension.

To keep things manageable, we'll use mkosi's support for building multiple
images so that we can build our base image and system extension in one go.

Start by creating a temporary directory with a base configuration file
`mkosi.conf` with some shared settings:

```conf
[Output]
OutputDirectory=mkosi.output
CacheDirectory=mkosi.cache
```

From now on we'll assume all steps are executed inside the temporary directory.

Now let's continue with the base image definition by writing the following to
`mkosi.images/base/mkosi.conf`:

```conf
[Output]
Format=directory

[Content]
CleanPackageMetadata=no
Packages=systemd udev
```

We use the `directory` output format here instead of the `disk` output so that
we can build our extension without needing root privileges.

Now that we have our base image, we can define a sysext that builds on top of
it by writing the following to `mkosi.images/btrfs/mkosi.conf`:

```conf
[Config]
Dependencies=base

[Output]
Format=sysext
Overlay=yes

[Content]
BaseTrees=%O/base
Packages=btrfs-progs
```

`BaseTrees=` points to our base image and `Overlay=yes` instructs mkosi to only
package the files added on top of the base tree.

We can't sign the extension image without a key, so let's generate one with
`mkosi genkey` (or write your own private key and certificate yourself to
`mkosi.key` and `mkosi.crt` respectively). Note that this key will need to be
loaded into your kernel keyring either at build time or via MOK for systemd to
accept the system extension at runtime as trusted.
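One way to handle the MOK route is sketched below, assuming `mkosi.crt` was
produced by `mkosi genkey` and the machine boots via shim; `mokutil` asks for a
one-time password, and enrollment is confirmed in the MOK manager on the next
reboot:

```bash
# mokutil expects the certificate in DER form.
openssl x509 -in mkosi.crt -outform DER -out mkosi.der
mokutil --import mkosi.der
```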
Finally, you can build the base image and the extension by running `mkosi -f`.
You'll find `btrfs.raw` in `mkosi.output` which is the extension image.

You'll also find the main image `image.raw` there but it will be almost empty.
What we can do now is package up the base image as the main image, but in
another format, for example an initrd. We can do that by adding the following
to `mkosi.conf`:

```conf
[Output]
Format=cpio
Output=initrd

[Content]
MakeInitrd=yes
BaseTrees=%O/base
```

If we now run `mkosi -f` again, we'll find `initrd.cpio.zst` in `mkosi.output`
with its accompanying extension still in `btrfs.raw`.

If you don't have any need for a main image, you can configure `Format=none`
in the `Output` section in `mkosi.conf` to disable it.
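To give the extension a quick spin on a running system that matches the base
image, something along these lines should work (a sketch; `/run/extensions` is
one of the directories `systemd-sysext` picks extensions up from, and where
signature enforcement applies the key generated above must already be trusted):

```bash
mkdir -p /run/extensions
cp mkosi.output/btrfs.raw /run/extensions/
systemd-sysext merge
systemd-sysext status
```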
if context.image_type == "uki": logging.info("Provided kernel image is already a unified kernel image, skipping mkosi-initrd") return # If the initrd was provided on the kernel command line, we shouldn't generate our own. if context.layout != "uki" and context.initrds: logging.info("Pre-built initrds were provided, skipping mkosi-initrd") return if context.layout == "uki" and context.uki_generator == "mkosi": format = OutputFormat.uki else: format = OutputFormat.cpio output = "initrd" if format == OutputFormat.cpio else "uki.efi" cmdline: list[PathString] = [ "mkosi-initrd", "--kernel-version", context.kernel_version, "--format", str(format), "--output", output, "--output-dir", context.staging_area, "--kernel-image", context.kernel_image, ] # fmt: skip if context.verbose: cmdline += ["--debug"] logging.info(f"Building {output}") run(cmdline, stdin=sys.stdin, stdout=sys.stdout) if format == OutputFormat.cpio: build_microcode_initrd(context.staging_area / "microcode") if __name__ == "__main__": main() mkosi-25.3/kernel-install/51-mkosi-addon.install000077500000000000000000000027431474711424400215520ustar00rootroot00000000000000#!/usr/bin/env python3 # SPDX-License-Identifier: LGPL-2.1-or-later import logging import sys from pathlib import Path from mkosi.initrd import KernelInstallContext from mkosi.log import log_setup from mkosi.run import run, uncaught_exception_handler from mkosi.util import PathString @uncaught_exception_handler() def main() -> None: context = KernelInstallContext.parse( name="51-mkosi-addon.install", description="kernel-install plugin to build local addon for initrd/cmdline", ) log_setup(default_log_level="info" if context.verbose else "warning") # No local configuration? Then nothing to do if not Path("/etc/mkosi-addon").exists() and not Path("/run/mkosi-addon").exists(): logging.info("No local configuration defined, skipping mkosi-addon") return if context.command != "add" or context.layout != "uki": logging.info("Not an UKI layout 'add' step, skipping mkosi-addon") return if context.image_type != "uki": logging.info("Provided kernel image is not a unified kernel image, skipping mkosi-addon") return cmdline: list[PathString] = [ "mkosi-addon", "--output", "mkosi-local.addon.efi", "--output-dir", context.staging_area / "uki.efi.extra.d", ] # fmt: skip if context.verbose: cmdline += ["--debug"] logging.info("Building mkosi-local.addon.efi") run(cmdline, stdin=sys.stdin, stdout=sys.stdout) if __name__ == "__main__": main() mkosi-25.3/mkosi-addon000077700000000000000000000000001474711424400222042mkosi/resources/mkosi-addonustar00rootroot00000000000000mkosi-25.3/mkosi-initrd000077700000000000000000000000001474711424400226142mkosi/resources/mkosi-initrdustar00rootroot00000000000000mkosi-25.3/mkosi.conf000066400000000000000000000023071474711424400145700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Include] Include=mkosi-vm [Build] CacheDirectory=mkosi.cache History=yes [Output] # These images are (among other things) used for running mkosi which means we need some disk space available so # default to directory output where disk space isn't a problem. Format=directory OutputDirectory=mkosi.output [Build] Incremental=yes BuildSources=. BuildSourcesEphemeral=yes [Content] Autologin=yes SELinuxRelabel=no ShimBootloader=unsigned Packages= binutils gdb sudo tmux zsh InitrdPackages= less RemoveFiles= # The grub install plugin doesn't play nice with booting from virtiofs. 
mkosi-25.3/kernel-install/51-mkosi-addon.install
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later

import logging
import sys
from pathlib import Path

from mkosi.initrd import KernelInstallContext
from mkosi.log import log_setup
from mkosi.run import run, uncaught_exception_handler
from mkosi.util import PathString


@uncaught_exception_handler()
def main() -> None:
    context = KernelInstallContext.parse(
        name="51-mkosi-addon.install",
        description="kernel-install plugin to build local addon for initrd/cmdline",
    )

    log_setup(default_log_level="info" if context.verbose else "warning")

    # No local configuration? Then nothing to do
    if not Path("/etc/mkosi-addon").exists() and not Path("/run/mkosi-addon").exists():
        logging.info("No local configuration defined, skipping mkosi-addon")
        return

    if context.command != "add" or context.layout != "uki":
        logging.info("Not a UKI layout 'add' step, skipping mkosi-addon")
        return

    if context.image_type != "uki":
        logging.info("Provided kernel image is not a unified kernel image, skipping mkosi-addon")
        return

    cmdline: list[PathString] = [
        "mkosi-addon",
        "--output", "mkosi-local.addon.efi",
        "--output-dir", context.staging_area / "uki.efi.extra.d",
    ]  # fmt: skip

    if context.verbose:
        cmdline += ["--debug"]

    logging.info("Building mkosi-local.addon.efi")

    run(cmdline, stdin=sys.stdin, stdout=sys.stdout)


if __name__ == "__main__":
    main()

mkosi-25.3/mkosi-addon -> mkosi/resources/mkosi-addon
mkosi-25.3/mkosi-initrd -> mkosi/resources/mkosi-initrd

mkosi-25.3/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Include]
Include=mkosi-vm

[Build]
CacheDirectory=mkosi.cache
History=yes

[Output]
# These images are (among other things) used for running mkosi which means we need some disk space
# available so default to directory output where disk space isn't a problem.
Format=directory
OutputDirectory=mkosi.output

[Build]
Incremental=yes
BuildSources=.
BuildSourcesEphemeral=yes

[Content]
Autologin=yes
SELinuxRelabel=no
ShimBootloader=unsigned
Packages=
        binutils
        gdb
        sudo
        tmux
        zsh

InitrdPackages=
        less

RemoveFiles=
        # The grub install plugin doesn't play nice with booting from virtiofs.
        /usr/lib/kernel/install.d/20-grub.install
        # The dracut install plugin doesn't honor KERNEL_INSTALL_INITRD_GENERATOR.
        /usr/lib/kernel/install.d/50-dracut.install

# Make sure that SELinux doesn't run in enforcing mode even if it's pulled in as a dependency.
KernelCommandLine=
        enforcing=0
        systemd.log_ratelimit_kmsg=0
        systemd.crash_shell
        printk.devkmsg=on
        systemd.early_core_pattern=/core

KernelModulesInitrdExclude=.*
KernelModulesInitrdInclude=default

[Runtime]
RAM=4G

mkosi-25.3/mkosi.conf.d/15-bootable.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Format=|disk
Format=|directory

[Match]
Architecture=|x86-64
Architecture=|arm64

[Content]
Bootable=yes

mkosi-25.3/mkosi.conf.d/15-memory.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Format=|esp
Format=|uki
Format=|cpio

[Runtime]
RAM=8G

mkosi-25.3/mkosi.conf.d/15-metadata.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

# Remove package manager metadata on cpio-based output formats
# to keep memory usage within reasonable limits.

[Match]
Format=|uki
Format=|esp
Format=|cpio

[Content]
CleanPackageMetadata=yes

mkosi-25.3/mkosi.conf.d/15-x86-64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=x86-64

[Content]
BiosBootloader=grub

mkosi-25.3/mkosi.conf.d/20-arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=arch

[Content]
Packages=
        grub
        openssh
        python
        qemu-user-static
        shim

mkosi-25.3/mkosi.conf.d/20-azure.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=azure

[Distribution]
Release=3.0
Repositories=base-preview

[Content]
# The unsigned-shim package tries to install to the same location as the signed shim package so we can't install and
# test unsigned shim.
ShimBootloader=none
Packages=
        kernel-tools
ShimBootloader=none Packages= kernel-tools mkosi-25.3/mkosi.conf.d/20-centos/000077500000000000000000000000001474711424400165775ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-centos/mkosi.conf000066400000000000000000000003771474711424400205770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky [Distribution] Release=10 [Content] # CentOS Stream 10 does not ship an unsigned shim ShimBootloader=none Packages= linux-firmware mkosi-25.3/mkosi.conf.d/20-centos/mkosi.conf.d/000077500000000000000000000000001474711424400210675ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-centos/mkosi.conf.d/epel.conf000066400000000000000000000002141474711424400226600ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=10 [Distribution] Repositories=epel [Content] Packages= rpmautospec mkosi-25.3/mkosi.conf.d/20-debian/000077500000000000000000000000001474711424400165265ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-debian/mkosi.conf000066400000000000000000000002111474711424400205110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=debian [Distribution] Release=testing Repositories=non-free-firmware mkosi-25.3/mkosi.conf.d/20-fedora/000077500000000000000000000000001474711424400165445ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-fedora/mkosi.conf000066400000000000000000000002711474711424400205350ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Distribution] Release=rawhide [Content] Packages= fish qemu-user-static rpmautospec mkosi-25.3/mkosi.conf.d/20-fedora/mkosi.conf.d/000077500000000000000000000000001474711424400210345ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-fedora/mkosi.conf.d/20-arm64.conf000066400000000000000000000001741474711424400230550ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=arm64 [Content] Packages= shim-unsigned-aarch64 mkosi-25.3/mkosi.conf.d/20-fedora/mkosi.conf.d/20-x86_64.conf000066400000000000000000000002541474711424400230610ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=x86-64 [Content] Packages= amd-ucode-firmware kernel-uki-virt shim-unsigned-x64 mkosi-25.3/mkosi.conf.d/20-kali/000077500000000000000000000000001474711424400162245ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-kali/mkosi.conf000066400000000000000000000001671474711424400202210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=kali [Distribution] Repositories=non-free-firmware mkosi-25.3/mkosi.conf.d/20-opensuse/000077500000000000000000000000001474711424400171455ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-opensuse/mkosi.conf000066400000000000000000000005511474711424400211370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Distribution] Release=tumbleweed [Content] # openSUSE does not ship an unsigned shim ShimBootloader=none Packages= diffutils fish openssh-clients openssh-server python3 qemu-linux-user shim sudo-policy-wheel-auth-self mkosi-25.3/mkosi.conf.d/20-opensuse/mkosi.conf.d/000077500000000000000000000000001474711424400214355ustar00rootroot00000000000000mkosi-25.3/mkosi.conf.d/20-opensuse/mkosi.conf.d/x86-64.conf000066400000000000000000000003061474711424400231570ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later 
mkosi-25.3/mkosi.conf.d/20-opensuse/mkosi.conf.d/x86-64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=x86-64

[Content]
Packages=
        grub2-efi
        grub2-i386-pc
        grub2-x86_64-efi
        ucode-amd
        ucode-intel

mkosi-25.3/mkosi.conf.d/20-rhel-ubi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=rhel-ubi

[Distribution]
Release=9

[Content]
Bootable=no

mkosi-25.3/mkosi.conf.d/20-ubuntu/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=ubuntu

[Distribution]
Release=noble
Repositories=universe

mkosi-25.3/mkosi.conf.d/30-azure-centos-fedora/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|centos
Distribution=|alma
Distribution=|rocky
Distribution=|fedora
Distribution=|azure

[Content]
Packages=
        openssh-clients
        openssh-server
        python3
        rpm-build

mkosi-25.3/mkosi.conf.d/30-azure-centos-fedora/mkosi.conf.d/20-arm64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=arm64

[Content]
Packages=
        grub2-efi-aa64-modules

mkosi-25.3/mkosi.conf.d/30-azure-centos-fedora/mkosi.conf.d/20-uefi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=|x86-64
Architecture=|arm64

[Content]
Packages=
        grub2-efi
        shim

mkosi-25.3/mkosi.conf.d/30-azure-centos-fedora/mkosi.conf.d/20-x86-64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=x86-64

[Content]
Packages=
        grub2-efi-x64-modules
        grub2-pc
        microcode_ctl

mkosi-25.3/mkosi.conf.d/30-debian-kali-ubuntu/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=|debian
Distribution=|kali
Distribution=|ubuntu

[Content]
Packages=
        fish
        openssh-client
        openssh-server
        python3
        qemu-user-static
        shim-signed

mkosi-25.3/mkosi.conf.d/30-debian-kali-ubuntu/mkosi.conf.d/20-ext4-orphan-file.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Distribution=ubuntu
Release=jammy

[Build]
# "orphan_file" is enabled by default in recent versions of mkfs.ext4 but not supported by the Jammy kernel so we
# explicitly disable it.
Environment=SYSTEMD_REPART_MKFS_OPTIONS_EXT4="-O ^orphan_file"
mkosi-25.3/mkosi.conf.d/30-debian-kali-ubuntu/mkosi.conf.d/20-x86-64.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
Architecture=x86-64

[Content]
Packages=
        amd64-microcode
        grub-efi
        grub-efi-amd64
        grub-pc-bin
        intel-microcode

mkosi-25.3/mkosi.conf.d/30-rpm/mkosi.build.chroot
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1-or-later
set -ex

rpmbuild \
    -bb \
    --noprep \
    --build-in-place \
    $([ "$WITH_TESTS" = "0" ] && echo --nocheck) \
    --define "_topdir /var/tmp" \
    --define "_sourcedir $PWD/rpm" \
    --define "_rpmdir $PACKAGEDIR" \
    --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \
    rpm/mkosi.spec

mkosi-25.3/mkosi.conf.d/30-rpm/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
BuildSources=rpm
Distribution=fedora

[Content]
VolatilePackages=
        mkosi
        mkosi-initrd

mkosi-25.3/mkosi.conf.d/30-rpm/mkosi.prepare
#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1-or-later
set -e

mkosi-chroot \
    rpmspec \
    --query \
    --buildrequires \
    --define "_topdir /var/tmp" \
    --define "_sourcedir rpm" \
    rpm/mkosi.spec |
    sort --unique |
    tee /tmp/buildrequires |
    xargs --delimiter '\n' mkosi-install

until mkosi-chroot \
    rpmbuild \
    -bd \
    --build-in-place \
    --define "_topdir /var/tmp" \
    --define "_sourcedir rpm" \
    --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" \
    rpm/mkosi.spec
do
    EXIT_STATUS=$?
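    # rpmbuild exits with status 11 when dynamic build dependencies declared by the
    # spec file are still missing; treat any other status as a real build failure.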
    if [ $EXIT_STATUS -ne 11 ]; then
        exit $EXIT_STATUS
    fi

    mkosi-chroot \
        rpm \
        --query \
        --package \
        --requires \
        /var/tmp/SRPMS/mkosi-*.buildreqs.nosrc.rpm |
        grep --invert-match '^rpmlib(' |
        sort --unique >/tmp/dynamic-buildrequires

    sort /tmp/buildrequires /tmp/dynamic-buildrequires |
        uniq --unique |
        tee --append /tmp/buildrequires |
        xargs --delimiter '\n' mkosi-install
done

mkosi-25.3/mkosi.conf.d/40-tools/mkosi.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Build]
ToolsTreePackages=
        gnupg
        lvm2

mkosi-25.3/mkosi.conf.d/40-tools/mkosi.conf.d/arch.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
ToolsTreeDistribution=arch

[Build]
ToolsTreePackages=
        cryptsetup
        mypy
        python-pytest
        ruff
        sequoia-sop

mkosi-25.3/mkosi.conf.d/40-tools/mkosi.conf.d/azure-centos-fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
ToolsTreeDistribution=|azure
ToolsTreeDistribution=|centos
ToolsTreeDistribution=|fedora

[Build]
ToolsTreePackages=
        cryptsetup
        python3-mypy
        python3-pytest

mkosi-25.3/mkosi.conf.d/40-tools/mkosi.conf.d/debian-kali-ubuntu.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
ToolsTreeDistribution=|debian
ToolsTreeDistribution=|kali
ToolsTreeDistribution=|ubuntu

[Build]
ToolsTreePackages=
        cryptsetup-bin
        fdisk
        mypy
        python3-pytest
        sqop

mkosi-25.3/mkosi.conf.d/40-tools/mkosi.conf.d/fedora.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
ToolsTreeDistribution=fedora

[Build]
ToolsTreePackages=
        ruff
        sequoia-sop

mkosi-25.3/mkosi.conf.d/40-tools/mkosi.conf.d/opensuse.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Match]
ToolsTreeDistribution=opensuse

[Build]
ToolsTreePackages=
        cryptsetup
        grub2
        # TODO: Move to default tools tree when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved.
        mypy
        python3-pytest
        ruff
mkosi-25.3/mkosi.extra/usr/lib/repart.d/root.conf
# SPDX-License-Identifier: LGPL-2.1-or-later

[Partition]
Type=root

mkosi-25.3/mkosi.extra/usr/lib/systemd/mkosi-check-and-shutdown.sh
#!/bin/bash
# SPDX-License-Identifier: LGPL-2.1-or-later
set -eux

systemctl --failed --no-legend | tee /failed-services

# Exit with non-zero EC if the /failed-services file is not empty (we have -e set)
[[ ! -s /failed-services ]]

mkosi-25.3/mkosi.extra/usr/lib/systemd/system-preset/00-mkosi.preset
# SPDX-License-Identifier: LGPL-2.1-or-later

# mkosi adds its own ssh units via the --ssh switch so disable the default ones.
disable ssh.service
disable sshd.service

# Make sure dbus-broker is started by default on Debian/Kali/Ubuntu.
enable dbus-broker.service

# Make sure we have networking available.
enable systemd-networkd.service
enable systemd-networkd-wait-online.service
enable systemd-resolved.service

# We install dnf in some images but it's only going to be used rarely,
# so let's not have dnf create its cache.
disable dnf-makecache.*

# The rpmdb is already in the right location, don't try to migrate it.
disable rpmdb-migrate.service

# We have journald to receive audit data so let's make sure we're not running auditd as well
disable auditd.service

# systemd-timesyncd is not enabled by default in the default systemd preset so enable it here instead.
enable systemd-timesyncd.service

mkosi-25.3/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset
# SPDX-License-Identifier: LGPL-2.1-or-later

# Make sure that services are disabled by default (primarily for Debian/Kali/Ubuntu).
disable *
mkosi-25.3/mkosi.extra/usr/lib/systemd/system/mkosi-check-and-shutdown.service
# SPDX-License-Identifier: LGPL-2.1-or-later

[Unit]
Description=Check if any service failed and then shut down the machine
After=multi-user.target network-online.target
Requires=multi-user.target
SuccessAction=exit
FailureAction=exit
SuccessActionExitStatus=123

[Service]
Type=oneshot
ExecStart=/usr/lib/systemd/mkosi-check-and-shutdown.sh

mkosi-25.3/mkosi.md -> mkosi/resources/man/mkosi.1.md

mkosi-25.3/mkosi.postinst
#!/bin/bash
# SPDX-License-Identifier: LGPL-2.1-or-later
set -e

if [[ "$DISTRIBUTION" =~ ubuntu|kali|debian ]]; then
    SUDO_GROUP=sudo
else
    SUDO_GROUP=wheel
fi

mkosi-chroot \
    useradd \
    --user-group \
    --create-home \
    --password "$(openssl passwd -1 mkosi)" \
    --groups "$SUDO_GROUP",systemd-journal \
    --shell /bin/bash \
    mkosi

systemctl --root="$BUILDROOT" mask lvm2-monitor.service

mkosi-25.3/mkosi.prepare
#!/bin/bash
# SPDX-License-Identifier: LGPL-2.1-or-later
set -e

if [ "$1" = "build" ]; then
    exit 0
fi

mkosi-chroot "$SRCDIR"/bin/mkosi dependencies | xargs -d '\n' mkosi-install

mkosi-25.3/mkosi/__init__.py
# SPDX-License-Identifier: LGPL-2.1-or-later
import contextlib
import dataclasses
import datetime
import functools
import hashlib
import itertools
import json
import logging
import os
import re
import resource
import shlex
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import textwrap
import uuid
import zipapp
from collections.abc import Iterator, Mapping, Sequence
from contextlib import AbstractContextManager
from pathlib import Path
from typing import Optional, Union, cast

from mkosi.archive import can_extract_tar, extract_tar, make_cpio, make_tar
from mkosi.bootloader import (
    efi_boot_binary,
    extract_pe_section,
    gen_kernel_images,
    grub_bios_setup,
    install_grub,
    install_shim,
    install_systemd_boot,
    prepare_grub_config,
    python_binary,
    run_systemd_sign_tool,
    shim_second_stage_binary,
    sign_efi_binary,
    want_efi,
    want_grub_bios,
    want_grub_efi,
)
from mkosi.burn import run_burn
from mkosi.completion import print_completion
from mkosi.config import (
    PACKAGE_GLOBS,
    Args,
    ArtifactOutput,
    Cacheonly,
    CertificateSourceType,
    Compression,
    Config,
    ConfigFeature,
    DocFormat,
    Incremental,
    JsonEncoder,
    KeySourceType,
    ManifestFormat,
    Network,
    OutputFormat,
    SecureBootSignTool,
    ShimBootloader,
    Verb,
    Vmm,
    cat_config,
    format_bytes,
    have_history,
    parse_boolean,
    parse_config,
    resolve_deps,
    summary,
    systemd_tool_version,
    want_selinux_relabel,
    yes_no,
)
from mkosi.context import Context
from mkosi.distributions import Distribution, detect_distribution
from mkosi.documentation import show_docs
from mkosi.installer import clean_package_manager_metadata
from mkosi.kmod import gen_required_kernel_modules, loaded_modules, process_kernel_modules
from mkosi.log import ARG_DEBUG, complete_step, die, log_notice, log_step
from mkosi.manifest import Manifest
from mkosi.mounts import finalize_certificate_mounts, finalize_source_mounts, mount_overlay
from mkosi.pager import page
from mkosi.partition import Partition, finalize_root, finalize_roothash
from mkosi.qemu import (
    KernelType,
    copy_ephemeral,
    finalize_credentials,
    finalize_kernel_command_line_extra,
    finalize_register,
    join_initrds,
    run_qemu,
    run_ssh,
    start_journal_remote,
)
from mkosi.run import (
    apivfs_options,
    chroot_cmd,
    chroot_options,
    finalize_interpreter,
    finalize_passwd_symlinks,
    fork_and_wait,
    run,
    workdir,
)
from mkosi.sandbox import (
    CLONE_NEWNS,
    MOUNT_ATTR_NODEV,
    MOUNT_ATTR_NOEXEC,
    MOUNT_ATTR_NOSUID,
    MOUNT_ATTR_RDONLY,
    MS_REC,
    MS_SLAVE,
    __version__,
    acquire_privileges,
    join_new_session_keyring,
    mount,
    mount_rbind,
    umask,
    unshare,
    userns_has_single_user,
)
from mkosi.sysupdate import run_sysupdate
from mkosi.tree import copy_tree, make_tree, move_tree, rmtree
from mkosi.user import INVOKING_USER, become_root_cmd
from mkosi.util import (
    PathString,
    current_home_dir,
    flatten,
    flock,
    flock_or_die,
    format_rlimit,
    hash_file,
    make_executable,
    one_zero,
    read_env_file,
    resource_path,
    scopedenv,
)
from mkosi.versioncomp import GenericVersion
from mkosi.vmspawn import run_vmspawn


@contextlib.contextmanager
def mount_base_trees(context: Context) -> Iterator[None]:
    if not context.config.base_trees or not context.config.overlay:
        yield
        return

    with complete_step("Mounting base trees…"), contextlib.ExitStack() as stack:
        bases = []
        (context.workspace / "bases").mkdir(exist_ok=True)

        for path in context.config.base_trees:
            d = context.workspace / f"bases/{path.name}-{uuid.uuid4().hex}"

            path = path.resolve()

            if path.is_dir():
                bases += [path]
            elif can_extract_tar(path):
                extract_tar(path, d, sandbox=context.sandbox)
                bases += [d]
            elif path.suffix == ".raw":
                run(
                    ["systemd-dissect", "--mount", "--mkdir", path, d],
                    env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"),
                )
                stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", d]))
                bases += [d]
            else:
                die(f"Unsupported base tree source {path}")

        with mount_overlay(bases, context.root, upperdir=context.root):
            yield

        stack.enter_context(mount_overlay(bases, context.workspace / "lower"))

        for p in context.root.rglob("*"):
            rel = p.relative_to(context.root)
            q = context.workspace / "lower" / rel

            if (
                context.config.output_format == OutputFormat.sysext
                and not rel.is_relative_to("usr")
                and not rel.is_relative_to("opt")
            ):
                continue
            if context.config.output_format == OutputFormat.confext and not rel.is_relative_to("etc"):
                continue

            if not q.is_symlink() and not q.exists():
                continue

            if not p.is_symlink() and p.is_dir():
                if q.is_symlink() or not q.is_dir():
                    die(f"/{rel} is a directory in the overlay but not in the base tree")
                shutil.copystat(q, p)
            else:
                logging.info(f"Removing duplicate path /{rel} from overlay")
                p.unlink()


def remove_files(context: Context) -> None:
    """Remove files based on user-specified patterns"""

    if context.config.remove_files or (context.root / "work").exists():
        with complete_step("Removing files…"):
            remove = flatten(
                context.root.glob(pattern.lstrip("/")) for pattern in context.config.remove_files
            )
            rmtree(*remove, context.root / "work", sandbox=context.sandbox)

    if context.config.output_format.is_extension_image():
        with complete_step("Removing empty directories…"):
            for path, dirs, _ in os.walk(context.root, topdown=False):
                p = Path(path)

                for d in dirs:
                    t = p / d
                    if not t.is_symlink() and not any(t.iterdir()):
                        t.rmdir()
context.config.packages: return with complete_step(f"Installing extra packages for {context.config.distribution.pretty_name()}"): context.config.distribution.install_packages(context, context.config.packages) else: if context.config.overlay or context.config.output_format.is_extension_image(): if context.config.packages: die( "Cannot install packages in extension images without a base tree", hint="Configure a base tree with the BaseTrees= setting", ) return with complete_step(f"Installing {context.config.distribution.pretty_name()}"): context.config.distribution.install(context) if context.config.machine_id: with umask(~0o755): (context.root / "etc").mkdir(exist_ok=True) with umask(~0o444): (context.root / "etc/machine-id").write_text(context.config.machine_id.hex) elif (context.root / "etc").exists() and not (context.root / "etc/machine-id").exists(): # Uninitialized means we want it to get initialized on first boot. with umask(~0o444): (context.root / "etc/machine-id").write_text("uninitialized\n") # Ensure /efi exists so that the ESP is mounted there, as recommended by # https://0pointer.net/blog/linux-boot-partitions.html. Use the most restrictive access # mode we can without tripping up mkfs tools since this directory is only meant to be # overmounted and should not be read from or written to. with umask(~0o500): (context.root / "efi").mkdir(exist_ok=True) (context.root / "boot").mkdir(exist_ok=True) # Ensure /boot/loader/entries.srel exists and has "type1" written to it to nudge # kernel-install towards using the boot loader specification layout. with umask(~0o700): (context.root / "boot/loader").mkdir(exist_ok=True) with umask(~0o600): (context.root / "boot/loader/entries.srel").write_text("type1\n") if context.config.packages: context.config.distribution.install_packages(context, context.config.packages) for f in ( "var/lib/systemd/random-seed", "var/lib/systemd/credential.secret", "etc/machine-info", "var/lib/dbus/machine-id", ): # Using missing_ok=True still causes an OSError if the mount is read-only even if the # file doesn't exist so do an explicit exists() check first. 
if (context.root / f).exists(): (context.root / f).unlink() def install_build_packages(context: Context) -> None: if not context.config.build_scripts or not context.config.build_packages: return with ( complete_step(f"Installing build packages for {context.config.distribution.pretty_name()}"), mount_build_overlay(context), ): context.config.distribution.install_packages(context, context.config.build_packages) def install_volatile_packages(context: Context) -> None: if not context.config.volatile_packages: return with complete_step(f"Installing volatile packages for {context.config.distribution.pretty_name()}"): context.config.distribution.install_packages(context, context.config.volatile_packages) def remove_packages(context: Context) -> None: """Remove packages listed in config.remove_packages""" if not context.config.remove_packages: return with complete_step(f"Removing {len(context.config.remove_packages)} packages…"): try: context.config.distribution.remove_packages(context, context.config.remove_packages) except NotImplementedError: die(f"Removing packages is not supported for {context.config.distribution}") def check_root_populated(context: Context) -> None: """Check that the root was populated by looking for an os-release file.""" if ( context.config.output_format == OutputFormat.none or context.config.output_format.is_extension_image() ): return osrelease = context.root / "usr/lib/os-release" if not osrelease.exists(): die( f"{osrelease} not found.", hint=( "The root must be populated by the distribution, or from base trees, " "skeleton trees, and prepare scripts." ), ) def configure_os_release(context: Context) -> None: """Write IMAGE_ID and IMAGE_VERSION to /usr/lib/os-release in the image.""" if not (context.config.image_id or context.config.image_version or context.config.hostname): return if context.config.overlay or context.config.output_format.is_extension_image(): return for candidate in ["usr/lib/os-release", "usr/lib/initrd-release", "etc/os-release"]: osrelease = context.root / candidate if not osrelease.is_file() or osrelease.is_symlink(): continue # at this point we know we will either change or add to the file newosrelease = osrelease.with_suffix(".new") image_id_written = image_version_written = default_hostname_written = False with osrelease.open("r") as old, newosrelease.open("w") as new: # fix existing values for line in old.readlines(): if context.config.image_id and line.startswith("IMAGE_ID="): new.write(f'IMAGE_ID="{context.config.image_id}"\n') image_id_written = True elif context.config.image_version and line.startswith("IMAGE_VERSION="): new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') image_version_written = True elif context.config.hostname and line.startswith("DEFAULT_HOSTNAME="): new.write(f'DEFAULT_HOSTNAME="{context.config.hostname}"\n') default_hostname_written = True else: new.write(line) # append if they were missing if context.config.image_id and not image_id_written: new.write(f'IMAGE_ID="{context.config.image_id}"\n') if context.config.image_version and not image_version_written: new.write(f'IMAGE_VERSION="{context.config.image_version}"\n') if context.config.hostname and not default_hostname_written: new.write(f'DEFAULT_HOSTNAME="{context.config.hostname}"\n') newosrelease.rename(osrelease) def configure_extension_release(context: Context) -> None: if context.config.output_format not in (OutputFormat.sysext, OutputFormat.confext): return prefix = "SYSEXT" if context.config.output_format == OutputFormat.sysext else "CONFEXT" d =
"usr/lib" if context.config.output_format == OutputFormat.sysext else "etc" p = context.root / d / f"extension-release.d/extension-release.{context.config.output}" p.parent.mkdir(parents=True, exist_ok=True) osrelease = read_env_file(q) if (q := context.root / "usr/lib/os-release").exists() else {} extrelease = read_env_file(p) if p.exists() else {} new = p.with_suffix(".new") with new.open("w") as f: for k, v in extrelease.items(): f.write(f"{k}={v}\n") if "ID" not in extrelease: f.write(f"ID={osrelease.get('ID', '_any')}\n") if f"{prefix}_LEVEL" not in extrelease and (level := osrelease.get(f"{prefix}_LEVEL")): f.write(f"{prefix}_LEVEL={level}\n") if "VERSION_ID" not in extrelease and (version := osrelease.get("VERSION_ID")): f.write(f"VERSION_ID={version}\n") if f"{prefix}_ID" not in extrelease and context.config.image_id: f.write(f"{prefix}_ID={context.config.image_id}\n") if f"{prefix}_VERSION_ID" not in extrelease and context.config.image_version: f.write(f"{prefix}_VERSION_ID={context.config.image_version}\n") if f"{prefix}_SCOPE" not in extrelease: f.write( f"{prefix}_SCOPE=" f"{context.config.environment.get(f'{prefix}_SCOPE', 'initrd system portable')}\n" ) if "ARCHITECTURE" not in extrelease: f.write(f"ARCHITECTURE={context.config.architecture}\n") new.rename(p) def configure_autologin_service(context: Context, service: str, extra: str) -> None: dropin = context.root / f"usr/lib/systemd/system/{service}.d/autologin.conf" with umask(~0o755): dropin.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): dropin.write_text( textwrap.dedent( f"""\ [Service] ExecStart= ExecStart=-agetty -o '-f -p -- \\\\u' --autologin root {extra} $TERM StandardInput=tty StandardOutput=tty """ ) ) def configure_autologin(context: Context) -> None: if not context.config.autologin: return with complete_step("Setting up autologin…"): configure_autologin_service( context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600", ) configure_autologin_service( context, "getty@tty1.service", "--noclear -", ) configure_autologin_service( context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -", ) @contextlib.contextmanager def mount_build_overlay(context: Context, volatile: bool = False) -> Iterator[Path]: d = context.workspace / "build-overlay" if not d.is_symlink(): with umask(~0o755): d.mkdir(exist_ok=True) with contextlib.ExitStack() as stack: lower = [context.root] if volatile: lower += [d] upper = None else: upper = d stack.enter_context(mount_overlay(lower, context.root, upperdir=upper)) yield context.root @contextlib.contextmanager def finalize_scripts(config: Config, scripts: Mapping[str, Sequence[PathString]]) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-scripts-") as d: for name, script in scripts.items(): # Make sure we don't end up in a recursive loop when we name a script after the binary # it execs by removing the scripts directory from the PATH when we execute a script. 
with (Path(d) / name).open("w") as f: f.write("#!/bin/sh\n") if config.find_binary(name): f.write( textwrap.dedent( """\ DIR="$(cd "$(dirname "$0")" && pwd)" PATH="$(echo "$PATH" | tr ':' '\\n' | grep -v "$DIR" | tr '\\n' ':')" export PATH """ ) ) f.write(f'exec {shlex.join(str(s) for s in script)} "$@"\n') make_executable(Path(d) / name) os.utime(Path(d) / name, (0, 0)) yield Path(d) GIT_ENV = { "GIT_CONFIG_COUNT": "1", "GIT_CONFIG_KEY_0": "safe.directory", "GIT_CONFIG_VALUE_0": "*", } def mkosi_as_caller() -> tuple[str, ...]: # Kept for backwards compatibility. return ("env",) def finalize_host_scripts( context: Context, helpers: Mapping[str, Sequence[PathString]] = {}, ) -> AbstractContextManager[Path]: scripts: dict[str, Sequence[PathString]] = {} for binary in ("useradd", "groupadd"): if context.config.find_binary(binary): scripts[binary] = (binary, "--root", "/buildroot") if ukify := context.config.find_binary("ukify"): scripts["ukify"] = (python_binary(context.config), ukify) return finalize_scripts(context.config, scripts | dict(helpers)) @contextlib.contextmanager def finalize_config_json(config: Config) -> Iterator[Path]: with tempfile.NamedTemporaryFile(mode="w") as f: f.write(config.to_json()) f.flush() yield Path(f.name) def run_configure_scripts(config: Config) -> Config: if not config.configure_scripts: return config for script in config.configure_scripts: if not os.access(script, os.X_OK): die(f"{script} is not executable") env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), QEMU_ARCHITECTURE=config.architecture.to_qemu(), DISTRIBUTION_ARCHITECTURE=config.distribution.architecture(config.architecture), SRCDIR="/work/src", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), ) if config.profiles: env["PROFILES"] = " ".join(config.profiles) with finalize_source_mounts(config, ephemeral=False) as sources: for script in config.configure_scripts: with complete_step(f"Running configure script {script}…"): result = run( ["/work/configure"], env=env | config.environment, sandbox=config.sandbox( options=[ "--dir", "/work/src", "--chdir", "/work/src", "--ro-bind", script, "/work/configure", *sources, ], ), input=config.to_json(indent=None), stdout=subprocess.PIPE, ) # fmt: skip config = Config.from_json(result.stdout) return config def run_sync_scripts(config: Config) -> None: if not config.sync_scripts: return env = dict( DISTRIBUTION=str(config.distribution), RELEASE=config.release, ARCHITECTURE=str(config.architecture), DISTRIBUTION_ARCHITECTURE=config.distribution.architecture(config.architecture), SRCDIR="/work/src", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", CACHED=one_zero(have_cache(config)), ) if config.profiles: env["PROFILES"] = " ".join(config.profiles) # We make sure to mount everything in to make ssh work since syncing might involve git which # could invoke ssh. 
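# As a sketch (paths illustrative): the agent socket from SSH_AUTH_SOCK, e.g.
# /run/user/1000/keyring/ssh, is forwarded via the environment below, and the invoking
# user's home directory plus /run/user/<uid> are made available in the sandbox so that
# git over ssh keeps working.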
if agent := os.getenv("SSH_AUTH_SOCK"): env["SSH_AUTH_SOCK"] = agent with ( finalize_source_mounts(config, ephemeral=False) as sources, finalize_config_json(config) as json, tempfile.TemporaryDirectory( dir=config.workspace_dir_or_default(), prefix="mkosi-metadata-" ) as sandbox_tree, ): install_sandbox_trees(config, Path(sandbox_tree)) for script in config.sync_scripts: options = [ *finalize_certificate_mounts(config), "--ro-bind", script, "/work/sync", "--ro-bind", json, "/work/config.json", "--dir", "/work/src", "--chdir", "/work/src", *sources, ] # fmt: skip if (p := INVOKING_USER.home()).exists() and p != Path("/"): # We use a writable mount here to keep git worktrees working which encode absolute # paths to the parent git repository and might need to modify the git config in the # parent git repository when submodules are in use as well. options += ["--bind", p, p] env["HOME"] = os.fspath(p) if (p := Path(f"/run/user/{os.getuid()}")).exists(): options += ["--ro-bind", p, p] with complete_step(f"Running sync script {script}…"): run( ["/work/sync", "final"], env=env | config.environment, stdin=sys.stdin, sandbox=config.sandbox( network=True, options=options, overlay=Path(sandbox_tree), ), ) @contextlib.contextmanager def script_maybe_chroot_sandbox( context: Context, *, script: Path, options: Sequence[PathString], network: bool, ) -> Iterator[list[PathString]]: options = ["--dir", "/work/src", "--chdir", "/work/src", *options] suppress_chown = parse_boolean(context.config.environment.get("MKOSI_CHROOT_SUPPRESS_CHOWN", "0")) helpers = { "mkosi-chroot": [ finalize_interpreter(bool(context.config.tools_tree)), "-SI", "/sandbox.py", "--bind", "/buildroot", "/", "--bind", "/var/tmp", "/var/tmp", *apivfs_options(root=Path("/")), *chroot_options(), "--bind", "/work", "/work", "--chdir", "/work/src", *(["--ro-bind-try", "/etc/resolv.conf", "/etc/resolv.conf"] if network else []), *(["--suppress-chown"] if suppress_chown else []), ], "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), } # fmt: skip with finalize_host_scripts(context, helpers) as hd: if script.suffix != ".chroot": with context.sandbox( network=network, options=[ *options, "--bind", context.root, "/buildroot", *context.config.distribution.package_manager(context.config).mounts(context), ], scripts=hd, ) as sandbox: # fmt: skip yield sandbox else: if suppress_chown: options += ["--suppress-chown"] with chroot_cmd( root=context.root, network=network, options=options, ) as sandbox: yield sandbox def run_prepare_scripts(context: Context, build: bool) -> None: if not context.config.prepare_scripts: return if build and not context.config.build_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/prepare", CHROOT_SCRIPT="/work/prepare", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), **GIT_ENV, ) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) env |= context.config.environment with ( mount_build_overlay(context) if 
build else contextlib.nullcontext(), finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): if build: step_msg = "Running prepare script {} in build overlay…" arg = "build" else: step_msg = "Running prepare script {}…" arg = "final" for script in context.config.prepare_scripts: with complete_step(step_msg.format(script)): options: list[PathString] = [ "--ro-bind", script, "/work/prepare", "--ro-bind", json, "/work/config.json", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *sources, ] # fmt: skip run( ["/work/prepare", arg], env=env, stdin=sys.stdin, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=True, ), ) def run_build_scripts(context: Context) -> None: if not context.config.build_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", DESTDIR="/work/dest", CHROOT_DESTDIR="/work/dest", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/build-script", CHROOT_SCRIPT="/work/build-script", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_DOCS=one_zero(context.config.with_docs), WITH_NETWORK=one_zero(context.config.with_network), WITH_TESTS=one_zero(context.config.with_tests), **GIT_ENV, ) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) if context.config.build_dir is not None: env |= dict( BUILDDIR="/work/build", CHROOT_BUILDDIR="/work/build", ) env |= context.config.environment with ( mount_build_overlay(context, volatile=True), finalize_source_mounts(context.config, ephemeral=context.config.build_sources_ephemeral) as sources, finalize_config_json(context.config) as json, ): for script in context.config.build_scripts: cmdline = context.args.cmdline if context.args.verb == Verb.build else [] with complete_step(f"Running build script {script}…"): options: list[PathString] = [ "--ro-bind", script, "/work/build-script", "--ro-bind", json, "/work/config.json", "--bind", context.install_dir, "/work/dest", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *( ["--bind", str(context.config.build_dir), "/work/build"] if context.config.build_dir else [] ), *sources, ] # fmt: skip run( ["/work/build-script", *cmdline], env=env, stdin=sys.stdin, stdout=sys.stdout, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=context.config.with_network, ), ) def run_postinst_scripts(context: Context) -> None: if not context.config.postinst_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SCRIPT="/work/postinst", CHROOT_SCRIPT="/work/postinst", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_NETWORK=one_zero(context.config.with_network), **GIT_ENV, ) if 
context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) if context.config.build_dir is not None: env |= dict(BUILDDIR="/work/build") env |= context.config.environment with ( finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): for script in context.config.postinst_scripts: with complete_step(f"Running postinstall script {script}…"): options: list[PathString] = [ "--ro-bind", script, "/work/postinst", "--ro-bind", json, "/work/config.json", "--bind", context.staging, "/work/out", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *( ["--ro-bind", str(context.config.build_dir), "/work/build"] if context.config.build_dir else [] ), *sources, ] # fmt: skip run( ["/work/postinst", "final"], env=env, stdin=sys.stdin, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=context.config.with_network, ), ) def run_finalize_scripts(context: Context) -> None: if not context.config.finalize_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), BUILDROOT="/buildroot", OUTPUTDIR="/work/out", CHROOT_OUTPUTDIR="/work/out", SRCDIR="/work/src", CHROOT_SRCDIR="/work/src", PACKAGEDIR="/work/packages", ARTIFACTDIR="/work/artifacts", SCRIPT="/work/finalize", CHROOT_SCRIPT="/work/finalize", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", WITH_NETWORK=one_zero(context.config.with_network), **GIT_ENV, ) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) if context.config.build_dir is not None: env |= dict(BUILDDIR="/work/build") env |= context.config.environment with ( finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): for script in context.config.finalize_scripts: with complete_step(f"Running finalize script {script}…"): options: list[PathString] = [ "--ro-bind", script, "/work/finalize", "--ro-bind", json, "/work/config.json", "--bind", context.staging, "/work/out", "--bind", context.artifacts, "/work/artifacts", "--bind", context.package_dir, "/work/packages", *( ["--ro-bind", str(context.config.build_dir), "/work/build"] if context.config.build_dir else [] ), *sources, ] # fmt: skip run( ["/work/finalize"], env=env, stdin=sys.stdin, sandbox=script_maybe_chroot_sandbox( context, script=script, options=options, network=context.config.with_network, ), ) def run_postoutput_scripts(context: Context) -> None: if not context.config.postoutput_scripts: return env = dict( DISTRIBUTION=str(context.config.distribution), RELEASE=context.config.release, ARCHITECTURE=str(context.config.architecture), DISTRIBUTION_ARCHITECTURE=context.config.distribution.architecture(context.config.architecture), SRCDIR="/work/src", OUTPUTDIR="/work/out", MKOSI_UID=str(os.getuid()), MKOSI_GID=str(os.getgid()), MKOSI_CONFIG="/work/config.json", ) if context.config.profiles: env["PROFILES"] = " ".join(context.config.profiles) with ( finalize_source_mounts( context.config, ephemeral=bool(context.config.build_sources_ephemeral), ) as sources, finalize_config_json(context.config) as json, ): for script in context.config.postoutput_scripts: with complete_step(f"Running 
post-output script {script}…"): run( ["/work/postoutput"], env=env | context.config.environment, sandbox=context.sandbox( # postoutput scripts should run as (fake) root so that file ownership is # always recorded as if owned by root. options=[ "--ro-bind", script, "/work/postoutput", "--ro-bind", json, "/work/config.json", "--bind", context.staging, "/work/out", "--dir", "/work/src", "--chdir", "/work/src", "--dir", "/work/out", "--become-root", *sources, ], ), stdin=sys.stdin, ) # fmt: skip def install_tree( config: Config, src: Path, dst: Path, *, target: Optional[Path] = None, preserve: bool = True, ) -> None: src = src.resolve() t = dst if target: t = dst / target.relative_to("/") with umask(~0o755): t.parent.mkdir(parents=True, exist_ok=True) def copy() -> None: copy_tree( src, t, preserve=preserve, use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, ) if src.is_dir() or (src.is_file() and target): copy() elif can_extract_tar(src): extract_tar(src, t, sandbox=config.sandbox) elif src.suffix == ".raw": run( ["systemd-dissect", "--copy-from", workdir(src), "/", workdir(t)], env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"), sandbox=config.sandbox( devices=True, network=True, options=[ "--ro-bind", src, workdir(src), "--bind", t.parent, workdir(t.parent), ], ), ) # fmt: skip else: # If we get an unknown file without a target, we just copy it into /. copy() def install_base_trees(context: Context) -> None: if not context.config.base_trees or context.config.overlay: return with complete_step("Copying in base trees…"): for path in context.config.base_trees: install_tree(context.config, path, context.root) def install_skeleton_trees(context: Context) -> None: if not context.config.skeleton_trees: return with complete_step("Copying in skeleton file trees…"): for tree in context.config.skeleton_trees: install_tree(context.config, tree.source, context.root, target=tree.target, preserve=False) def install_sandbox_trees(config: Config, dst: Path) -> None: # Ensure /etc exists in the sandbox (dst / "etc").mkdir(exist_ok=True) if (p := config.tools() / "usr/share/crypto-policies/back-ends/DEFAULT").exists(): Path(dst / "etc/crypto-policies").mkdir(exist_ok=True) copy_tree(p, dst / "etc/crypto-policies/back-ends", sandbox=config.sandbox) if config.sandbox_trees: with complete_step("Copying in sandbox trees…"): for tree in config.sandbox_trees: install_tree(config, tree.source, dst, target=tree.target, preserve=False) if Path("/etc/passwd").exists(): shutil.copy("/etc/passwd", dst / "etc/passwd") if Path("/etc/group").exists(): shutil.copy("/etc/group", dst / "etc/group") if not (dst / "etc/mtab").is_symlink(): (dst / "etc/mtab").symlink_to("../proc/self/mounts") Path(dst / "etc/resolv.conf").unlink(missing_ok=True) Path(dst / "etc/resolv.conf").touch() if not (dst / "etc/nsswitch.conf").exists(): (dst / "etc/nsswitch.conf").write_text( textwrap.dedent( """\ passwd: files shadow: files group: files hosts: files myhostname resolve [!UNAVAIL=return] dns services: files netgroup: files automount: files aliases: files ethers: files gshadow: files networks: files dns protocols: files publickey: files rpc: files """ ) ) if not (dst / "etc/hosts").exists() and Path("/etc/hosts").exists(): shutil.copy("/etc/hosts", dst / "etc/hosts") Path(dst / "etc/static").unlink(missing_ok=True) if (config.tools() / "etc/static").is_symlink(): (dst / "etc/static").symlink_to((config.tools() / "etc/static").readlink()) # Create various mountpoints in /etc as /etc from the
sandbox tree is mounted read-only into the sandbox. for d in ( "etc/pki", "etc/ssl", "etc/ca-certificates", "etc/pacman.d/gnupg", "etc/alternatives", ): (dst / d).mkdir(parents=True, exist_ok=True) for f in ( "etc/passwd", "etc/group", "etc/shadow", "etc/gshadow", "etc/ld.so.cache", ): (dst / f).touch(exist_ok=True) def install_package_directories(context: Context, directories: Sequence[Path]) -> None: directories = [d for d in directories if any(d.iterdir())] if not directories: return with complete_step("Copying in extra packages…"): for d in directories: for p in itertools.chain(*(d.glob(glob) for glob in PACKAGE_GLOBS)): shutil.copy(p, context.repository, follow_symlinks=True) def install_extra_trees(context: Context) -> None: if not context.config.extra_trees: return with complete_step("Copying in extra file trees…"): for tree in context.config.extra_trees: install_tree(context.config, tree.source, context.root, target=tree.target, preserve=False) def install_build_dest(context: Context) -> None: if not any(context.install_dir.iterdir()): return with complete_step("Copying in build tree…"): copy_tree( context.install_dir, context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) def gzip_binary(context: Context) -> str: return "pigz" if context.config.find_binary("pigz") else "gzip" def fixup_vmlinuz_location(context: Context) -> None: # Some architectures ship an uncompressed vmlinux (ppc64el, riscv64) for type in ("vmlinuz", "vmlinux"): for d in context.root.glob(f"boot/{type}-*"): if d.is_symlink(): continue kver = d.name.removeprefix(f"{type}-") vmlinuz = context.root / "usr/lib/modules" / kver / type if not vmlinuz.parent.exists(): continue # Some distributions (OpenMandriva) symlink /usr/lib/modules/<kver>/vmlinuz to # /boot/vmlinuz-<kver>, so get rid of the symlink and copy the actual vmlinuz to # /usr/lib/modules/<kver>. if vmlinuz.is_symlink() and vmlinuz.resolve().is_relative_to("/boot"): vmlinuz.unlink() if not vmlinuz.exists(): shutil.copy2(d, vmlinuz) def want_initrd(context: Context) -> bool: if context.config.bootable == ConfigFeature.disabled: return False if context.config.output_format not in (OutputFormat.disk, OutputFormat.directory): return False if not any((context.artifacts / "io.mkosi.initrd").glob("*")) and not any(gen_kernel_images(context)): return False return True def finalize_default_initrd( config: Config, *, resources: Path, tools: bool = True, output_dir: Optional[Path] = None, ) -> Config: if config.root_password: password, hashed = config.root_password rootpwopt = f"hashed:{password}" if hashed else password else: rootpwopt = None relabel = ( ConfigFeature.auto if config.selinux_relabel == ConfigFeature.enabled else config.selinux_relabel ) # Default values are assigned via the parser so we go via the argument parser to construct # the config for the initrd. cmdline = [ "--directory", "", "--distribution", str(config.distribution), "--release", config.release, "--architecture", str(config.architecture), *(["--mirror", config.mirror] if config.mirror else []), "--repository-key-check", str(config.repository_key_check), "--repository-key-fetch", str(config.repository_key_fetch), *([f"--repositories={repository}" for repository in config.repositories]), *([f"--sandbox-tree={tree}" for tree in config.sandbox_trees]), # Note that when compress_output == Compression.none == 0 we don't pass --compress-output # which means the default compression will get picked. This is exactly what we want so that # initrds are always compressed.
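# For illustration: with CompressOutput=zstd this expands to "--compress-output zstd",
# while CompressOutput=none is falsy, so the flag is dropped entirely and the default
# compression of the included mkosi-initrd profile applies.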
*(["--compress-output", str(config.compress_output)] if config.compress_output else []), "--compress-level", str(config.compress_level), "--with-network", str(config.with_network), "--cache-only", str(config.cacheonly), *(["--output-directory", str(output_dir)] if output_dir else []), *(["--workspace-directory", str(config.workspace_dir)] if config.workspace_dir else []), *(["--cache-directory", str(config.cache_dir)] if config.cache_dir else []), *(["--package-cache-directory", str(config.package_cache_dir)] if config.package_cache_dir else []), *(["--local-mirror", str(config.local_mirror)] if config.local_mirror else []), "--incremental", str(config.incremental), *(f"--package={package}" for package in config.initrd_packages), *(f"--volatile-package={package}" for package in config.initrd_volatile_packages), *(f"--package-directory={d}" for d in config.package_directories), *(f"--volatile-package-directory={d}" for d in config.volatile_package_directories), "--output", "initrd", *(["--image-id", config.image_id] if config.image_id else []), *(["--image-version", config.image_version] if config.image_version else []), *( ["--source-date-epoch", str(config.source_date_epoch)] if config.source_date_epoch is not None else [] ), *(["--locale", config.locale] if config.locale else []), *(["--locale-messages", config.locale_messages] if config.locale_messages else []), *(["--keymap", config.keymap] if config.keymap else []), *(["--timezone", config.timezone] if config.timezone else []), *(["--hostname", config.hostname] if config.hostname else []), *(["--root-password", rootpwopt] if rootpwopt else []), *([f"--environment={k}='{v}'" for k, v in config.environment.items()]), *(["--tools-tree", str(config.tools_tree)] if config.tools_tree and tools else []), "--tools-tree-certificates", str(config.tools_tree_certificates), *([f"--extra-search-path={p}" for p in config.extra_search_paths]), *(["--proxy-url", config.proxy_url] if config.proxy_url else []), *([f"--proxy-exclude={host}" for host in config.proxy_exclude]), *(["--proxy-peer-certificate", str(p)] if (p := config.proxy_peer_certificate) else []), *(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []), *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []), "--selinux-relabel", str(relabel), "--include=mkosi-initrd", ] # fmt: skip _, [config] = parse_config(cmdline + ["build"], resources=resources) run_configure_scripts(config) return dataclasses.replace(config, image="default-initrd") def build_default_initrd(context: Context) -> Path: if context.config.distribution == Distribution.custom: die("Building a default initrd is not supported for custom distributions") config = finalize_default_initrd( context.config, resources=context.resources, output_dir=context.workspace, ) assert config.output_dir if config.incremental == Incremental.strict and not have_cache(config): die( f"Strict incremental mode is enabled and cache for image {config.name()} is out-of-date", hint="Build once with -i yes to update the image cache", ) config.output_dir.mkdir(exist_ok=True) if (config.output_dir / config.output).exists(): return config.output_dir / config.output with ( complete_step("Building default initrd"), setup_workspace(context.args, config) as workspace, ): build_image( Context( context.args, config, workspace=workspace, resources=context.resources, # Reuse the keyring, repository metadata and local package repository from the main image for # the default initrd. 
keyring_dir=context.keyring_dir, metadata_dir=context.metadata_dir, package_dir=context.package_dir, ) ) return config.output_dir / config.output def identify_cpu(root: Path) -> tuple[Optional[Path], Optional[Path]]: for entry in Path("/proc/cpuinfo").read_text().split("\n\n"): vendor_id = family = model = stepping = None for line in entry.splitlines(): key, _, value = line.partition(":") key = key.strip() value = value.strip() if not key or not value: continue if key == "vendor_id": vendor_id = value elif key == "cpu family": family = int(value) elif key == "model": model = int(value) elif key == "stepping": stepping = int(value) if vendor_id is not None and family is not None and model is not None and stepping is not None: break else: return (None, None) if vendor_id == "AuthenticAMD": uroot = root / "usr/lib/firmware/amd-ucode" if family > 21: ucode = uroot / f"microcode_amd_fam{family:x}h.bin" else: ucode = uroot / "microcode_amd.bin" if ucode.exists(): return (Path(f"{vendor_id}.bin"), ucode) elif vendor_id == "GenuineIntel": uroot = root / "usr/lib/firmware/intel-ucode" if (ucode := uroot / f"{family:02x}-{model:02x}-{stepping:02x}").exists(): return (Path(f"{vendor_id}.bin"), ucode) if (ucode := uroot / f"{family:02x}-{model:02x}-{stepping:02x}.initramfs").exists(): return (Path(f"{vendor_id}.bin"), ucode) return (Path(f"{vendor_id}.bin"), None) def build_microcode_initrd(context: Context) -> list[Path]: if not context.config.architecture.is_x86_variant(): return [] microcode = context.workspace / "microcode.initrd" if microcode.exists(): return [microcode] amd = context.root / "usr/lib/firmware/amd-ucode" intel = context.root / "usr/lib/firmware/intel-ucode" if not amd.exists() and not intel.exists(): logging.warning("/usr/lib/firmware/{amd-ucode,intel-ucode} not found, not adding microcode") return [] root = context.workspace / "microcode-root" destdir = root / "kernel/x86/microcode" with umask(~0o755): destdir.mkdir(parents=True, exist_ok=True) if context.config.microcode_host: vendorfile, ucodefile = identify_cpu(context.root) if vendorfile is None or ucodefile is None: logging.warning("Unable to identify CPU for MicrocodeHostonly=") return [] with (destdir / vendorfile).open("wb") as f: f.write(ucodefile.read_bytes()) else: if amd.exists(): with (destdir / "AuthenticAMD.bin").open("wb") as f: for p in amd.iterdir(): f.write(p.read_bytes()) if intel.exists(): with (destdir / "GenuineIntel.bin").open("wb") as f: for p in intel.iterdir(): f.write(p.read_bytes()) make_cpio(root, microcode, sandbox=context.sandbox) return [microcode] def finalize_kernel_modules_include(context: Context, *, include: Sequence[str], host: bool) -> set[str]: final = {i for i in include if i not in ("default", "host")} if "default" in include: initrd = finalize_default_initrd(context.config, resources=context.resources) final.update(initrd.kernel_modules_include) if host or "host" in include: final.update(loaded_modules()) return final def build_kernel_modules_initrd(context: Context, kver: str) -> Path: kmods = context.workspace / f"kernel-modules-{kver}.initrd" if kmods.exists(): return kmods make_cpio( context.root, kmods, files=gen_required_kernel_modules( context, kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_initrd_include, host=context.config.kernel_modules_initrd_include_host, ), exclude=context.config.kernel_modules_initrd_exclude, ), sandbox=context.sandbox, ) if context.config.distribution.is_apt_distribution(): # Older Debian and Ubuntu releases 
do not compress their kernel modules, so we compress the # initramfs instead. Note that this is not ideal since the compressed kernel modules will # all be decompressed on boot which requires significant memory. if context.config.distribution == Distribution.debian and context.config.release in ( "sid", "testing", ): compression = Compression.none else: compression = Compression.zstd maybe_compress(context, compression, kmods, kmods) return kmods def find_devicetree(context: Context, kver: str) -> Path: assert context.config.devicetree for d in ( context.root / f"usr/lib/firmware/{kver}/device-tree", context.root / f"usr/lib/linux-image-{kver}", context.root / f"usr/lib/modules/{kver}/dtb", ): dtb = d / context.config.devicetree if dtb.exists(): return dtb die(f"Requested devicetree {context.config.devicetree} not found") def want_signed_pcrs(config: Config) -> bool: return config.sign_expected_pcr == ConfigFeature.enabled or ( config.sign_expected_pcr == ConfigFeature.auto and config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None and bool(config.sign_expected_pcr_key) and bool(config.sign_expected_pcr_certificate) ) def run_ukify( context: Context, stub: Path, output: Path, *, cmdline: Sequence[str] = (), arguments: Sequence[PathString] = (), options: Sequence[PathString] = (), sign: bool = True, ) -> None: ukify = context.config.find_binary("ukify", "/usr/lib/systemd/ukify") if not ukify: die("Could not find ukify") if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") # Older versions of systemd-stub expect the cmdline section to be null terminated. We can't # embed NUL terminators in argv so let's communicate the cmdline via a file instead. 
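# E.g. for cmdline=("console=ttyS0", "rw") the file below ends up containing
# "console=ttyS0 rw\x00", and it is handed to ukify as "--cmdline @<file>" so the
# NUL terminator survives.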
(context.workspace / "cmdline").write_text(f"{' '.join(cmdline)}\x00") cmd = [ python_binary(context.config), ukify, "build", *arguments, "--efi-arch", arch, "--stub", workdir(stub), "--output", workdir(output), *(["--cmdline", f"@{workdir(context.workspace / 'cmdline')}"] if cmdline else []), ] # fmt: skip opt: list[PathString] = [ "--ro-bind", stub, workdir(stub), "--bind", output.parent, workdir(output.parent), "--ro-bind", context.workspace / "cmdline", workdir(context.workspace / "cmdline"), ] # fmt: skip if sign and context.config.secure_boot: assert context.config.secure_boot_key assert context.config.secure_boot_certificate cmd += [ "--signtool", ( "sbsign" if context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or not context.config.find_binary("systemd-sbsign", "/usr/lib/systemd/systemd-sbsign") else "systemd-sbsign" ), ] # fmt: skip if ( context.config.secure_boot_key_source.type != KeySourceType.file or context.config.secure_boot_certificate_source.type != CertificateSourceType.file ): opt += ["--bind", "/run", "/run"] if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--signing-engine", context.config.secure_boot_key_source.source] elif context.config.secure_boot_key_source.type == KeySourceType.provider: cmd += ["--signing-provider", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): cmd += ["--secureboot-private-key", workdir(context.config.secure_boot_key)] opt += ["--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key)] else: cmd += ["--secureboot-private-key", context.config.secure_boot_key] if context.config.secure_boot_certificate_source.type == CertificateSourceType.provider: cmd += ["--certificate-provider", context.config.secure_boot_certificate_source.source] if context.config.secure_boot_certificate.exists(): cmd += ["--secureboot-certificate", workdir(context.config.secure_boot_certificate)] opt += [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), # noqa: E501 ] # fmt: skip else: cmd += ["--secureboot-certificate", context.config.secure_boot_certificate] run( cmd, stdin=( sys.stdin if context.config.secure_boot_key_source.type != KeySourceType.file else subprocess.DEVNULL ), env=context.config.environment, sandbox=context.sandbox( options=[*opt, *options], devices=context.config.secure_boot_key_source.type != KeySourceType.file, ), ) def build_uki( context: Context, stub: Path, kver: str, kimg: Path, microcodes: list[Path], initrds: list[Path], cmdline: Sequence[str], profiles: Sequence[Path], output: Path, ) -> None: if not (ukify := context.config.find_binary("ukify", "/usr/lib/systemd/ukify")): die("Could not find ukify") arguments: list[PathString] = [ "--os-release", f"@{workdir(context.root / 'usr/lib/os-release')}", "--uname", kver, "--linux", workdir(kimg), *flatten(["--join-profile", os.fspath(workdir(profile))] for profile in profiles), ] # fmt: skip options: list[PathString] = [ "--ro-bind", context.root / "usr/lib/os-release", workdir(context.root / "usr/lib/os-release"), "--ro-bind", kimg, workdir(kimg), *flatten(["--ro-bind", os.fspath(profile), os.fspath(workdir(profile))] for profile in profiles), ] # fmt: skip if context.config.devicetree: dtb = find_devicetree(context, kver) arguments += ["--devicetree", workdir(dtb)] options += ["--ro-bind", dtb, workdir(dtb)] if context.config.secure_boot: assert context.config.secure_boot_key assert context.config.secure_boot_certificate arguments 
+= ["--sign-kernel"] if want_signed_pcrs(context.config): assert context.config.sign_expected_pcr_key assert context.config.sign_expected_pcr_certificate arguments += [ # SHA1 might be disabled in OpenSSL depending on the distro so we opt to not sign # for SHA1 to avoid having to manage a bunch of configuration to re-enable SHA1. "--pcr-banks", "sha256", ] # fmt: skip # If we're providing the private key via an engine or provider, we have to pass in a X.509 # certificate via --pcr-public-key as well. if context.config.sign_expected_pcr_key_source.type != KeySourceType.file: if context.config.sign_expected_pcr_certificate_source.type == CertificateSourceType.provider: arguments += [ "--certificate-provider", f"provider:{context.config.sign_expected_pcr_certificate_source.source}", ] options += ["--bind", "/run", "/run"] if context.config.sign_expected_pcr_certificate.exists(): arguments += [ "--pcr-public-key", workdir(context.config.sign_expected_pcr_certificate), ] # fmt: skip options += [ "--ro-bind", context.config.sign_expected_pcr_certificate, workdir(context.config.sign_expected_pcr_certificate), # noqa: E501 ] # fmt: skip else: arguments += ["--pcr-public-key", context.config.sign_expected_pcr_certificate] if context.config.sign_expected_pcr_key_source.type == KeySourceType.engine: arguments += ["--signing-engine", context.config.sign_expected_pcr_key_source.source] elif context.config.sign_expected_pcr_key_source.type == KeySourceType.provider: arguments += ["--signing-provider", context.config.sign_expected_pcr_key_source.source] if context.config.sign_expected_pcr_key.exists(): arguments += ["--pcr-private-key", workdir(context.config.sign_expected_pcr_key)] options += [ "--ro-bind", context.config.sign_expected_pcr_key, workdir(context.config.sign_expected_pcr_key), # noqa: E501 ] # fmt: skip else: arguments += ["--pcr-private-key", context.config.sign_expected_pcr_key] if microcodes: # new .ucode section support? if ( systemd_tool_version( python_binary(context.config), ukify, sandbox=context.sandbox, ) >= "256" and (version := systemd_stub_version(context, stub)) and version >= "256" ): for microcode in microcodes: arguments += ["--microcode", workdir(microcode)] options += ["--ro-bind", microcode, workdir(microcode)] else: initrds = microcodes + initrds for initrd in initrds: arguments += ["--initrd", workdir(initrd)] options += ["--ro-bind", initrd, workdir(initrd)] with complete_step(f"Generating unified kernel image for kernel version {kver}"): run_ukify(context, stub, output, cmdline=cmdline, arguments=arguments, options=options) def systemd_stub_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub" return stub def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersion]: try: sdmagic = extract_pe_section(context, stub, ".sdmagic", context.workspace / "sdmagic") except KeyError: return None sdmagic_text = sdmagic.read_text().strip("\x00") # Older versions of the stub have misaligned sections which results in an empty sdmagic text. # Let's check for that explicitly and treat it as no version. # # TODO: Drop this logic once every distribution we support ships systemd-stub v254 or newer. 
if not sdmagic_text: return None if not ( version := re.match( r"#### LoaderInfo: systemd-stub (?P<version>[.~^a-zA-Z0-9-+]+) ####", sdmagic_text ) ): die(f"Unable to determine systemd-stub version, found {sdmagic_text!r}") return GenericVersion(version.group("version")) def want_uki(context: Context) -> bool: return want_efi(context.config) and ( context.config.bootloader.is_uki() or context.config.unified_kernel_images == ConfigFeature.enabled or ( context.config.unified_kernel_images == ConfigFeature.auto and systemd_stub_binary(context).exists() and context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None ) ) def find_entry_token(context: Context) -> str: if ( not context.config.find_binary("kernel-install") or ( "--version" not in run( ["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox(), ).stdout ) or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" ): return context.config.image_id or context.config.distribution.name output = json.loads( run( ["kernel-install", "--root=/buildroot", "--json=pretty", "inspect"], sandbox=context.sandbox(options=["--ro-bind", context.root, "/buildroot"]), stdout=subprocess.PIPE, env={"BOOT_ROOT": "/boot"}, ).stdout ) logging.debug(json.dumps(output, indent=4)) return cast(str, output["EntryToken"]) def finalize_cmdline( context: Context, partitions: Sequence[Partition], roothash: Optional[str] ) -> list[str]: if (context.root / "etc/kernel/cmdline").exists(): cmdline = [(context.root / "etc/kernel/cmdline").read_text().strip()] elif (context.root / "usr/lib/kernel/cmdline").exists(): cmdline = [(context.root / "usr/lib/kernel/cmdline").read_text().strip()] else: cmdline = [] if roothash: cmdline += [roothash] cmdline += context.config.kernel_command_line if not roothash: for name in ("root", "mount.usr"): type_prefix = name.removeprefix("mount.") if not (root := next((p.uuid for p in partitions if p.type.startswith(type_prefix)), None)): continue cmdline = [f"{name}=PARTUUID={root}" if c == f"{name}=PARTUUID" else c for c in cmdline] return cmdline def finalize_microcode(context: Context) -> list[Path]: if any((context.artifacts / "io.mkosi.microcode").glob("*")): return sorted((context.artifacts / "io.mkosi.microcode").iterdir()) elif microcode := build_microcode_initrd(context): return microcode return [] def finalize_initrds(context: Context) -> list[Path]: if context.config.initrds: return context.config.initrds elif any((context.artifacts / "io.mkosi.initrd").glob("*")): return sorted((context.artifacts / "io.mkosi.initrd").iterdir()) return [build_default_initrd(context)] def install_type1( context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition], cmdline: list[str], ) -> None: dst = context.root / "boot" / token / kver entry = context.root / f"boot/loader/entries/{token}-{kver}.conf" with umask(~0o700): dst.mkdir(parents=True, exist_ok=True) entry.parent.mkdir(parents=True, exist_ok=True) kmods = build_kernel_modules_initrd(context, kver) dtb = None if context.config.devicetree: dtb = dst / context.config.devicetree with umask(~0o700): dtb.parent.mkdir(parents=True, exist_ok=True) with umask(~0o600): if ( want_efi(context.config) and context.config.secure_boot and not context.config.bootloader.is_signed() and KernelType.identify(context.config, kimg) == KernelType.pe ): kimg = sign_efi_binary(context, kimg, dst / "vmlinuz") else: kimg = Path(shutil.copy2(context.root / kimg, dst / "vmlinuz")) initrds = [ Path(shutil.copy2(initrd, dst.parent / initrd.name)) for
initrd in finalize_microcode(context) + finalize_initrds(context) ] initrds += [Path(shutil.copy2(kmods, dst / "kernel-modules.initrd"))] if dtb: shutil.copy2(find_devicetree(context, kver), dtb) with entry.open("w") as f: f.write( textwrap.dedent( f"""\ title {token} {kver} version {kver} linux /{kimg.relative_to(context.root / "boot")} options {" ".join(cmdline)} """ ) ) for initrd in initrds: f.write(f"initrd /{initrd.relative_to(context.root / 'boot')}\n") if dtb: f.write(f"devicetree /{dtb.relative_to(context.root / 'boot')}\n") if want_grub_efi(context) or want_grub_bios(context, partitions): config = prepare_grub_config(context) assert config if ( not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) and not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) and (root := finalize_root(partitions)) ): cmdline = [root] + cmdline with config.open("a") as f: f.write("if [ ") conditions = [] if want_grub_efi(context) and not want_uki(context): conditions += ['"${grub_platform}" == "efi"'] if want_grub_bios(context, partitions): conditions += ['"${grub_platform}" == "pc"'] f.write(" -o ".join(conditions)) f.write(" ]; then\n") f.write( textwrap.dedent( f"""\ menuentry "{token}-{kver}" {{ linux /{kimg.relative_to(context.root / "boot")} {" ".join(cmdline)} initrd {" ".join(os.fspath(Path("/") / i.relative_to(context.root / "boot")) for i in initrds)} }} """ # noqa: E501 ) ) f.write("fi\n") def expand_kernel_specifiers(text: str, kver: str, token: str, roothash: str, boot_count: str) -> str: specifiers = { "&": "&", "e": token, "k": kver, "h": roothash, "c": boot_count, } def replacer(match: re.Match[str]) -> str: m = match.group("specifier") if specifier := specifiers.get(m): return specifier logging.warning(f"Unknown specifier '&{m}' found in {text}, ignoring") return "" return re.sub(r"&(?P<specifier>[&a-zA-Z])", replacer, text) def install_uki( context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition], profiles: Sequence[Path], cmdline: list[str], ) -> None: bootloader_entry_format = context.config.unified_kernel_image_format or "&e-&k" roothash_value = "" if roothash := finalize_roothash(partitions): roothash_value = roothash.partition("=")[2] if not context.config.unified_kernel_image_format: bootloader_entry_format += "-&h" boot_count = "" if (context.root / "etc/kernel/tries").exists(): boot_count = (context.root / "etc/kernel/tries").read_text().strip() if not context.config.unified_kernel_image_format: bootloader_entry_format += "+&c" bootloader_entry = expand_kernel_specifiers( bootloader_entry_format, kver=kver, token=token, roothash=roothash_value, boot_count=boot_count, ) if context.config.bootloader.is_uki(): if context.config.shim_bootloader != ShimBootloader.none: boot_binary = context.root / shim_second_stage_binary(context) else: boot_binary = context.root / efi_boot_binary(context) else: boot_binary = context.root / f"boot/EFI/Linux/{bootloader_entry}.efi" # Make sure the parent directory where we'll be writing the UKI exists.
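# E.g. with the default "&e-&k" entry format, a token of "fedora" and an illustrative
# kernel version of 6.8.9-300.fc40.x86_64, this resolves to
# boot/EFI/Linux/fedora-6.8.9-300.fc40.x86_64.efi.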
with umask(~0o700): boot_binary.parent.mkdir(parents=True, exist_ok=True) if context.config.bootloader.is_signed(): for p in (context.root / "usr/lib/modules" / kver).glob("*.efi"): log_step(f"Installing prebuilt UKI at {p} to {boot_binary}") shutil.copy2(p, boot_binary) break else: if context.config.bootable == ConfigFeature.enabled: die(f"Couldn't find a signed UKI binary installed at /usr/lib/modules/{kver} in the image") return else: microcodes = finalize_microcode(context) initrds = finalize_initrds(context) if context.config.kernel_modules_initrd: initrds += [build_kernel_modules_initrd(context, kver)] build_uki( context, systemd_stub_binary(context), kver, context.root / kimg, microcodes, initrds, cmdline, profiles, boot_binary, ) print_output_size(boot_binary) if want_grub_efi(context): config = prepare_grub_config(context) assert config with config.open("a") as f: f.write('if [ "${grub_platform}" == "efi" ]; then\n') f.write( textwrap.dedent( f"""\ menuentry "{boot_binary.stem}" {{ chainloader /{boot_binary.relative_to(context.root / "boot")} }} """ ) ) f.write("fi\n") def systemd_addon_stub_binary(context: Context) -> Path: arch = context.config.architecture.to_efi() stub = context.root / f"usr/lib/systemd/boot/efi/addon{arch}.efi.stub" return stub def build_uki_profiles(context: Context, cmdline: Sequence[str]) -> list[Path]: if not context.config.unified_kernel_image_profiles: return [] stub = systemd_addon_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") (context.workspace / "uki-profiles").mkdir() profiles = [] for profile in context.config.unified_kernel_image_profiles: id = profile.profile["ID"] output = context.workspace / f"uki-profiles/{id}.efi" profile_section = context.workspace / f"uki-profiles/{id}.profile" with profile_section.open("w") as f: for k, v in profile.profile.items(): if not all(c.isalnum() for c in v): v = f'"{v}"' f.write(f"{k}={v}\n") with complete_step(f"Generating UKI profile '{id}'"): run_ukify( context, stub, output, cmdline=[*cmdline, *profile.cmdline], arguments=["--profile", f"@{profile_section}"], options=["--ro-bind", profile_section, profile_section], sign=False, ) profiles += [output] return profiles def install_kernel(context: Context, partitions: Sequence[Partition]) -> None: # Iterates through all kernel versions included in the image and generates a combined # kernel+initrd+cmdline+osrelease EFI file from it and places it in the /EFI/Linux directory of # the ESP. sd-boot iterates through them and shows them in the menu. These "unified" # single-file images have the benefit that they can be signed like normal EFI binaries, and can # encode everything necessary to boot a specific root device, including the root hash. 
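# (The uki and esp output formats return early here since they produce a single UKI
# directly instead, see make_uki() further below.)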
if context.config.output_format in (OutputFormat.uki, OutputFormat.esp): return if context.config.bootable == ConfigFeature.disabled: return if context.config.bootable == ConfigFeature.auto and ( context.config.output_format == OutputFormat.cpio or context.config.output_format.is_extension_or_portable_image() or context.config.overlay ): return stub = systemd_stub_binary(context) if want_uki(context) and not stub.exists(): die( "Unified kernel image(s) requested but systemd-stub not found at " f"/{stub.relative_to(context.root)}" ) if context.config.bootable == ConfigFeature.enabled and not any(gen_kernel_images(context)): die("A bootable image was requested but no kernel was found") token = find_entry_token(context) cmdline = finalize_cmdline(context, partitions, finalize_roothash(partitions)) profiles = build_uki_profiles(context, cmdline) if want_uki(context) else [] for kver, kimg in gen_kernel_images(context): if want_uki(context): install_uki(context, kver, kimg, token, partitions, profiles, cmdline) if not want_uki(context) or want_grub_bios(context, partitions): install_type1(context, kver, kimg, token, partitions, cmdline) if context.config.bootloader.is_uki(): break def make_uki( context: Context, stub: Path, kver: str, kimg: Path, microcode: list[Path], output: Path, ) -> None: make_cpio(context.root, context.workspace / "initrd", sandbox=context.sandbox) maybe_compress( context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd", ) initrds = [context.workspace / "initrd"] build_uki( context, stub, kver, kimg, microcode, initrds, context.config.kernel_command_line, build_uki_profiles(context, context.config.kernel_command_line), output, ) if ArtifactOutput.kernel in context.config.split_artifacts: extract_pe_section(context, output, ".linux", context.staging / context.config.output_split_kernel) if ArtifactOutput.initrd in context.config.split_artifacts: extract_pe_section(context, output, ".initrd", context.staging / context.config.output_split_initrd) def make_addon(context: Context, stub: Path, output: Path) -> None: arguments: list[PathString] = [] options: list[PathString] = [] if any(context.root.iterdir()): make_cpio(context.root, context.workspace / "initrd", sandbox=context.sandbox) maybe_compress( context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd", ) arguments += ["--initrd", workdir(context.workspace / "initrd")] options += [ "--ro-bind", context.workspace / "initrd", workdir(context.workspace / "initrd") ] # fmt: skip with complete_step(f"Generating PE addon {output}"): run_ukify( context, stub, output, cmdline=context.config.kernel_command_line, arguments=arguments, options=options, ) def compressor_command(context: Context, compression: Compression) -> list[PathString]: """Returns a command suitable for compressing archives.""" if compression == Compression.gz: return [gzip_binary(context), f"-{context.config.compress_level}", "--stdout", "-"] elif compression == Compression.xz: return ["xz", "--check=crc32", f"-{context.config.compress_level}", "-T0", "--stdout", "-"] elif compression == Compression.zstd: return ["zstd", "-q", f"-{context.config.compress_level}", "-T0", "--stdout", "-"] else: die(f"Unknown compression {compression}") def maybe_compress( context: Context, compression: Compression, src: Path, dst: Optional[Path] = None, ) -> None: if not compression or src.is_dir(): if dst: move_tree( src, dst, use_subvolumes=context.config.use_subvolumes, 
sandbox=context.sandbox, ) return if not dst: dst = src.parent / f"{src.name}{compression.extension()}" cmd = compressor_command(context, compression) with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: # if src == dst, make sure dst doesn't truncate the src file but creates a new file. src.unlink() with dst.open("wb") as o: run(cmd, stdin=i, stdout=o, sandbox=context.sandbox()) def copy_nspawn_settings(context: Context) -> None: if context.config.nspawn_settings is None: return None with complete_step("Copying nspawn settings file…"): shutil.copy2(context.config.nspawn_settings, context.staging / context.config.output_nspawn_settings) def get_uki_path(context: Context) -> Optional[Path]: if not want_efi(context.config) or context.config.unified_kernel_images == ConfigFeature.disabled: return None ukis = sorted( (context.root / "boot/EFI/Linux").glob("*.efi"), key=lambda p: GenericVersion(p.name), reverse=True, ) if (uki := context.root / efi_boot_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass elif (uki := context.root / shim_second_stage_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass elif ukis: uki = ukis[0] else: return None return uki def copy_uki(context: Context) -> None: if ArtifactOutput.uki not in context.config.split_artifacts: return if (context.staging / context.config.output_split_uki).exists(): return if uki := get_uki_path(context): shutil.copy(uki, context.staging / context.config.output_split_uki) def copy_vmlinuz(context: Context) -> None: if ArtifactOutput.kernel not in context.config.split_artifacts: return if (context.staging / context.config.output_split_kernel).exists(): return # ukify will have signed the kernel image as well. Let's make sure we put the signed kernel # image in the output directory instead of the unsigned one by reading it from the UKI. if uki := get_uki_path(context): extract_pe_section(context, uki, ".linux", context.staging / context.config.output_split_kernel) return for _, kimg in gen_kernel_images(context): shutil.copy(context.root / kimg, context.staging / context.config.output_split_kernel) break def copy_initrd(context: Context) -> None: if ArtifactOutput.initrd not in context.config.split_artifacts: return if not want_initrd(context): return if (context.staging / context.config.output_split_initrd).exists(): return # Extract the combined initrds from the UKI so we can use it to direct kernel boot with qemu if needed. 
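    # Conceptually this is plain PE section extraction; outside of mkosi the same data
    # could be dumped by hand with e.g. (illustrative command, not what this code runs):
    #
    #   objcopy -O binary --only-section=.initrd uki.efi initrd.img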
if uki := get_uki_path(context): extract_pe_section(context, uki, ".initrd", context.staging / context.config.output_split_initrd) return for kver, _ in gen_kernel_images(context): initrds = finalize_initrds(context) if context.config.kernel_modules_initrd: kver = next(gen_kernel_images(context))[0] initrds += [build_kernel_modules_initrd(context, kver)] join_initrds(context.config, initrds, context.staging / context.config.output_split_initrd) break def calculate_sha256sum(context: Context) -> None: if not context.config.checksum: return with complete_step("Calculating SHA256SUMS…"): with open(context.workspace / context.config.output_checksum, "w") as f: for p in context.staging.iterdir(): if p.is_dir(): logging.warning(f"Cannot checksum directory '{p}', skipping") continue print(hash_file(p) + " *" + p.name, file=f) (context.workspace / context.config.output_checksum).rename( context.staging / context.config.output_checksum ) def calculate_signature(context: Context) -> None: if not context.config.sign or not context.config.checksum: return if context.config.openpgp_tool == "gpg": calculate_signature_gpg(context) else: calculate_signature_sop(context) def calculate_signature_gpg(context: Context) -> None: cmdline: list[PathString] = ["gpg", "--detach-sign", "--pinentry-mode", "loopback"] # Need to specify key before file to sign if context.config.key is not None: cmdline += ["--default-key", context.config.key] cmdline += [ "--output", workdir(context.staging / context.config.output_signature), workdir(context.staging / context.config.output_checksum), ] home = Path(context.config.environment.get("GNUPGHOME", INVOKING_USER.home() / ".gnupg")) if not home.exists(): die(f"GPG home {home} not found") env = dict(GNUPGHOME=os.fspath(workdir(home))) if sys.stderr.isatty(): env |= dict(GPG_TTY=os.ttyname(sys.stderr.fileno())) options: list[PathString] = [ "--bind", home, workdir(home), "--bind", context.staging, workdir(context.staging), "--bind", "/run", "/run", ] # fmt: skip with complete_step("Signing SHA256SUMS…"): run( cmdline, env=env, sandbox=context.sandbox(options=options), ) def calculate_signature_sop(context: Context) -> None: if context.config.key is None: die("Signing key is mandatory when using SOP signing") with ( complete_step("Signing SHA256SUMS…"), open(context.staging / context.config.output_checksum, "rb") as i, open(context.staging / context.config.output_signature, "wb") as o, ): run( [context.config.openpgp_tool, "sign", "/signing-key.pgp"], env=context.config.environment, stdin=i, stdout=o, sandbox=context.sandbox( options=[ "--bind", context.config.key, "/signing-key.pgp", "--bind", context.staging, workdir(context.staging), "--bind", "/run", "/run", ], ), ) # fmt: skip def dir_size(path: Union[Path, os.DirEntry[str]]) -> int: dir_sum = 0 for entry in os.scandir(path): if entry.is_symlink(): # We can ignore symlinks because they either point into our tree, # in which case we'll include the size of target directory anyway, # or outside, in which case we don't need to. 
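            # For example, a symlink usr/bin/sh -> bash adds nothing here; the bash
            # binary itself is counted when its own directory entry is visited
            # (hypothetical example).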
continue elif entry.is_file(): dir_sum += entry.stat().st_blocks * 512 elif entry.is_dir(): dir_sum += dir_size(entry) return dir_sum def save_manifest(context: Context, manifest: Optional[Manifest]) -> None: if not manifest: return if manifest.has_data(): if ManifestFormat.json in context.config.manifest_format: with complete_step(f"Saving manifest {context.config.output_manifest}"): with open(context.staging / context.config.output_manifest, "w") as f: manifest.write_json(f) if ManifestFormat.changelog in context.config.manifest_format: with complete_step(f"Saving report {context.config.output_changelog}"): with open(context.staging / context.config.output_changelog, "w") as f: manifest.write_package_report(f) def print_output_size(path: Path) -> None: if path.is_dir(): log_step(f"{path} size is " + format_bytes(dir_size(path)) + ".") else: size = format_bytes(path.stat().st_size) space = format_bytes(path.stat().st_blocks * 512) log_step(f"{path} size is {size}, consumes {space}.") def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]: if config.image == "tools": key = "tools" else: fragments = [config.distribution, config.release, config.architecture] if config.image: fragments += [config.image] key = "~".join(str(s) for s in fragments) assert config.cache_dir return ( config.cache_dir / f"{key}.cache", config.cache_dir / f"{key}.build.cache", config.cache_dir / f"{key}.manifest", ) def keyring_cache(config: Config) -> Path: if config.image == "tools": key = "tools" else: key = f"{'~'.join(str(s) for s in (config.distribution, config.release, config.architecture))}" assert config.cache_dir return config.cache_dir / f"{key}.keyring.cache" def metadata_cache(config: Config) -> Path: if config.image == "tools": key = "tools" else: key = f"{'~'.join(str(s) for s in (config.distribution, config.release, config.architecture))}" assert config.cache_dir return config.cache_dir / f"{key}.metadata.cache" def check_inputs(config: Config) -> None: """ Make sure all the inputs exist that aren't checked during config parsing because they might be created by an earlier build. 
""" for base in config.base_trees: if not base.exists(): die(f"Base tree {base} not found") if base.is_file() and base.suffix == ".raw" and os.getuid() != 0: die("Must run as root to use disk images in base trees") if config.tools_tree and not config.tools_tree.exists(): die(f"Tools tree {config.tools_tree} not found") trees_with_name = [ ("skeleton", config.skeleton_trees), ("sandbox", config.sandbox_trees), ] if config.output_format != OutputFormat.none: trees_with_name += [("extra", config.extra_trees)] for name, trees in trees_with_name: for tree in trees: if not tree.source.exists(): die(f"{name.capitalize()} tree {tree.source} not found") if ( tree.source.is_file() and tree.source.suffix == ".raw" and not tree.target and os.getuid() != 0 ): die(f"Must run as root to use disk images in {name} trees") if config.output_format != OutputFormat.none and config.bootable != ConfigFeature.disabled: for p in config.initrds: if not p.exists(): die(f"Initrd {p} not found") if not p.is_file(): die(f"Initrd {p} is not a file") for script in itertools.chain( config.sync_scripts, config.prepare_scripts, config.build_scripts, config.postinst_scripts, config.finalize_scripts, config.postoutput_scripts, ): if not os.access(script, os.X_OK): die(f"{script} is not executable") if config.secure_boot and not config.secure_boot_key: die( "SecureBoot= is enabled but no secure boot key is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.secure_boot and not config.secure_boot_certificate: die( "SecureBoot= is enabled but no secure boot certificate is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.sign_expected_pcr == ConfigFeature.enabled and not config.sign_expected_pcr_key: die( "SignExpectedPcr= is enabled but no private key is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.sign_expected_pcr == ConfigFeature.enabled and not config.sign_expected_pcr_certificate: die( "SignExpectedPcr= is enabled but no certificate is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.secure_boot_key_source != config.sign_expected_pcr_key_source: die("Secure boot key source and expected PCR signatures key source have to be the same") if config.secure_boot_certificate_source != config.sign_expected_pcr_certificate_source: die( "Secure boot certificate source and expected PCR signatures certificate source have to be the same" # noqa: E501 ) # fmt: skip if config.verity == ConfigFeature.enabled and not config.verity_key: die( "Verity= is enabled but no verity key is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) if config.verity == ConfigFeature.enabled and not config.verity_certificate: die( "Verity= is enabled but no verity certificate is configured", hint="Run mkosi genkey to generate a key/certificate pair", ) for profile in config.unified_kernel_image_profiles: if "ID" not in profile.profile: die( "UKI Profile is missing ID key in its .profile section", hint="Use Profile= to configure the profile ID", ) def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path: tool = config.find_binary(*tools) if not tool: die(f"Could not find '{tools[0]}' which is required to {reason}.", hint=hint) return tool def check_systemd_tool( config: Config, *tools: PathString, version: str, reason: str, hint: Optional[str] = None, ) -> None: tool = check_tool(config, *tools, reason=reason, hint=hint) v = systemd_tool_version(tool, 
sandbox=config.sandbox) if v < version: die( f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.", hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.", ) def check_ukify( config: Config, version: str, reason: str, hint: Optional[str] = None, ) -> None: ukify = check_tool(config, "ukify", "/usr/lib/systemd/ukify", reason=reason, hint=hint) v = systemd_tool_version(python_binary(config), ukify, sandbox=config.sandbox) if v < version: die( f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", hint="Use ToolsTree=default to get a newer version of 'ukify'.", ) def check_tools(config: Config, verb: Verb) -> None: if verb == Verb.build: if config.output_format == OutputFormat.none: return if want_efi(config): if config.unified_kernel_image_profiles: check_ukify( config, version="257", reason="build unified kernel image profiles", hint=( "Use ToolsTree=default to download most required tools including ukify automatically" ), ) elif config.unified_kernel_images == ConfigFeature.enabled: check_ukify( config, version="254", reason="build bootable images", hint=( "Use ToolsTree=default to download most required tools including ukify " "automatically or use Bootable=no to create a non-bootable image which doesn't " "require ukify" ), ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): check_systemd_tool(config, "systemd-repart", version="254", reason="build disk images") if config.selinux_relabel == ConfigFeature.enabled: check_tool(config, "setfiles", reason="relabel files") if config.secure_boot_key_source.type != KeySourceType.file: check_ukify( config, version="256", reason="sign Unified Kernel Image with OpenSSL engine", ) if want_signed_pcrs(config): check_systemd_tool( config, "systemd-measure", "/usr/lib/systemd/systemd-measure", version="256", reason="sign PCR hashes with OpenSSL engine", ) if config.verity_key_source.type != KeySourceType.file: check_systemd_tool( config, "systemd-repart", version="256", reason="sign verity roothash signature with OpenSSL engine", ) if ( want_efi(config) and config.secure_boot and config.secure_boot_auto_enroll and ( not config.find_binary("bootctl") or systemd_tool_version("bootctl", sandbox=config.sandbox) < "257" ) ): check_tool(config, "sbsiglist", reason="set up systemd-boot secure boot auto-enrollment") check_tool(config, "sbvarsign", reason="set up systemd-boot secure boot auto-enrollment") if verb == Verb.boot: check_systemd_tool(config, "systemd-nspawn", version="254", reason="boot images") if verb in (Verb.vm, Verb.qemu) and config.vmm == Vmm.vmspawn: check_systemd_tool(config, "systemd-vmspawn", version="256", reason="boot images with vmspawn") if verb == Verb.sysupdate: check_systemd_tool( config, "systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate", version="257", reason="Update the host system with systemd-sysupdate", ) def configure_ssh(context: Context) -> None: if not context.config.ssh: return unitdir = context.root / "usr/lib/systemd/system" with umask(~0o755): unitdir.mkdir(parents=True, exist_ok=True) with umask(~0o644): (unitdir / "ssh.socket").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server VSock Socket ConditionVirtualization=!container Wants=sshd-keygen.target [Socket] ListenStream=vsock::22 Accept=yes [Install] WantedBy=sockets.target """ ) ) (unitdir / "ssh@.service").write_text( textwrap.dedent( """\ [Unit] Description=Mkosi SSH Server After=sshd-keygen.target [Service] # We disable PAM because 
of an openssh-server bug where it sets PAM_RHOST=UNKNOWN when -i is # used causing a very slow reverse DNS lookup by pam. ExecStart=sshd -i -o UsePAM=no StandardInput=socket RuntimeDirectoryPreserve=yes RuntimeDirectory=sshd # ssh always exits with 255 even on normal disconnect, so let's mark that as success so we # don't get noisy logs about SSH service failures. SuccessExitStatus=255 """ ) ) preset = context.root / "usr/lib/systemd/system-preset/80-mkosi-ssh.preset" with umask(~0o755): preset.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): preset.write_text("enable ssh.socket\n") def configure_initrd(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_or_portable_image(): return if ( not (context.root / "init").exists() and not (context.root / "init").is_symlink() and (context.root / "usr/lib/systemd/systemd").exists() ): (context.root / "init").symlink_to("/usr/lib/systemd/systemd") if not context.config.make_initrd: return if ( not (context.root / "etc/initrd-release").exists() and not (context.root / "etc/initrd-release").is_symlink() ): (context.root / "etc/initrd-release").symlink_to("/etc/os-release") def configure_clock(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return with umask(~0o644): (context.root / "usr/lib/clock-epoch").touch() def run_depmod(context: Context, *, cache: bool = False) -> None: if context.config.overlay: return if not cache: for kver, _ in gen_kernel_images(context): process_kernel_modules( context, kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_include, host=context.config.kernel_modules_include_host, ), exclude=context.config.kernel_modules_exclude, ) if context.config.output_format.is_extension_or_portable_image(): return outputs = ( "modules.dep", "modules.dep.bin", "modules.symbols", "modules.symbols.bin", ) for kver, _ in gen_kernel_images(context): modulesd = context.root / "usr/lib/modules" / kver if ( not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs) ): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue with complete_step(f"Running depmod for {kver}"): run(["depmod", "--all", kver], sandbox=chroot_cmd(root=context.root)) def run_sysusers(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if not context.config.find_binary("systemd-sysusers"): logging.warning("systemd-sysusers is not installed, not generating system users") return with complete_step("Generating system users"): run( ["systemd-sysusers", "--root=/buildroot"], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), ) def run_tmpfiles(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if not context.config.find_binary("systemd-tmpfiles"): logging.warning("systemd-tmpfiles is not installed, not generating volatile files") return with complete_step("Generating volatile files"): run( [ "systemd-tmpfiles", "--root=/buildroot", "--boot", "--create", "--remove", # Exclude APIVFS and temporary files directories. *(f"--exclude-prefix={d}" for d in ("/tmp", "/var/tmp", "/run", "/proc", "/sys", "/dev")), # Exclude /var if we're not invoked as root as all the chown()'s for daemon owned # directories will fail. 
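                # (e.g. a hypothetical snippet "d /var/lib/foo 0755 daemon daemon -"
                # requires a real chown() to the daemon user, which a user namespace
                # with only a single mapped user cannot perform)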
*(["--exclude-prefix=/var"] if os.getuid() != 0 or userns_has_single_user() else []), ], env={"SYSTEMD_TMPFILES_FORCE_SUBVOL": "0"}, # systemd-tmpfiles can exit with DATAERR or CANTCREAT in some cases which are handled # as success by the systemd-tmpfiles service so we handle those as success as well. success_exit_status=(0, 65, 73), sandbox=context.sandbox( options=[ "--bind", context.root, "/buildroot", # systemd uses acl.h to parse ACLs in tmpfiles snippets which uses the host's # passwd so we have to symlink the image's passwd to make ACL parsing work. *finalize_passwd_symlinks("/buildroot"), # Sometimes directories are configured to be owned by root in tmpfiles snippets # so we want to make sure those chown()'s succeed by making ourselves the root # user so that the root user exists. "--become-root", ], ), ) # fmt: skip def run_preset(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if not context.config.find_binary("systemctl"): logging.warning("systemctl is not installed, not applying presets") return with complete_step("Applying presets…"): run( ["systemctl", "--root=/buildroot", "preset-all"], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), ) run( ["systemctl", "--root=/buildroot", "--global", "preset-all"], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), ) def run_hwdb(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_image(): return if not context.config.find_binary("systemd-hwdb"): logging.warning("systemd-hwdb is not installed, not generating hwdb") return with complete_step("Generating hardware database"): run( ["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), ) # Remove any existing hwdb in /etc in favor of the one we just put in /usr. (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True) def run_firstboot(context: Context) -> None: if context.config.overlay or context.config.output_format.is_extension_or_portable_image(): return if not context.config.find_binary("systemd-firstboot"): logging.warning("systemd-firstboot is not installed, not applying first boot settings") return password, hashed = context.config.root_password or (None, False) if password and not hashed: password = run( ["openssl", "passwd", "-stdin", "-6"], sandbox=context.sandbox(), input=password, stdout=subprocess.PIPE, ).stdout.strip() settings = ( ("--locale", "firstboot.locale", context.config.locale), ("--locale-messages", "firstboot.locale-messages", context.config.locale_messages), ("--keymap", "firstboot.keymap", context.config.keymap), ("--timezone", "firstboot.timezone", context.config.timezone), ("--hostname", None, context.config.hostname), ("--root-password-hashed", "passwd.hashed-password.root", password), ("--root-shell", "passwd.shell.root", context.config.root_shell), ) # fmt: skip options = [] creds = [] for option, cred, value in settings: # Check for None as password might be the empty string if value is None: continue options += [option, value] if cred: creds += [(cred, value)] if not options and not creds: return with complete_step("Applying first boot settings"): run( ["systemd-firstboot", "--root=/buildroot", "--force", *options], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), ) # Initrds generally don't ship with only /usr so there's not much point in putting the # credentials in /usr/lib/credstore. 
if context.config.output_format != OutputFormat.cpio or not context.config.make_initrd: with umask(~0o755): (context.root / "usr/lib/credstore").mkdir(exist_ok=True) for cred, value in creds: with umask(~0o600 if "password" in cred else ~0o644): (context.root / "usr/lib/credstore" / cred).write_text(value) def run_selinux_relabel(context: Context) -> None: if not (selinux := want_selinux_relabel(context.config, context.root)): return setfiles, policy, fc, binpolicy = selinux fc = Path("/buildroot") / fc.relative_to(context.root) binpolicy = Path("/buildroot") / binpolicy.relative_to(context.root) with complete_step(f"Relabeling files using {policy} policy"): run( [setfiles, "-mFr", "/buildroot", "-T0", "-c", binpolicy, fc, "/buildroot"], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), check=context.config.selinux_relabel == ConfigFeature.enabled, ) def need_build_overlay(config: Config) -> bool: return bool(config.build_scripts and (config.build_packages or config.prepare_scripts)) def save_cache(context: Context) -> None: if not context.config.incremental or context.config.base_trees or context.config.overlay: return final, build, manifest = cache_tree_paths(context.config) with complete_step("Installing cache copies"): rmtree(final) move_tree( context.root, final, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists(): rmtree(build) move_tree( context.workspace / "build-overlay", build, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) manifest.write_text( json.dumps( context.config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True, ) ) def have_cache(config: Config) -> bool: if not config.incremental or config.base_trees or config.overlay: return False final, build, manifest = cache_tree_paths(config) if not final.exists(): logging.debug(f"{final} does not exist, not reusing cached images") return False if config.image != "tools" and (uid := final.stat().st_uid) != os.getuid(): logging.debug( f"{final} uid ({uid}) does not match user uid ({os.getuid()}), not reusing cached images" ) return False if need_build_overlay(config) and not build.exists(): logging.debug(f"{build} does not exist, not reusing cached images") return False if manifest.exists(): prev = json.loads(manifest.read_text()) new = json.dumps(config.cache_manifest(), cls=JsonEncoder, indent=4, sort_keys=True) if prev != json.loads(new): logging.info(f"Cache manifest mismatch for {config.name()} image, not reusing cached images") if ARG_DEBUG.get(): run( ["diff", "--unified", workdir(manifest), "-"], input=new, check=False, sandbox=config.sandbox( tools=False, options=["--bind", manifest, workdir(manifest)], ), ) return False else: logging.debug(f"{manifest} does not exist, not reusing cached images") return False return True def reuse_cache(context: Context) -> bool: if not context.config.incremental or context.config.base_trees or context.config.overlay: return False final, build, _ = cache_tree_paths(context.config) if not final.exists() or (need_build_overlay(context.config) and not build.exists()): return False with complete_step("Copying cached trees"): copy_tree( final, context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) if need_build_overlay(context.config): (context.workspace / "build-overlay").symlink_to(build) return True def save_esp_components( context: Context, ) -> tuple[Optional[Path], Optional[str], 
Optional[Path], list[Path]]: if context.config.output_format == OutputFormat.addon: stub = systemd_addon_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") return shutil.copy2(stub, context.workspace), None, None, [] if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): return None, None, None, [] try: kver, kimg = next(gen_kernel_images(context)) except StopIteration: die("A kernel must be installed in the image to build a UKI") kimg = shutil.copy2(context.root / kimg, context.workspace) if not context.config.architecture.to_efi(): die(f"Architecture {context.config.architecture} does not support UEFI") stub = systemd_stub_binary(context) if not stub.exists(): die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image") stub = shutil.copy2(stub, context.workspace) microcode = build_microcode_initrd(context) return stub, kver, kimg, microcode def make_image( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, tabs: bool = False, verity: bool = False, root: Optional[Path] = None, definitions: Sequence[Path] = [], options: Sequence[PathString] = (), ) -> list[Partition]: cmdline: list[PathString] = [ "systemd-repart", "--empty=allow", "--size=auto", "--dry-run=no", "--json=pretty", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed), workdir(context.staging / context.config.output_with_format), ] # fmt: skip opts: list[PathString] = [ *options, # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. "--become-root", "--bind", context.staging, workdir(context.staging), ] # fmt: skip if root: cmdline += ["--root=/buildroot"] opts += ["--bind", root, "/buildroot"] if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if not (context.staging / context.config.output_with_format).exists(): cmdline += ["--empty=create"] if context.config.passphrase: cmdline += ["--key-file", workdir(context.config.passphrase)] opts += ["--ro-bind", context.config.passphrase, workdir(context.config.passphrase)] if skip: cmdline += ["--defer-partitions", ",".join(skip)] if split: cmdline += ["--split=yes"] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] if tabs and systemd_tool_version("systemd-repart", sandbox=context.sandbox) >= 256: cmdline += [ "--generate-fstab=/etc/fstab", "--generate-crypttab=/etc/crypttab", ] for d in definitions: cmdline += ["--definitions", workdir(d)] opts += ["--ro-bind", d, workdir(d)] with complete_step(msg): output = json.loads( run_systemd_sign_tool( context.config, cmdline=cmdline, options=opts, certificate=context.config.verity_certificate if verity else None, certificate_source=context.config.verity_certificate_source, key=context.config.verity_key if verity else None, key_source=context.config.verity_key_source, stdout=subprocess.PIPE, devices=not context.config.repart_offline, ).stdout ) logging.debug(json.dumps(output, indent=4)) partitions = [Partition.from_dict(d) for d in output] arch = context.config.architecture if context.config.verity == ConfigFeature.enabled and not any( p.type.startswith(f"usr-{arch}-verity-sig") or p.type.startswith(f"root-{arch}-verity-sig") for p in partitions ): die( "Verity is explicitly enabled but didn't find any verity signature partition", hint="Make sure to add verity signature 
partitions in mkosi.repart if building a disk image", ) if split: for p in partitions: if p.split_path: maybe_compress(context, context.config.compress_output, p.split_path) return partitions def want_verity(config: Config) -> bool: return config.verity == ConfigFeature.enabled or bool( config.verity == ConfigFeature.auto and config.verity_key and config.verity_certificate ) def make_disk( context: Context, msg: str, skip: Sequence[str] = [], split: bool = False, tabs: bool = False, ) -> list[Partition]: if context.config.output_format != OutputFormat.disk: return [] if context.config.repart_dirs: definitions = context.config.repart_dirs else: defaults = context.workspace / "repart-definitions" if not defaults.exists(): defaults.mkdir() if arch := context.config.architecture.to_efi(): bootloader = context.root / f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI" else: bootloader = None esp = context.config.bootable == ConfigFeature.enabled or ( context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists() ) bios = context.config.bootable != ConfigFeature.disabled and want_grub_bios(context) if esp or bios: # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds # and grub modules. We can't use UKIs so we have to put each kernel and initrd on # the ESP twice, so let's make the ESP twice as big in that case. (defaults / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes={"1G" if bios else "512M"} SizeMaxBytes={"1G" if bios else "512M"} """ ) ) # If grub for BIOS is installed, let's add a BIOS boot partition onto which we can # install grub. if bios: (defaults / "05-bios.conf").write_text( textwrap.dedent( f"""\ [Partition] Type={Partition.GRUB_BOOT_PARTITION_UUID} SizeMinBytes=1M SizeMaxBytes=1M """ ) ) (defaults / "10-root.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=root Format={context.config.distribution.filesystem()} CopyFiles=/ Minimize=guess """ ) ) definitions = [defaults] return make_image( context, msg=msg, skip=skip, split=split, tabs=tabs, verity=want_verity(context.config), root=context.root, definitions=definitions, ) def make_oci(context: Context, root_layer: Path, dst: Path) -> None: ca_store = dst / "blobs" / "sha256" with umask(~0o755): ca_store.mkdir(parents=True) layer_diff_digest = hash_file(root_layer) maybe_compress( context, context.config.compress_output, context.staging / "rootfs.layer", # Pass explicit destination to suppress adding an extension context.staging / "rootfs.layer", ) layer_digest = hash_file(root_layer) root_layer.rename(ca_store / layer_digest) creation_time = ( datetime.datetime.fromtimestamp(context.config.source_date_epoch, tz=datetime.timezone.utc) if context.config.source_date_epoch is not None else datetime.datetime.now(tz=datetime.timezone.utc) ).isoformat() oci_config = { "created": creation_time, "architecture": context.config.architecture.to_oci(), # Name of the operating system which the image is built to run on as defined by # https://github.com/opencontainers/image-spec/blob/v1.0.2/config.md#properties. 
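        # Note for the fields below: "diff_ids" must hash the *uncompressed* layer tar,
        # while the manifest's layer digest covers the (possibly compressed) blob, which
        # is why layer_diff_digest is computed before compression and layer_digest after.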
"os": "linux", "rootfs": { "type": "layers", "diff_ids": [f"sha256:{layer_diff_digest}"], }, "config": { "Cmd": [ "/sbin/init", *context.config.kernel_command_line, ], }, "history": [ { "created": creation_time, "comment": "Created by mkosi", }, ], } oci_config_blob = json.dumps(oci_config) oci_config_digest = hashlib.sha256(oci_config_blob.encode()).hexdigest() with umask(~0o644): (ca_store / oci_config_digest).write_text(oci_config_blob) layer_suffix = context.config.compress_output.oci_media_type_suffix() oci_manifest = { "schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", "config": { "mediaType": "application/vnd.oci.image.config.v1+json", "digest": f"sha256:{oci_config_digest}", "size": (ca_store / oci_config_digest).stat().st_size, }, "layers": [ { "mediaType": f"application/vnd.oci.image.layer.v1.tar{layer_suffix}", "digest": f"sha256:{layer_digest}", "size": (ca_store / layer_digest).stat().st_size, } ], "annotations": { "io.systemd.mkosi.version": __version__, **( { "org.opencontainers.image.version": context.config.image_version, } if context.config.image_version else {} ), }, } oci_manifest_blob = json.dumps(oci_manifest) oci_manifest_digest = hashlib.sha256(oci_manifest_blob.encode()).hexdigest() with umask(~0o644): (ca_store / oci_manifest_digest).write_text(oci_manifest_blob) (dst / "index.json").write_text( json.dumps( { "schemaVersion": 2, "mediaType": "application/vnd.oci.image.index.v1+json", "manifests": [ { "mediaType": "application/vnd.oci.image.manifest.v1+json", "digest": f"sha256:{oci_manifest_digest}", "size": (ca_store / oci_manifest_digest).stat().st_size, } ], } ) ) (dst / "oci-layout").write_text(json.dumps({"imageLayoutVersion": "1.0.0"})) def make_esp(context: Context, uki: Path) -> list[Partition]: if not (arch := context.config.architecture.to_efi()): die(f"Architecture {context.config.architecture} does not support UEFI") definitions = context.workspace / "esp-definitions" definitions.mkdir(exist_ok=True) # Use a minimum of 36MB or 260MB depending on sector size because otherwise the generated FAT # filesystem will have too few clusters to be considered a FAT32 filesystem by OVMF which will # refuse to boot from it. See # https://superuser.com/questions/1702331/what-is-the-minimum-size-of-a-4k-native-partition-when-formatted-with-fat32/1717643#1717643 if context.config.sector_size == 512: m = 36 # TODO: Figure out minimum size for 2K sector size else: m = 260 # Always reserve 10MB for filesystem metadata. size = max(uki.stat().st_size, (m - 10) * 1024**2) + 10 * 1024**2 # TODO: Remove the extra 4096 for the max size once # https://github.com/systemd/systemd/pull/29954 is in a stable release. 
(definitions / "00-esp.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=esp Format=vfat CopyFiles={workdir(uki)}:/EFI/BOOT/BOOT{arch.upper()}.EFI SizeMinBytes={size} SizeMaxBytes={size + 4096} """ ) ) return make_image( context, msg="Generating ESP image", definitions=[definitions], options=["--ro-bind", uki, workdir(uki)], ) def make_extension_or_portable_image(context: Context, output: Path) -> None: unsigned = "-unsigned" if not want_verity(context.config) else "" r = context.resources / f"repart/definitions/{context.config.output_format}{unsigned}.repart.d" cmdline: list[PathString] = [ "systemd-repart", "--root=/buildroot", "--json=pretty", "--dry-run=no", "--no-pager", f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed) if context.config.seed else "random", "--empty=create", "--size=auto", "--definitions", workdir(r), workdir(output), ] # fmt: skip options: list[PathString] = [ # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. "--become-root", "--bind", output.parent, workdir(output.parent), "--ro-bind", context.root, "/buildroot", "--ro-bind", r, workdir(r), ] # fmt: skip if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] if context.config.passphrase: cmdline += ["--key-file", context.config.passphrase] options += ["--ro-bind", context.config.passphrase, workdir(context.config.passphrase)] if context.config.sector_size: cmdline += ["--sector-size", str(context.config.sector_size)] if ArtifactOutput.partitions in context.config.split_artifacts: cmdline += ["--split=yes"] with complete_step(f"Building {context.config.output_format} extension image"): j = json.loads( run_systemd_sign_tool( context.config, cmdline=cmdline, options=options, certificate=context.config.verity_certificate if want_verity(context.config) else None, certificate_source=context.config.verity_certificate_source, key=context.config.verity_key if want_verity(context.config) else None, key_source=context.config.verity_key_source, stdout=subprocess.PIPE, devices=not context.config.repart_offline, ).stdout ) logging.debug(json.dumps(j, indent=4)) if ArtifactOutput.partitions in context.config.split_artifacts: for p in (Partition.from_dict(d) for d in j): if p.split_path: maybe_compress(context, context.config.compress_output, p.split_path) def finalize_staging(context: Context) -> None: rmtree( *(context.config.output_dir_or_cwd() / f.name for f in context.staging.iterdir()), sandbox=context.sandbox, ) for f in context.staging.iterdir(): if f.is_symlink(): (context.config.output_dir_or_cwd() / f.name).symlink_to(f.readlink()) continue if f.is_file() and context.config.output_mode is not None: os.chmod(f, context.config.output_mode) move_tree( f, context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) def clamp_mtime(path: Path, mtime: int) -> None: st = os.stat(path, follow_symlinks=False) orig = (st.st_atime_ns, st.st_mtime_ns) updated = (min(orig[0], mtime * 1_000_000_000), min(orig[1], mtime * 1_000_000_000)) # fmt: skip if orig != updated: os.utime(path, ns=updated, follow_symlinks=False) def normalize_mtime(root: Path, mtime: Optional[int], directory: Path = Path("")) -> None: if mtime is None: return if not (root / directory).exists(): return with complete_step(f"Normalizing modification times of /{directory}"): clamp_mtime(root / directory, mtime) for p in (root / 
directory).rglob("*"): clamp_mtime(p, mtime) @contextlib.contextmanager def setup_workspace(args: Args, config: Config) -> Iterator[Path]: with contextlib.ExitStack() as stack: workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace-")) # Discard setuid/setgid bits as these are inherited and can leak into the image. workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) stack.callback(lambda: rmtree(workspace, sandbox=config.sandbox)) (workspace / "tmp").mkdir(mode=0o1777) with scopedenv({"TMPDIR": os.fspath(workspace / "tmp")}): try: yield Path(workspace) finally: if args.debug_workspace: stack.pop_all() log_notice(f"Workspace: {workspace}") @contextlib.contextmanager def lock_repository_metadata(config: Config) -> Iterator[None]: subdir = config.distribution.package_manager(config).subdir(config) with contextlib.ExitStack() as stack: for d in ("cache", "lib"): if (src := config.package_cache_dir_or_default() / d / subdir).exists(): stack.enter_context(flock(src)) yield def copy_repository_metadata(config: Config, dst: Path) -> None: subdir = config.distribution.package_manager(config).subdir(config) with complete_step("Copying repository metadata"): for d in ("cache", "lib"): src = config.package_cache_dir_or_default() / d / subdir if not src.exists(): logging.debug(f"{src} does not exist, not copying repository metadata from it") continue with tempfile.TemporaryDirectory() as tmp: os.chmod(tmp, 0o755) # cp doesn't support excluding directories but we can imitate it by bind mounting # an empty directory over the directories we want to exclude. exclude: list[PathString] if d == "cache": exclude = flatten( ("--ro-bind", tmp, workdir(p)) for p in config.distribution.package_manager(config).cache_subdirs(src) ) else: exclude = flatten( ("--ro-bind", tmp, workdir(p)) for p in config.distribution.package_manager(config).state_subdirs(src) ) subdst = dst / d / subdir with umask(~0o755): subdst.mkdir(parents=True, exist_ok=True) def sandbox( *, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return config.sandbox(options=[*options, *exclude]) copy_tree(src, subdst, sandbox=sandbox) @contextlib.contextmanager def createrepo(context: Context) -> Iterator[None]: st = context.repository.stat() try: yield finally: if context.repository.stat().st_mtime_ns != st.st_mtime_ns: with complete_step("Rebuilding local package repository"): context.config.distribution.createrepo(context) def make_rootdir(context: Context) -> None: if context.root.exists(): return with umask(~0o755): # Using a btrfs subvolume as the upperdir in an overlayfs results in EXDEV so make sure we # create the root directory as a regular directory if the Overlay= option is enabled. 
if context.config.overlay: context.root.mkdir() else: make_tree(context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox) def build_image(context: Context) -> None: manifest = Manifest(context) if context.config.manifest_format else None install_sandbox_trees(context.config, context.sandbox_tree) with mount_base_trees(context): install_base_trees(context) cached = reuse_cache(context) make_rootdir(context) wantrepo = ( ( not cached and ( context.config.packages or context.config.build_packages or context.config.prepare_scripts ) ) or context.config.volatile_packages or context.config.postinst_scripts or context.config.finalize_scripts ) context.config.distribution.setup(context) if wantrepo: with createrepo(context): install_package_directories(context, context.config.package_directories) install_package_directories(context, context.config.volatile_package_directories) install_package_directories(context, [context.package_dir]) if not cached: install_skeleton_trees(context) install_distribution(context) run_prepare_scripts(context, build=False) install_build_packages(context) run_prepare_scripts(context, build=True) fixup_vmlinuz_location(context) run_depmod(context, cache=True) save_cache(context) reuse_cache(context) check_root_populated(context) run_build_scripts(context) if context.config.output_format == OutputFormat.none: finalize_staging(context) rmtree(context.root, sandbox=context.sandbox) return if wantrepo: with createrepo(context): install_package_directories(context, [context.package_dir]) install_volatile_packages(context) install_build_dest(context) install_extra_trees(context) run_postinst_scripts(context) fixup_vmlinuz_location(context) configure_autologin(context) configure_os_release(context) configure_extension_release(context) configure_initrd(context) configure_ssh(context) configure_clock(context) install_systemd_boot(context) install_grub(context) install_shim(context) run_sysusers(context) run_tmpfiles(context) run_preset(context) run_depmod(context) run_firstboot(context) run_hwdb(context) # These might be removed by the next steps, so let's save them for later if needed. 
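    # ("the next steps" being remove_packages()/remove_files() below, which may strip the
    # kernel and stub out of the image before the UKI/ESP outputs are assembled)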
stub, kver, kimg, microcode = save_esp_components(context) remove_packages(context) if manifest: manifest.record_packages() run_selinux_relabel(context) clean_package_manager_metadata(context) remove_files(context) run_finalize_scripts(context) normalize_mtime(context.root, context.config.source_date_epoch) partitions = make_disk(context, skip=("esp", "xbootldr"), tabs=True, msg="Generating disk image") install_kernel(context, partitions) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("boot")) normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("efi")) partitions = make_disk(context, msg="Formatting ESP/XBOOTLDR partitions") grub_bios_setup(context, partitions) if ArtifactOutput.partitions in context.config.split_artifacts: make_disk(context, split=True, msg="Extracting partitions") copy_nspawn_settings(context) copy_uki(context) copy_vmlinuz(context) copy_initrd(context) if context.config.output_format == OutputFormat.tar: make_tar(context.root, context.staging / context.config.output_with_format, sandbox=context.sandbox) elif context.config.output_format == OutputFormat.oci: make_tar(context.root, context.staging / "rootfs.layer", sandbox=context.sandbox) make_oci( context, context.staging / "rootfs.layer", context.staging / context.config.output_with_format, ) elif context.config.output_format == OutputFormat.cpio: make_cpio(context.root, context.staging / context.config.output_with_format, sandbox=context.sandbox) elif context.config.output_format == OutputFormat.uki: assert stub and kver and kimg make_uki(context, stub, kver, kimg, microcode, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.esp: assert stub and kver and kimg make_uki(context, stub, kver, kimg, microcode, context.staging / context.config.output_split_uki) make_esp(context, context.staging / context.config.output_split_uki) elif context.config.output_format == OutputFormat.addon: assert stub make_addon(context, stub, context.staging / context.config.output_with_format) elif context.config.output_format.is_extension_or_portable_image(): make_extension_or_portable_image(context, context.staging / context.config.output_with_format) elif context.config.output_format == OutputFormat.directory: context.root.rename(context.staging / context.config.output_with_format) if context.config.output_format.use_outer_compression(): maybe_compress( context, context.config.compress_output, context.staging / context.config.output_with_format, context.staging / context.config.output_with_compression, ) calculate_sha256sum(context) calculate_signature(context) save_manifest(context, manifest) output_base = context.staging / context.config.output if not output_base.exists() or output_base.is_symlink(): output_base.unlink(missing_ok=True) output_base.symlink_to(context.config.output_with_compression) run_postoutput_scripts(context) finalize_staging(context) if not context.args.debug_workspace: rmtree(context.root, sandbox=context.sandbox) print_output_size(context.config.output_dir_or_cwd() / context.config.output_with_compression) def in_sandbox() -> bool: return parse_boolean(os.getenv("MKOSI_IN_SANDBOX", "0")) def run_sandbox(args: Args, config: Config) -> None: if in_sandbox(): die( "mkosi sandbox cannot be invoked from within another mkosi sandbox environment", hint="Exit the current sandbox environment and try again", ) if not args.cmdline: die("Please specify a command to execute in the sandbox") mounts = 
finalize_certificate_mounts(config, relaxed=True) if config.tools() != Path("/") and (config.tools() / "etc/crypto-policies").exists(): mounts += ["--ro-bind", config.tools() / "etc/crypto-policies", Path("/etc/crypto-policies")] # Since we reuse almost every top level directory from the host except /usr, the crypto mountpoints # have to exist already in these directories or we'll fail with a permission error. Let's check this # early and show a better error and a suggestion on how users can fix this issue. We use slice # notation to get every 3rd item from the mounts list which is the destination path. for dst in mounts[2::3]: if not Path(dst).exists(): die( f"Missing mountpoint {dst}", hint=f"Create an empty directory at {dst} using 'mkdir -p {dst}' as root and try again", ) hd, hr = detect_distribution() env = {"MKOSI_IN_SANDBOX": "1"} if hd: env |= {"MKOSI_HOST_DISTRIBUTION": str(hd)} if hr: env |= {"MKOSI_HOST_RELEASE": hr} cmdline = [*args.cmdline] if sys.stdin.isatty() and config.find_binary("systemd-pty-forward"): cmdline = [ "systemd-pty-forward", "--title=mkosi-sandbox", "--background=48;2;12;51;51", # cyan *cmdline, ] with contextlib.ExitStack() as stack: if config.tools() != Path("/"): d = stack.enter_context(tempfile.TemporaryDirectory(prefix="mkosi-path-")) # We have to point zipapp to a directory containing the mkosi module and set the entrypoint # manually instead of directly at the mkosi package, otherwise we get ModuleNotFoundError when # trying to run a zipapp created from a packaged version of mkosi. While zipapp.create_archive() # supports a filter= argument, trying to use this within a site-packages directory is rather slow # so we copy the mkosi package to a temporary directory instead which is much faster. with ( tempfile.TemporaryDirectory(prefix="mkosi-zipapp-") as tmp, resource_path(sys.modules[__package__ or __name__]) as module, ): copy_tree(module, Path(tmp) / module.name, sandbox=config.sandbox) zipapp.create_archive( source=tmp, target=Path(d) / "mkosi", main="mkosi.__main__:main", interpreter="/usr/bin/env python3", ) make_executable(Path(d) / "mkosi") mounts += ["--ro-bind", d, "/mkosi"] stack.enter_context(scopedenv({"PATH": f"/mkosi:{os.environ['PATH']}"})) run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | env, log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=["--same-dir", *mounts], ), ) def run_shell(args: Args, config: Config) -> None: opname = "acquire shell in" if args.verb == Verb.shell else "boot" if config.output_format not in (OutputFormat.directory, OutputFormat.disk): die(f"Cannot {opname} {config.output_format} images with systemd-nspawn") if config.output_format.use_outer_compression() and config.compress_output: die(f"Cannot {opname} compressed {config.output_format} images with systemd-nspawn") cmdline: list[PathString] = ["systemd-nspawn", "--quiet", "--link-journal=no"] if config.runtime_network == Network.user: cmdline += ["--resolv-conf=auto"] elif config.runtime_network == Network.interface: cmdline += ["--private-network", "--network-veth"] elif config.runtime_network == Network.none: cmdline += ["--private-network"] # If we copied in a .nspawn file, make sure it's actually honoured if config.nspawn_settings: cmdline += ["--settings=trusted"] if args.verb == Verb.boot: cmdline += ["--boot"] else: cmdline += [ f"--rlimit=RLIMIT_CORE={format_rlimit(resource.RLIMIT_CORE)}", "--console=autopipe", ] # Underscores are not allowed in machine names so replace them with hyphens. 
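    # e.g. a config named "my_image" (hypothetical) yields the machine name "my-image".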
name = config.machine_or_name().replace("_", "-") cmdline += ["--machine", name] for k, v in finalize_credentials(config).items(): cmdline += [f"--set-credential={k}:{v}"] cmdline += ["--register", yes_no(finalize_register(config))] with contextlib.ExitStack() as stack: # Make sure the latest nspawn settings are always used. if config.nspawn_settings: if not (config.output_dir_or_cwd() / f"{name}.nspawn").exists(): stack.callback( lambda: (config.output_dir_or_cwd() / f"{name}.nspawn").unlink(missing_ok=True) ) shutil.copy2(config.nspawn_settings, config.output_dir_or_cwd() / f"{name}.nspawn") # If we're booting a directory image that wasn't built by root, we always make an ephemeral # copy to avoid ending up with files not owned by the directory image owner in the # directory image. if config.ephemeral or ( config.output_format == OutputFormat.directory and args.verb == Verb.boot and (config.output_dir_or_cwd() / config.output).stat().st_uid != 0 ): fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) else: fname = stack.enter_context(flock_or_die(config.output_dir_or_cwd() / config.output)) if config.output_format == OutputFormat.disk and args.verb == Verb.boot: run( [ "systemd-repart", "--image", workdir(fname), *([f"--size={config.runtime_size}"] if config.runtime_size else []), "--no-pager", "--dry-run=no", "--offline=no", "--pretty=no", workdir(fname), ], stdin=sys.stdin, env=config.environment, sandbox=config.sandbox( network=True, devices=True, options=["--bind", fname, workdir(fname)], setup=become_root_cmd(), ), ) # fmt: skip if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] owner = os.stat(fname).st_uid if owner != 0: # Let's allow running a shell in a non-ephemeral image but in that case only map a # single user into the image so it can't get polluted with files or directories # owned by other users. if ( args.verb == Verb.shell and config.output_format == OutputFormat.directory and not config.ephemeral ): range = 1 else: range = 65536 cmdline += [f"--private-users={owner}:{range}"] else: cmdline += ["--image", fname] if config.runtime_build_sources: for t in config.build_sources: src, dst = t.with_prefix("/work/src") uidmap = "rootidmap" if src.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{src}:{dst}:norbind,{uidmap}"] if config.build_dir: uidmap = "rootidmap" if config.build_dir.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{config.build_dir}:/work/build:norbind,{uidmap}"] for tree in config.runtime_trees: target = Path("/root/src") / (tree.target or "") # We add norbind because very often RuntimeTrees= will be used to mount the source # directory into the container and the output directory from which we're running will # very likely be a subdirectory of the source directory which would mean we'd be # mounting the container root directory as a subdirectory in itself which tends to lead # to all kinds of weird issues, which we avoid by not doing a recursive mount which # means the container root directory mounts will be skipped. 
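        # For example (hypothetical paths): with the source at /home/user/proj mounted to
        # /root/src and the image output under /home/user/proj/mkosi.output, a recursive
        # bind would end up mounting the booted root directory inside itself.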
uidmap = "rootidmap" if tree.source.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{tree.source}:{target}:norbind,{uidmap}"] if config.runtime_home and (path := current_home_dir()): uidmap = "rootidmap" if path.stat().st_uid != 0 else "noidmap" cmdline += ["--bind", f"{path}:/root:norbind,{uidmap}"] if config.runtime_scratch == ConfigFeature.enabled or ( config.runtime_scratch == ConfigFeature.auto and config.output_format == OutputFormat.disk ): scratch = stack.enter_context(tempfile.TemporaryDirectory(dir="/var/tmp")) os.chmod(scratch, 0o1777) cmdline += ["--bind", f"{scratch}:/var/tmp"] if args.verb == Verb.boot and config.forward_journal: with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: addr = ( Path(os.getenv("TMPDIR", "/tmp")) / f"mkosi-journal-remote-unix-{uuid.uuid4().hex[:16]}" ) sock.bind(os.fspath(addr)) sock.listen() if config.output_format == OutputFormat.directory and (stat := os.stat(fname)).st_uid != 0: os.chown(addr, stat.st_uid, stat.st_gid) stack.enter_context(start_journal_remote(config, sock.fileno())) cmdline += [ "--bind", f"{addr}:/run/host/journal/socket", "--set-credential=journal.forward_to_socket:/run/host/journal/socket", ] # fmt: skip for p in config.unit_properties: cmdline += ["--property", p] if args.verb == Verb.boot: # Add nspawn options first since systemd-nspawn ignores all options after the first argument. argv = args.cmdline # When invoked by the kernel, all unknown arguments are passed as environment variables # to pid1. Let's mimic the same behavior when we invoke nspawn as a container. for arg in itertools.chain( config.kernel_command_line, finalize_kernel_command_line_extra(config), ): name, sep, value = arg.partition("=") # If there's a '.' in the argument name, it's not considered an environment # variable by the kernel. if sep and "." not in name: cmdline += ["--setenv", f"{name.replace('-', '_')}={value}"] else: # kernel cmdline config of the form systemd.xxx= get interpreted by systemd # when running in nspawn as well. 
argv += [arg] cmdline += argv elif args.cmdline: cmdline += ["--"] cmdline += args.cmdline run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=["--same-dir"], setup=become_root_cmd(), ), ) def run_systemd_tool(tool: str, args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.directory): die(f"{config.output_format} images cannot be inspected with {tool}") if ( args.verb in (Verb.journalctl, Verb.coredumpctl) and config.output_format == OutputFormat.disk and os.getuid() != 0 ): need_root = True else: need_root = False if (tool_path := config.find_binary(tool)) is None: die(f"Failed to find {tool}") if config.ephemeral: die(f"Images booted in ephemeral mode cannot be inspected with {tool}") if not (output := config.output_dir_or_cwd() / config.output).exists(): die( f"Output {output} does not exist, cannot inspect with {tool}", hint=f"Build and boot the image first before inspecting it with {tool}", ) run( [tool_path, "--root" if output.is_dir() else "--image", output, *args.cmdline], stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox( network=True, devices=config.output_format == OutputFormat.disk, relaxed=True, setup=become_root_cmd() if need_root else [], ), ) def run_journalctl(args: Args, config: Config) -> None: run_systemd_tool("journalctl", args, config) def run_coredumpctl(args: Args, config: Config) -> None: run_systemd_tool("coredumpctl", args, config) def run_serve(args: Args, config: Config) -> None: """Serve the output directory via a tiny HTTP server""" run( [python_binary(config), "-m", "http.server", "8081"], stdin=sys.stdin, stdout=sys.stdout, sandbox=config.sandbox( network=True, relaxed=True, options=["--chdir", config.output_dir_or_cwd()], ), ) def generate_key_cert_pair(args: Args) -> None: """Generate a private key and accompanying X509 certificate using openssl""" keylength = 2048 expiration_date = datetime.date.today() + datetime.timedelta(int(args.genkey_valid_days)) cn = expand_specifier(args.genkey_common_name) for f in ("mkosi.key", "mkosi.crt"): if Path(f).exists() and not args.force: die( f"{f} already exists", hint="To generate new keys, first remove mkosi.key and mkosi.crt", ) log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.") logging.info( textwrap.dedent( f""" The keys will expire in {args.genkey_valid_days} days ({expiration_date:%A %d. %B %Y}). Remember to roll them over to new ones before then. 
""" ) ) run( [ "openssl", "req", "-new", "-x509", "-newkey", f"rsa:{keylength}", "-keyout", "mkosi.key", "-out", "mkosi.crt", "-days", str(args.genkey_valid_days), "-subj", f"/CN={cn}/", "-nodes" ], env=dict(OPENSSL_CONF="/dev/null"), ) # fmt: skip def bump_image_version() -> None: """Write current image version plus one to mkosi.version""" version_file = Path("mkosi.version") if not version_file.exists(): die(f"Cannot bump image version, '{version_file}' not found") if os.access(version_file, os.X_OK): die(f"Cannot bump image version, '{version_file}' is executable") version = version_file.read_text().strip() v = version.split(".") try: v[-1] = str(int(v[-1]) + 1) except ValueError: v += ["2"] logging.warning("Last component of current version is not a decimal integer, appending '.2'") new_version = ".".join(v) logging.info(f"Bumping version: '{version}' → '{new_version}'") version_file.write_text(f"{new_version}\n") def expand_specifier(s: str) -> str: return s.replace("%u", INVOKING_USER.name()) def finalize_default_tools(config: Config, *, resources: Path) -> Config: if not config.tools_tree_distribution: die( f"{config.distribution} does not have a default tools tree distribution", hint="use ToolsTreeDistribution= to set one explicitly", ) cmdline = [ "--directory", "", "--distribution", str(config.tools_tree_distribution), *(["--release", config.tools_tree_release] if config.tools_tree_release else []), *(["--mirror", config.tools_tree_mirror] if config.tools_tree_mirror else []), *([f"--repositories={repository}" for repository in config.tools_tree_repositories]), *([f"--sandbox-tree={tree}" for tree in config.tools_tree_sandbox_trees]), "--repository-key-check", str(config.repository_key_check), "--repository-key-fetch", str(config.repository_key_fetch), "--cache-only", str(config.cacheonly), *(["--output-directory", str(config.output_dir)] if config.output_dir else []), *(["--workspace-directory", str(config.workspace_dir)] if config.workspace_dir else []), *(["--cache-directory", str(config.cache_dir)] if config.cache_dir else []), *(["--package-cache-directory", str(config.package_cache_dir)] if config.package_cache_dir else []), "--incremental", str(config.incremental), *([f"--package={package}" for package in config.tools_tree_packages]), *([f"--package-directory={directory}" for directory in config.tools_tree_package_directories]), "--output=tools", *(["--source-date-epoch", str(config.source_date_epoch)] if config.source_date_epoch is not None else []), # noqa: E501 *([f"--environment={k}='{v}'" for k, v in config.environment.items()]), *(["--proxy-url", config.proxy_url] if config.proxy_url else []), *([f"--proxy-exclude={host}" for host in config.proxy_exclude]), *(["--proxy-peer-certificate", str(p)] if (p := config.proxy_peer_certificate) else []), *(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []), *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []), ] # fmt: skip _, [tools] = parse_config( cmdline + ["--include=mkosi-tools", "build"], resources=resources, ) tools = dataclasses.replace(tools, image="tools") return tools def check_workspace_directory(config: Config) -> None: wd = config.workspace_dir_or_default() for tree in config.build_sources: if wd.is_relative_to(tree.source): die( f"The workspace directory ({wd}) cannot be a subdirectory of " f"any source directory ({tree.source})", hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure " "a different workspace directory", 
            )


def run_clean_scripts(config: Config) -> None:
    if not config.clean_scripts:
        return

    for script in config.clean_scripts:
        if not os.access(script, os.X_OK):
            die(f"{script} is not executable")

    env = dict(
        DISTRIBUTION=str(config.distribution),
        RELEASE=config.release,
        ARCHITECTURE=str(config.architecture),
        DISTRIBUTION_ARCHITECTURE=config.distribution.architecture(config.architecture),
        SRCDIR="/work/src",
        OUTPUTDIR="/work/out",
        MKOSI_UID=str(os.getuid()),
        MKOSI_GID=str(os.getgid()),
        MKOSI_CONFIG="/work/config.json",
    )

    if config.profiles:
        env["PROFILES"] = " ".join(config.profiles)

    with (
        finalize_source_mounts(config, ephemeral=False) as sources,
        finalize_config_json(config) as json,
    ):
        for script in config.clean_scripts:
            with complete_step(f"Running clean script {script}…"):
                run(
                    ["/work/clean"],
                    env=env | config.environment,
                    sandbox=config.sandbox(
                        tools=False,
                        options=[
                            "--dir", "/work/src",
                            "--chdir", "/work/src",
                            "--dir", "/work/out",
                            "--ro-bind", script, "/work/clean",
                            "--ro-bind", json, "/work/config.json",
                            *(["--bind", str(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []),  # noqa: E501
                            *sources,
                        ],
                    ),
                    stdin=sys.stdin,
                )  # fmt: skip


def validate_certificates_and_keys(config: Config) -> None:
    keyutil = config.find_binary("systemd-keyutil", "/usr/lib/systemd/systemd-keyutil")
    if not keyutil:
        return

    if config.verity != ConfigFeature.disabled and config.verity_certificate and config.verity_key:
        run_systemd_sign_tool(
            config,
            cmdline=[keyutil, "validate"],
            options=[],
            certificate=config.verity_certificate,
            certificate_source=config.verity_certificate_source,
            key=config.verity_key,
            key_source=config.verity_key_source,
            stdout=subprocess.DEVNULL,
        )

    if (
        config.bootable != ConfigFeature.disabled
        and config.secure_boot
        and config.secure_boot_certificate
        and config.secure_boot_key
    ):
        run_systemd_sign_tool(
            config,
            cmdline=[keyutil, "validate"],
            options=[],
            certificate=config.secure_boot_certificate,
            certificate_source=config.secure_boot_certificate_source,
            key=config.secure_boot_key,
            key_source=config.secure_boot_key_source,
            stdout=subprocess.DEVNULL,
        )

    if (
        config.bootable != ConfigFeature.disabled
        and config.sign_expected_pcr != ConfigFeature.disabled
        and config.sign_expected_pcr_certificate
        and config.sign_expected_pcr_key
    ):
        run_systemd_sign_tool(
            config,
            cmdline=[keyutil, "validate"],
            options=[],
            certificate=config.sign_expected_pcr_certificate,
            certificate_source=config.sign_expected_pcr_certificate_source,
            key=config.sign_expected_pcr_key,
            key_source=config.sign_expected_pcr_key_source,
            stdout=subprocess.DEVNULL,
        )


def needs_build(args: Args, config: Config, force: int = 1) -> bool:
    return (
        args.force >= force
        or not (config.output_dir_or_cwd() / config.output_with_compression).exists()
        # When the output is a directory, its name is the same as the symlink we create that points to the
        # actual output when not building a directory. So if the full output path exists, we have to check
        # that it's not a symlink as well.
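        # So an output path that exists but is itself a symlink still triggers a rebuild here.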
        or (config.output_dir_or_cwd() / config.output_with_compression).is_symlink()
    )


def remove_cache_entries(config: Config, *, extra: Sequence[Path] = ()) -> None:
    if not config.cache_dir:
        return

    sandbox = functools.partial(config.sandbox, tools=False)

    if any(p.exists() for p in itertools.chain(cache_tree_paths(config), extra)):
        with complete_step(f"Removing cache entries of {config.name()} image…"):
            rmtree(
                *(p for p in itertools.chain(cache_tree_paths(config), extra) if p.exists()),
                sandbox=sandbox,
            )


def run_clean(args: Args, config: Config) -> None:
    # We remove any cached images if either the user used --force twice, or he/she called "clean"
    # with it passed once. Let's also remove the downloaded package cache if the user specified one
    # additional "--force".

    # We don't want to require a tools tree to run mkosi clean so we pass in a sandbox that
    # disables use of the tools tree. We still need a sandbox as we need to acquire privileges to
    # be able to remove various files from the rootfs.
    sandbox = functools.partial(config.sandbox, tools=False)

    if args.verb == Verb.clean:
        remove_output_dir = config.output_format != OutputFormat.none
        remove_build_cache = args.force > 0 or args.wipe_build_dir
        remove_image_cache = args.force > 0
        remove_package_cache = args.force > 1
    else:
        remove_output_dir = config.output_format != OutputFormat.none and args.force > 0
        remove_build_cache = args.force > 1 or args.wipe_build_dir
        remove_image_cache = args.force > 1 or not have_cache(config)
        remove_package_cache = args.force > 2

    if remove_output_dir:
        outputs = {
            config.output_dir_or_cwd() / output
            for output in config.outputs
            if (
                (config.output_dir_or_cwd() / output).exists()
                or (config.output_dir_or_cwd() / output).is_symlink()
            )
        }

        # Make sure we resolve the symlink we create in the output directory and remove its target
        # as well as it might not be in the list of outputs anymore if the compression or output
        # format was changed.
        outputs |= {o.resolve() for o in outputs}

        if outputs:
            with (
                complete_step(f"Removing output files of {config.name()} image…"),
                flock_or_die(config.output_dir_or_cwd() / config.output)
                if (config.output_dir_or_cwd() / config.output).exists()
                else contextlib.nullcontext(),
            ):
                rmtree(*outputs, sandbox=sandbox)

            run_clean_scripts(config)

    if (
        remove_build_cache
        and config.build_dir
        and config.build_dir.exists()
        and any(config.build_dir.iterdir())
    ):
        with complete_step(f"Clearing out build directory of {config.name()} image…"):
            rmtree(*config.build_dir.iterdir(), sandbox=sandbox)

    if remove_image_cache and config.cache_dir:
        extra = [keyring_cache(config), metadata_cache(config)] if not config.image else []
        remove_cache_entries(config, extra=extra)

    if remove_package_cache and any(config.package_cache_dir_or_default().glob("*")):
        subdir = config.distribution.package_manager(config).subdir(config)

        with (
            complete_step(f"Clearing out package cache of {config.name()} image…"),
            lock_repository_metadata(config),
        ):
            rmtree(
                *(config.package_cache_dir_or_default() / d / subdir for d in ("cache", "lib")),
                sandbox=sandbox,
            )


def ensure_directories_exist(config: Config) -> None:
    for p in (
        config.output_dir,
        config.cache_dir,
        config.package_cache_dir_or_default(),
        config.build_dir,
        config.workspace_dir,
    ):
        if not p or p.exists():
            continue

        p.mkdir(parents=True, exist_ok=True)

    if config.build_dir:
        st = config.build_dir.stat()

        # Discard setuid/setgid bits if set as these are inherited and can leak into the image.
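        # stat.S_IMODE() masks away the file type bits so only the permission bits are tested and rewritten.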
        if stat.S_IMODE(st.st_mode) & (stat.S_ISGID | stat.S_ISUID):
            config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID | stat.S_ISUID))


def sync_repository_metadata(
    args: Args,
    images: Sequence[Config],
    *,
    resources: Path,
    keyring_dir: Path,
    metadata_dir: Path,
) -> None:
    last = images[-1]

    # If we have a metadata cache and any cached image and using cached metadata is not explicitly disabled,
    # reuse the metadata cache.
    if (
        last.incremental
        and keyring_cache(last).exists()
        and metadata_cache(last).exists()
        and last.cacheonly != Cacheonly.never
        and any(have_cache(config) for config in images)
    ):
        if any(keyring_cache(last).iterdir()):
            with complete_step("Copying cached package manager keyring"):
                copy_tree(
                    keyring_cache(last),
                    keyring_dir,
                    use_subvolumes=last.use_subvolumes,
                    sandbox=last.sandbox,
                )

        with complete_step("Copying cached package manager metadata"):
            copy_tree(
                metadata_cache(last),
                metadata_dir,
                use_subvolumes=last.use_subvolumes,
                sandbox=last.sandbox,
            )

        return

    subdir = last.distribution.package_manager(last).subdir(last)

    for d in ("cache", "lib"):
        (last.package_cache_dir_or_default() / d / subdir).mkdir(parents=True, exist_ok=True)

    # Sync repository metadata unless explicitly disabled.
    if last.cacheonly not in (Cacheonly.always, Cacheonly.metadata):
        with setup_workspace(args, last) as workspace:
            context = Context(
                args,
                last,
                workspace=workspace,
                resources=resources,
                keyring_dir=keyring_dir,
                metadata_dir=last.package_cache_dir_or_default(),
            )
            context.root.mkdir(mode=0o755)

            install_sandbox_trees(context.config, context.sandbox_tree)
            context.config.distribution.setup(context)
            context.config.distribution.keyring(context)

            with complete_step("Syncing package manager metadata"), lock_repository_metadata(last):
                context.config.distribution.package_manager(context.config).sync(
                    context,
                    force=context.args.force > 1 or context.config.cacheonly == Cacheonly.never,
                )

    src = last.package_cache_dir_or_default() / "cache" / subdir
    for p in last.distribution.package_manager(last).cache_subdirs(src):
        p.mkdir(parents=True, exist_ok=True)

    # If we're in incremental mode and caching metadata is not explicitly disabled, cache the keyring and the
    # synced repository metadata so we can reuse them later.
    if last.incremental and last.cacheonly != Cacheonly.never:
        rmtree(keyring_cache(last), metadata_cache(last), sandbox=last.sandbox)

        for p in (keyring_cache(last), metadata_cache(last)):
            make_tree(p, use_subvolumes=last.use_subvolumes, sandbox=last.sandbox)

        copy_tree(keyring_dir, keyring_cache(last), use_subvolumes=last.use_subvolumes, sandbox=last.sandbox)
        copy_repository_metadata(last, metadata_cache(last))

        copy_tree(
            metadata_cache(last),
            metadata_dir,
            use_subvolumes=last.use_subvolumes,
            sandbox=last.sandbox,
        )
    else:
        copy_repository_metadata(last, metadata_dir)


def run_build(
    args: Args,
    config: Config,
    *,
    resources: Path,
    keyring_dir: Path,
    metadata_dir: Path,
    package_dir: Optional[Path] = None,
) -> None:
    if os.getuid() != 0:
        acquire_privileges()

    unshare(CLONE_NEWNS)

    if os.getuid() == 0:
        mount("", "/", "", MS_SLAVE | MS_REC, "")

    # For extra safety when running as root, remount a bunch of directories read-only unless the output
    # directory is located in it.
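    # Note that /usr and /opt deliberately remain executable below (no MOUNT_ATTR_NOEXEC) since tools may
    # still have to run from them.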
    if os.getuid() == 0:
        remount = ["/etc", "/opt", "/boot", "/efi", "/media", "/usr"]

        for d in remount:
            if not Path(d).exists():
                continue

            if any(
                p and p.is_relative_to(d)
                for p in (
                    config.workspace_dir_or_default(),
                    config.package_cache_dir_or_default(),
                    config.cache_dir,
                    config.output_dir_or_cwd(),
                )
            ):
                continue

            attrs = MOUNT_ATTR_RDONLY
            if d not in ("/usr", "/opt"):
                attrs |= MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | MOUNT_ATTR_NOEXEC

            mount_rbind(d, d, attrs)

    with (
        complete_step(f"Building {config.name()} image"),
        setup_workspace(args, config) as workspace,
    ):
        build_image(
            Context(
                args,
                config,
                workspace=workspace,
                resources=resources,
                keyring_dir=keyring_dir,
                metadata_dir=metadata_dir,
                package_dir=package_dir,
            )
        )


def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None:
    images = list(images)

    if args.verb == Verb.completion:
        return print_completion(args, resources=resources)

    if args.verb == Verb.documentation:
        if args.cmdline:
            manual = {
                "addon": "mkosi-addon",
                "initrd": "mkosi-initrd",
                "sandbox": "mkosi-sandbox",
                "news": "mkosi.news",
            }.get(args.cmdline[0], args.cmdline[0])
        else:
            manual = "mkosi"

        formats: list[DocFormat] = (
            [args.doc_format] if args.doc_format != DocFormat.auto else DocFormat.all()
        )
        chapter = {"mkosi.news": 7}.get(manual, 1)
        return show_docs(manual, formats, man_chapter=chapter, resources=resources, pager=args.pager)

    if args.verb == Verb.genkey:
        return generate_key_cert_pair(args)

    if args.verb == Verb.bump:
        return bump_image_version()

    if args.verb == Verb.dependencies:
        _, [deps] = parse_config(
            ["--directory", "", "--repositories", "", "--include=mkosi-tools", "build"],
            resources=resources,
        )

        for p in deps.packages:
            print(p)

        return

    if all(config == Config.default() for config in images):
        die(
            "No configuration found",
            hint="Make sure mkosi is run from a directory with configuration files",
        )

    if args.verb == Verb.summary:
        if args.json:
            text = json.dumps(
                {"Images": [config.to_dict() for config in images]},
                cls=JsonEncoder,
                indent=4,
                sort_keys=True,
            )
        else:
            text = "\n".join(summary(config) for config in images)

        page(text, args.pager)
        return

    if args.verb == Verb.cat_config:
        text = cat_config(images)
        page(text, args.pager)
        return

    last = images[-1]

    if (minversion := last.minimum_version) and minversion > __version__:
        die(f"mkosi {minversion} or newer is required by this configuration (found {__version__})")

    if not in_sandbox() and last.tools_tree and last.tools_tree == Path("default"):
        tools = finalize_default_tools(last, resources=resources)
    else:
        tools = None

    for i, config in enumerate(images):
        if in_sandbox():
            tools_tree = None
        elif tools and config.tools_tree == Path("default"):
            tools_tree = tools.output_dir_or_cwd() / tools.output
        else:
            tools_tree = config.tools_tree

        images[i] = dataclasses.replace(config, tools_tree=tools_tree)

    # The images array has been modified so we need to reevaluate last again.
    last = images[-1]

    if args.verb == Verb.clean:
        if tools:
            run_clean(args, tools)

        for config in images:
            run_clean(args, config)

        if args.force > 0 and last.distribution != Distribution.custom:
            remove_cache_entries(finalize_default_initrd(last, tools=False, resources=resources))

        rmtree(Path(".mkosi-private"))

        return

    if (
        tools
        and (
            not (tools.output_dir_or_cwd() / tools.output).exists()
            or (tools.incremental and not have_cache(tools))
        )
        and (args.verb != Verb.build or last.output_format == OutputFormat.none)
        and not args.force
    ):
        die(
            f"Default tools tree requested for image '{last.name()}' but it is out-of-date or has not been "
            "built yet",
            hint="Make sure to (re)build the image first with 'mkosi build' or use '--force'",
        )

    # If we're doing an incremental build and the cache is not out of date, don't clean up the
    # tools tree so that we can reuse the previous one.
    if tools and (
        not tools.incremental or ((args.verb == Verb.build or args.force > 0) and not have_cache(tools))
    ):
        if tools.incremental == Incremental.strict:
            die(
                "Tools tree does not exist or is out-of-date but the strict incremental mode is enabled",
                hint="Build once with '-i yes' to update the tools tree",
            )

        run_clean(args, tools)

    if tools and not (tools.output_dir_or_cwd() / tools.output).exists():
        check_tools(tools, Verb.build)

        ensure_directories_exist(tools)

        with (
            tempfile.TemporaryDirectory(
                dir=tools.workspace_dir_or_default(),
                prefix="mkosi-keyring-",
            ) as keyring_dir,
            tempfile.TemporaryDirectory(
                dir=tools.workspace_dir_or_default(),
                prefix="mkosi-metadata-",
            ) as metadata_dir,
        ):
            sync_repository_metadata(
                args,
                [tools],
                resources=resources,
                keyring_dir=Path(keyring_dir),
                metadata_dir=Path(metadata_dir),
            )

            fork_and_wait(
                run_build,
                args,
                tools,
                resources=resources,
                keyring_dir=Path(keyring_dir),
                metadata_dir=Path(metadata_dir),
            )

    if not args.verb.needs_build():
        return {
            Verb.ssh: run_ssh,
            Verb.journalctl: run_journalctl,
            Verb.coredumpctl: run_coredumpctl,
            Verb.sandbox: run_sandbox,
        }[args.verb](args, last)

    output = last.output_dir_or_cwd() / last.output_with_compression

    if (
        args.verb == Verb.build
        and not args.force
        and output.exists()
        and not output.is_symlink()
        and last.output_format != OutputFormat.none
    ):
        logging.info(f"Output path {output} exists already. (Use --force to rebuild.)")
        return

    if args.verb.needs_build():
        if args.verb != Verb.build and not args.force and not output.exists():
            die(
                f"Image '{last.name()}' has not been built yet",
                hint="Make sure to build the image first with 'mkosi build' or use '--force'",
            )

        if not last.repart_offline and os.getuid() != 0:
            die(f"Must be root to build {last.name()} image configured with RepartOffline=no")

        check_workspace_directory(last)

        if last.incremental == Incremental.strict:
            if args.force > 1:
                die(
                    "Cannot remove incremental caches when building with Incremental=strict",
                    hint="Build once with '-i yes' to update the image cache",
                )

            for config in images:
                if have_cache(config):
                    continue

                die(
                    f"Strict incremental mode is enabled and cache for image {config.name()} is out-of-date",
                    hint="Build once with '-i yes' to update the image cache",
                )

    # First, process all directory removals because otherwise if different images share directories
    # a later image build could end up deleting the output generated by an earlier image build.
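    # Note that needs_build() also returns True when --force was given, so a forced build always starts
    # from freshly cleaned outputs.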
    if args.verb.needs_build() and (needs_build(args, last) or args.wipe_build_dir):
        for config in images:
            run_clean(args, config)

        if last.distribution != Distribution.custom:
            initrd = finalize_default_initrd(last, resources=resources)
            if args.force > 1 or not have_cache(initrd):
                remove_cache_entries(initrd)

    for i, config in enumerate(images):
        if args.verb != Verb.build:
            check_tools(config, args.verb)

        images[i] = config = run_configure_scripts(config)

    # The images array has been modified so we need to reevaluate last again.
    # Also ensure that all other images are reordered in case their dependencies were modified.
    last = images[-1]
    if not have_history(args):
        images = resolve_deps(images[:-1], last.dependencies) + [last]

    if not (last.output_dir_or_cwd() / last.output).exists() or last.output_format == OutputFormat.none:
        for config in images:
            if any(
                source.type != KeySourceType.file
                for source in (
                    config.verity_key_source,
                    config.secure_boot_key_source,
                    config.sign_expected_pcr_key_source,
                )
            ):
                join_new_session_keyring()
                break

        with complete_step("Validating certificates and keys"):
            for config in images:
                validate_certificates_and_keys(config)

        ensure_directories_exist(last)

        with (
            tempfile.TemporaryDirectory(
                dir=last.workspace_dir_or_default(),
                prefix="mkosi-keyring-",
            ) as keyring_dir,
            tempfile.TemporaryDirectory(
                dir=last.workspace_dir_or_default(),
                prefix="mkosi-metadata-",
            ) as metadata_dir,
            tempfile.TemporaryDirectory(
                dir=last.workspace_dir_or_default(),
                prefix="mkosi-packages-",
            ) as package_dir,
        ):
            sync_repository_metadata(
                args,
                images,
                resources=resources,
                keyring_dir=Path(keyring_dir),
                metadata_dir=Path(metadata_dir),
            )

            for config in images:
                run_sync_scripts(config)

            for config in images:
                # If the output format is "none" and there are no build scripts, there's nothing to
                # do so exit early.
                if config.output_format == OutputFormat.none and not config.build_scripts:
                    continue

                check_tools(config, Verb.build)
                check_inputs(config)

                ensure_directories_exist(config)

                fork_and_wait(
                    run_build,
                    args,
                    config,
                    resources=resources,
                    keyring_dir=Path(keyring_dir),
                    metadata_dir=Path(metadata_dir),
                    package_dir=Path(package_dir),
                )

        if args.auto_bump:
            bump_image_version()

        if last.history:
            Path(".mkosi-private/history").mkdir(parents=True, exist_ok=True)
            Path(".mkosi-private/history/latest.json").write_text(last.to_json())

    if args.verb == Verb.build:
        return

    if (
        last.output_format == OutputFormat.directory
        and (last.output_dir_or_cwd() / last.output).stat().st_uid == 0
        and os.getuid() != 0
    ):
        die(
            "Cannot operate on directory images built as root when running unprivileged",
            hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image",
        )

    run_vm = {
        Vmm.qemu: run_qemu,
        Vmm.vmspawn: run_vmspawn,
    }[last.vmm]

    {
        Verb.shell: run_shell,
        Verb.boot: run_shell,
        Verb.vm: run_vm,
        Verb.qemu: run_vm,
        Verb.serve: run_serve,
        Verb.burn: run_burn,
        Verb.sysupdate: run_sysupdate,
    }[args.verb](args, last)
mkosi-25.3/mkosi/__main__.py000066400000000000000000000021411474711424400160070ustar00rootroot00000000000000
# SPDX-License-Identifier: LGPL-2.1-or-later
# PYTHON_ARGCOMPLETE_OK

import faulthandler
import signal
import sys
from types import FrameType
from typing import Optional

import mkosi.resources
from mkosi import run_verb
from mkosi.config import parse_config
from mkosi.log import log_setup
from mkosi.run import find_binary, run, uncaught_exception_handler
from mkosi.util import resource_path


def onsignal(signal: int, frame: Optional[FrameType]) -> None:
    raise KeyboardInterrupt()


@uncaught_exception_handler()
def main() -> None:
    signal.signal(signal.SIGTERM, onsignal)
    signal.signal(signal.SIGHUP, onsignal)

    log_setup()

    with resource_path(mkosi.resources) as resources:
        args, images = parse_config(sys.argv[1:], resources=resources)

        if args.debug:
            faulthandler.enable()

        try:
            run_verb(args, images, resources=resources)
        finally:
            if sys.stderr.isatty() and find_binary("tput"):
                run(["tput", "cnorm"], check=False)
                run(["tput", "smam"], check=False)


if __name__ == "__main__":
    main()
mkosi-25.3/mkosi/addon.py000066400000000000000000000045631474711424400153660ustar00rootroot00000000000000
# SPDX-License-Identifier: LGPL-2.1-or-later

import argparse
import os
import sys
import tempfile
from pathlib import Path

import mkosi.resources
from mkosi.config import DocFormat
from mkosi.documentation import show_docs
from mkosi.initrd import include_system_config, initrd_common_args, initrd_finalize, process_crypttab
from mkosi.log import log_setup
from mkosi.run import run, uncaught_exception_handler
from mkosi.util import PathString, resource_path


@uncaught_exception_handler()
def main() -> None:
    log_setup()

    parser = argparse.ArgumentParser(
        prog="mkosi-addon",
        description="Build initrd/cmdline/ucode addon for the current system using mkosi",
        allow_abbrev=False,
        usage="mkosi-addon [options...]",
    )

    parser.add_argument(
        "-o",
        "--output",
        metavar="NAME",
        help="Output name",
        default="mkosi-local.addon.efi",
    )

    initrd_common_args(parser)

    args = parser.parse_args()

    if args.show_documentation:
        with resource_path(mkosi.resources) as r:
            show_docs("mkosi-addon", DocFormat.all(), resources=r)
        return

    with tempfile.TemporaryDirectory() as staging_dir:
        cmdline: list[PathString] = [
            "mkosi",
            "--force",
            "--directory", "",
            "--output", args.output,
            "--output-directory", staging_dir,
            "--build-sources", "",
"--include=mkosi-addon", "--extra-tree", f"/usr/lib/modules/{args.kernel_version}:/usr/lib/modules/{args.kernel_version}", "--extra-tree=/usr/lib/firmware:/usr/lib/firmware", "--kernel-modules-exclude=.*", ] # fmt: skip if args.debug: cmdline += ["--debug"] if args.debug_shell: cmdline += ["--debug-shell"] if os.getuid() == 0: cmdline += [ "--workspace-dir=/var/tmp", "--output-mode=600", ] cmdline += include_system_config("mkosi-addon") cmdline += process_crypttab(staging_dir) if Path("/etc/kernel/cmdline").exists(): cmdline += ["--kernel-command-line", Path("/etc/kernel/cmdline").read_text()] run(cmdline, stdin=sys.stdin, stdout=sys.stdout) initrd_finalize(staging_dir, args.output, args.output_dir) if __name__ == "__main__": main() mkosi-25.3/mkosi/archive.py000066400000000000000000000104161474711424400157140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os from collections.abc import Iterable, Sequence from pathlib import Path from typing import Optional from mkosi.log import log_step from mkosi.run import SandboxProtocol, finalize_passwd_symlinks, nosandbox, run, workdir from mkosi.sandbox import umask from mkosi.util import PathString, chdir def tar_exclude_apivfs_tmp() -> list[str]: return [ "--exclude", "./dev/*", "--exclude", "./proc/*", "--exclude", "./sys/*", "--exclude", "./tmp/*", "--exclude", "./run/*", "--exclude", "./var/tmp/*", ] # fmt: skip def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> None: log_step(f"Creating tar archive {dst}…") with dst.open("wb") as f: run( [ "tar", "--create", "--file", "-", "--directory", workdir(src, sandbox), "--acls", "--selinux", # --xattrs implies --format=pax "--xattrs", # PAX format emits additional headers for atime, ctime and mtime # that would make the archive non-reproducible. "--pax-option=delete=atime,delete=ctime,delete=mtime", "--sparse", "--force-local", *(["--owner=root:0"] if os.getuid() != 0 else []), *(["--group=root:0"] if os.getuid() != 0 else []), *tar_exclude_apivfs_tmp(), ".", ], stdout=f, # Make sure tar uses user/group information from the root directory instead of the host. sandbox=sandbox( options=[ "--ro-bind", src, workdir(src, sandbox), *finalize_passwd_symlinks(workdir(src, sandbox)), ], ), ) # fmt: skip def can_extract_tar(src: Path) -> bool: return ".tar" in src.suffixes[-2:] def extract_tar( src: Path, dst: Path, *, log: bool = True, dirs: Sequence[PathString] = (), options: Sequence[PathString] = (), sandbox: SandboxProtocol = nosandbox, ) -> None: if log: log_step(f"Extracting tar archive {src}…") with umask(~0o755): dst.mkdir(exist_ok=True) run( [ "tar", "--extract", "--file", workdir(src, sandbox), "--directory", workdir(dst, sandbox), "--keep-directory-symlink", "--no-overwrite-dir", "--same-permissions", "--same-owner" if (dst / "etc/passwd").exists() and os.getuid() == 0 else "--numeric-owner", "--same-order", "--acls", "--selinux", "--xattrs", "--force-local", *tar_exclude_apivfs_tmp(), *options, *dirs, ], sandbox=sandbox( # Make sure tar uses user/group information from the root directory instead of the host. 
            options=[
                "--ro-bind", src, workdir(src, sandbox),
                "--bind", dst, workdir(dst, sandbox),
                *finalize_passwd_symlinks(workdir(dst, sandbox)),
            ],
        ),
    )  # fmt: skip


def make_cpio(
    src: Path,
    dst: Path,
    *,
    files: Optional[Iterable[Path]] = None,
    sandbox: SandboxProtocol = nosandbox,
) -> None:
    if not files:
        with chdir(src):
            files = sorted(Path(".").rglob("*"))
    else:
        files = sorted(files)

    log_step(f"Creating cpio archive {dst}…")

    with dst.open("wb") as f:
        run(
            [
                "cpio",
                "--create",
                "--reproducible",
                "--renumber-inodes",
                "--null",
                "--format=newc",
                "--quiet",
                "--directory", workdir(src, sandbox),
                *(["--owner=0:0"] if os.getuid() != 0 else []),
            ],
            input="\0".join(os.fspath(f) for f in files),
            stdout=f,
            sandbox=sandbox(
                options=[
                    "--ro-bind", src, workdir(src, sandbox),
                    *finalize_passwd_symlinks(workdir(src, sandbox))
                ],
            ),
        )  # fmt: skip
mkosi-25.3/mkosi/backport.py000066400000000000000000000051671474711424400161070ustar00rootroot00000000000000
# SPDX-License-Identifier: PSF-2.0

# Copied from https://github.com/python/cpython/blob/main/Lib/importlib/resources/_common.py
# We backport as_file() from python 3.12 here temporarily since it added directory support.
# TODO: Remove once minimum python version is 3.12.

import contextlib
import functools
import os
import tempfile
from pathlib import Path
from typing import no_type_check


@no_type_check
@contextlib.contextmanager
def _tempfile(
    reader,
    suffix="",
    # gh-93353: Keep a reference to call os.remove() in late Python
    # finalization.
    *,
    _os_remove=os.remove,
):
    # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
    # blocks due to the need to close the temporary file to work on Windows
    # properly.
    fd, raw_path = tempfile.mkstemp(suffix=suffix)
    try:
        try:
            os.write(fd, reader())
        finally:
            os.close(fd)

        yield Path(raw_path)
    finally:
        try:
            _os_remove(raw_path)
        except FileNotFoundError:
            pass


@no_type_check
def _temp_file(path):
    return _tempfile(path.read_bytes, suffix=path.name)


@no_type_check
def _is_present_dir(path) -> bool:
    """
    Some Traversables implement ``is_dir()`` to raise an exception (i.e.
    ``FileNotFoundError``) when the directory doesn't exist.

    This function wraps that call to always return a boolean and only
    return True if there's a dir and it exists.
    """
    with contextlib.suppress(FileNotFoundError):
        return path.is_dir()
    return False


@no_type_check
@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)


@no_type_check
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
    """
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
    """
    with dir as result:
        yield Path(result)


@no_type_check
@contextlib.contextmanager
def _temp_dir(path):
    """
    Given a traversable dir, recursively replicate the whole tree
    to the file system in a context manager.
""" assert path.is_dir() with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: yield _write_contents(temp_dir, path) @no_type_check def _write_contents(target, source): child = target.joinpath(source.name) if source.is_dir(): child.mkdir() for item in source.iterdir(): _write_contents(child, item) else: child.write_bytes(source.read_bytes()) return child mkosi-25.3/mkosi/bootloader.py000066400000000000000000000775611474711424400164430ustar00rootroot00000000000000import itertools import logging import shutil import subprocess import sys import tempfile import textwrap from collections.abc import Iterator, Mapping, Sequence from pathlib import Path from typing import Optional from mkosi.config import ( BiosBootloader, Bootloader, CertificateSource, CertificateSourceType, Config, ConfigFeature, KeySource, KeySourceType, OutputFormat, SecureBootSignTool, ShimBootloader, systemd_tool_version, ) from mkosi.context import Context from mkosi.distributions import Distribution from mkosi.log import complete_step, die, log_step from mkosi.partition import Partition from mkosi.qemu import KernelType from mkosi.run import CompletedProcess, run, workdir from mkosi.sandbox import umask from mkosi.util import _FILE, PathString, flatten from mkosi.versioncomp import GenericVersion def want_efi(config: Config) -> bool: # Do we want to make the image bootable on EFI firmware? # Note that this returns True also in the case where autodetection might later cause the system to not be # made bootable on EFI firmware after the filesystem has been populated. if config.output_format in (OutputFormat.uki, OutputFormat.esp): return True if config.bootable == ConfigFeature.disabled: return False if config.bootloader == Bootloader.none: return False if ( config.output_format == OutputFormat.cpio or config.output_format.is_extension_or_portable_image() or config.overlay ) and config.bootable == ConfigFeature.auto: return False if config.architecture.to_efi() is None: if config.bootable == ConfigFeature.enabled: die(f"Cannot make image bootable on UEFI on {config.architecture} architecture") return False return True def want_grub_efi(context: Context) -> bool: if not want_efi(context.config): return False if not context.config.bootloader.is_grub(): return False if not (arch := context.config.architecture.to_grub()): return False if not context.config.bootloader.is_signed(): have = find_grub_directory(context, target=f"{arch}-efi") is not None if not have and context.config.bootable == ConfigFeature.enabled: die("An EFI bootable image with grub was requested but grub for EFI is not installed") return True def want_grub_bios(context: Context, partitions: Sequence[Partition] = ()) -> bool: if context.config.bootable == ConfigFeature.disabled: return False if context.config.output_format != OutputFormat.disk: return False if context.config.bios_bootloader != BiosBootloader.grub: return False if context.config.overlay: return False have = find_grub_directory(context, target="i386-pc") is not None if not have and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but grub for BIOS is not installed") bios = any(p.type == Partition.GRUB_BOOT_PARTITION_UUID for p in partitions) if partitions and not bios and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no BIOS Boot Partition was configured") esp = any(p.type == "esp" for p in partitions) if partitions and not esp and context.config.bootable == ConfigFeature.enabled: 
die("A BIOS bootable image with grub was requested but no ESP partition was configured") root = any(p.type.startswith("root") or p.type.startswith("usr") for p in partitions) if partitions and not root and context.config.bootable == ConfigFeature.enabled: die("A BIOS bootable image with grub was requested but no root or usr partition was configured") installed = True for binary in ("mkimage", "bios-setup"): if find_grub_binary(context.config, binary): continue if context.config.bootable == ConfigFeature.enabled: die(f"A BIOS bootable image with grub was requested but {binary} was not found") installed = False return (have and bios and esp and root and installed) if partitions else have def find_grub_directory(context: Context, *, target: str) -> Optional[Path]: for d in ("usr/lib/grub", "usr/share/grub2"): if (p := context.root / d / target).exists() and any(p.iterdir()): return p return None def find_grub_binary(config: Config, binary: str) -> Optional[Path]: assert "grub" not in binary # Debian has a bespoke setup where if only grub-pc-bin is installed, grub-bios-setup is installed in # /usr/lib/i386-pc instead of in /usr/bin. Let's take that into account and look for binaries in # /usr/lib/grub/i386-pc as well. return config.find_binary(f"grub-{binary}", f"grub2-{binary}", f"/usr/lib/grub/i386-pc/grub-{binary}") def prepare_grub_config(context: Context) -> Optional[Path]: config = context.root / "efi" / context.config.distribution.grub_prefix() / "grub.cfg" with umask(~0o700): config.parent.mkdir(exist_ok=True) # For some unknown reason, if we don't set the timeout to zero, grub never leaves its menu, so we default # to a zero timeout, but only if the config file hasn't been provided by the user. if not config.exists(): with umask(~0o600), config.open("w") as f: f.write("set timeout=0\n") if want_grub_efi(context): # Signed EFI grub shipped by distributions reads its configuration from /EFI//grub.cfg # (except in openSUSE) in the ESP so let's put a shim there to redirect to the actual configuration # file. if context.config.distribution == Distribution.opensuse: earlyconfig = context.root / "efi/EFI/BOOT/grub.cfg" else: earlyconfig = context.root / "efi/EFI" / context.config.distribution.name / "grub.cfg" with umask(~0o700): earlyconfig.parent.mkdir(parents=True, exist_ok=True) # Read the actual config file from the root of the ESP. 
        earlyconfig.write_text(f"configfile /{context.config.distribution.grub_prefix()}/grub.cfg\n")

    return config


def grub_mkimage(
    context: Context,
    *,
    target: str,
    modules: Sequence[str] = (),
    output: Optional[Path] = None,
    sbat: Optional[Path] = None,
) -> None:
    mkimage = find_grub_binary(context.config, "mkimage")
    assert mkimage

    directory = find_grub_directory(context, target=target)
    assert directory

    with (
        complete_step(f"Generating grub image for {target}"),
        tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig,
    ):
        earlyconfig.write(
            textwrap.dedent(
                f"""\
                search --no-floppy --set=root --file /{context.config.distribution.grub_prefix()}/grub.cfg
                set prefix=($root)/{context.config.distribution.grub_prefix()}
                """
            )
        )
        earlyconfig.flush()

        run(
            [
                mkimage,
                "--directory", "/grub",
                "--config", workdir(Path(earlyconfig.name)),
                "--prefix", f"/{context.config.distribution.grub_prefix()}",
                "--output", workdir(output) if output else "/grub/core.img",
                "--format", target,
                *(["--sbat", str(workdir(sbat))] if sbat else []),
                *(["--disable-shim-lock"] if context.config.shim_bootloader == ShimBootloader.none else []),
                "cat",
                "cmp",
                "div",
                "echo",
                "fat",
                "hello",
                "help",
                *(["keylayouts"] if context.config.architecture.is_x86_variant() else []),
                "linux",
                "loadenv",
                "ls",
                "normal",
                "part_gpt",
                "read",
                "reboot",
                "search_fs_file",
                "search",
                "sleep",
                "test",
                "tr",
                "true",
                *modules,
            ],
            sandbox=context.sandbox(
                options=[
                    "--bind", directory, "/grub",
                    "--ro-bind", earlyconfig.name, workdir(Path(earlyconfig.name)),
                    *(["--bind", str(output.parent), str(workdir(output.parent))] if output else []),
                    *(["--ro-bind", str(sbat), str(workdir(sbat))] if sbat else []),
                ],
            ),
        )  # fmt: skip


def find_signed_grub_image(context: Context) -> Optional[Path]:
    arch = context.config.architecture.to_efi()

    patterns = [
        f"usr/lib/grub/*-signed/grub{arch}.efi.signed",  # Debian/Ubuntu
        f"boot/efi/EFI/*/grub{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/grub.efi",  # openSUSE
    ]

    for p in flatten(context.root.glob(pattern) for pattern in patterns):
        if p.is_symlink() and p.readlink().is_absolute():
            logging.warning(f"Ignoring signed grub EFI binary which is an absolute path to {p.readlink()}")
            continue

        return p

    return None


def python_binary(config: Config) -> PathString:
    # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. If there is a tools
    # tree, just use the default python3 interpreter.
    exe = Path(sys.executable)
    return "python3" if config.tools_tree or not exe.is_relative_to("/usr") else exe


def extract_pe_section(context: Context, binary: Path, section: str, output: Path) -> Path:
    # When using a tools tree, we want to use the pefile module from the tools tree instead of requiring that
    # python-pefile is installed on the host. So we execute python as a subprocess to make sure we load
    # pefile from the tools tree if one is used.

    # TODO: Use ignore_padding=True instead of length once we can depend on a newer pefile.
    # TODO: Drop KeyError logic once we drop support for Ubuntu Jammy and sdmagic will always be available.

    # Misc_VirtualSize is the section size in memory, which can be bigger or smaller than SizeOfRawData,
    # which is the aligned section size on disk. The closest approximation of the actual section size will be
    # the minimum of these two. If Misc_VirtualSize < SizeOfRawData, we'll get the actual size. Otherwise
    # padding might be included.
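    # The inline script below exits with 67 when the section is missing so that this case can be
    # distinguished from other pefile failures.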
    pefile = textwrap.dedent(
        f"""\
        import pefile
        import sys
        from pathlib import Path
        pe = pefile.PE("{workdir(binary)}", fast_load=True)
        section = {{s.Name.decode().strip("\\0"): s for s in pe.sections}}.get("{section}")
        if not section:
            sys.exit(67)
        sys.stdout.buffer.write(
            section.get_data(length=min(section.Misc_VirtualSize, section.SizeOfRawData))
        )
        """
    )

    with open(output, "wb") as f:
        result = run(
            [python_binary(context.config)],
            input=pefile,
            stdout=f,
            sandbox=context.sandbox(
                options=["--ro-bind", binary, workdir(binary)],
            ),
            success_exit_status=(0, 67),
        )

    if result.returncode == 67:
        raise KeyError(f"{section} section not found in {binary}")

    return output


def install_grub(context: Context) -> None:
    if not want_grub_bios(context) and not want_grub_efi(context):
        return

    if want_grub_bios(context):
        grub_mkimage(context, target="i386-pc", modules=("biosdisk",))

    if want_grub_efi(context):
        if context.config.shim_bootloader != ShimBootloader.none:
            output = context.root / shim_second_stage_binary(context)
        else:
            output = context.root / efi_boot_binary(context)

        with umask(~0o700):
            output.parent.mkdir(parents=True, exist_ok=True)

        if context.config.bootloader.is_signed():
            if not (signed := find_signed_grub_image(context)):
                if context.config.bootable == ConfigFeature.enabled:
                    die("Couldn't find a signed grub EFI binary installed in the image")

                return

            rel = output.relative_to(context.root)
            log_step(f"Installing signed grub EFI binary from /{signed.relative_to(context.root)} to /{rel}")
            shutil.copy2(signed, output)
        else:
            if context.config.secure_boot and context.config.shim_bootloader != ShimBootloader.none:
                if not (signed := find_signed_grub_image(context)):
                    die("Couldn't find a signed grub EFI binary installed in the image to extract SBAT from")

                sbat = extract_pe_section(context, signed, ".sbat", context.workspace / "sbat")
            else:
                sbat = None

            grub_mkimage(
                context,
                target=f"{context.config.architecture.to_grub()}-efi",
                output=output,
                modules=("chain",),
                sbat=sbat,
            )
            if context.config.secure_boot:
                sign_efi_binary(context, output, output)

    dst = context.root / "efi" / context.config.distribution.grub_prefix() / "fonts"
    with umask(~0o700):
        dst.mkdir(parents=True, exist_ok=True)

    for d in ("grub", "grub2"):
        unicode = context.root / "usr/share" / d / "unicode.pf2"
        if unicode.exists():
            shutil.copy2(unicode, dst)


def grub_bios_setup(context: Context, partitions: Sequence[Partition]) -> None:
    if not want_grub_bios(context, partitions):
        return

    setup = find_grub_binary(context.config, "bios-setup")
    assert setup

    directory = find_grub_directory(context, target="i386-pc")
    assert directory

    with (
        complete_step("Installing grub boot loader for BIOS…"),
        tempfile.NamedTemporaryFile(mode="w") as mountinfo,
    ):
        # grub-bios-setup insists on being able to open the root device that --directory is located on, which
        # needs root privileges. However, it only uses the root device when it is unable to embed itself in
        # the bios boot partition. To make installation work unprivileged, we trick grub to think that the
        # root device is our image by mounting over its /proc/self/mountinfo file (where it gets its
        # information from) with our own file correlating the root directory to our image file.
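        # A single fake entry claiming that / is backed by our image file is all grub-bios-setup needs.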
        mountinfo.write(
            f"1 0 1:1 / / - fat {workdir(context.staging / context.config.output_with_format)}\n"
        )
        mountinfo.flush()

        run(
            [
                setup,
                "--directory", "/grub",
                workdir(context.staging / context.config.output_with_format),
            ],
            sandbox=context.sandbox(
                options=[
                    "--bind", directory, "/grub",
                    "--bind", context.staging, workdir(context.staging),
                    "--bind", mountinfo.name, "/proc/self/mountinfo",
                ],
            ),
        )  # fmt: skip


def efi_boot_binary(context: Context) -> Path:
    arch = context.config.architecture.to_efi()
    assert arch
    return Path(f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI")


def shim_second_stage_binary(context: Context) -> Path:
    arch = context.config.architecture.to_efi()
    assert arch
    if context.config.distribution == Distribution.opensuse:
        return Path("efi/EFI/BOOT/grub.EFI")
    else:
        return Path(f"efi/EFI/BOOT/grub{arch}.EFI")


def run_systemd_sign_tool(
    config: Config,
    *,
    cmdline: Sequence[PathString],
    options: Sequence[PathString],
    certificate: Optional[Path],
    certificate_source: CertificateSource,
    key: Optional[Path],
    key_source: KeySource,
    env: Mapping[str, str] = {},
    stdout: _FILE = None,
    devices: bool = False,
) -> CompletedProcess:
    if not certificate and not key:
        return run(
            cmdline,
            stdout=stdout,
            env={**config.environment, **env},
            sandbox=config.sandbox(options=options, devices=devices),
        )

    assert certificate
    assert key

    cmd: list[PathString] = [*cmdline]
    opt: list[PathString] = [*options]

    if certificate_source.type != CertificateSourceType.file or key_source.type != KeySourceType.file:
        opt += ["--bind", "/run", "/run"]

    if certificate_source.type != CertificateSourceType.file:
        cmd += ["--certificate-source", str(certificate_source)]

    if certificate.exists():
        cmd += ["--certificate", workdir(certificate)]
        opt += ["--ro-bind", certificate, workdir(certificate)]
    else:
        cmd += ["--certificate", certificate]

    if key_source.type != KeySourceType.file:
        cmd += ["--private-key-source", str(key_source)]

    if key.exists():
        cmd += ["--private-key", workdir(key)]
        opt += ["--ro-bind", key, workdir(key)]
    else:
        cmd += ["--private-key", key]

    return run(
        cmd,
        stdin=(sys.stdin if key_source.type != KeySourceType.file else subprocess.DEVNULL),
        stdout=stdout,
        env={**config.environment, **env},
        sandbox=config.sandbox(
            options=opt,
            devices=(
                devices
                or key_source.type != KeySourceType.file
                or certificate_source.type != CertificateSourceType.file
            ),
        ),
    )


def sign_efi_binary(context: Context, input: Path, output: Path) -> Path:
    assert context.config.secure_boot_key
    assert context.config.secure_boot_certificate

    sbsign = context.config.find_binary("systemd-sbsign", "/usr/lib/systemd/systemd-sbsign")
    if context.config.secure_boot_sign_tool == SecureBootSignTool.systemd_sbsign and not sbsign:
        die("Could not find systemd-sbsign")

    cmd: list[PathString]
    options: list[PathString]

    if context.config.secure_boot_sign_tool == SecureBootSignTool.systemd_sbsign or (
        context.config.secure_boot_sign_tool == SecureBootSignTool.auto and sbsign
    ):
        assert sbsign
        run_systemd_sign_tool(
            context.config,
            cmdline=[sbsign, "sign", "--output", workdir(output), workdir(input)],
            options=["--ro-bind", input, workdir(input), "--bind", output.parent, workdir(output.parent)],
            certificate=context.config.secure_boot_certificate,
            certificate_source=context.config.secure_boot_certificate_source,
            key=context.config.secure_boot_key,
            key_source=context.config.secure_boot_key_source,
        )
    elif (
        context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign
        or context.config.secure_boot_sign_tool == SecureBootSignTool.auto
        and context.config.find_binary("sbsign") is not None
    ):
        if context.config.secure_boot_certificate_source.type != CertificateSourceType.file:
            die("Secure boot certificate source must be 'file' when using sbsign as the signing tool")

        cmd = [
            "sbsign",
            "--cert", workdir(context.config.secure_boot_certificate),
            "--output", workdir(output),
        ]  # fmt: skip
        options = [
            "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate),  # noqa: E501
            "--ro-bind", input, workdir(input),
            "--bind", output.parent, workdir(output.parent),
        ]  # fmt: skip
        if context.config.secure_boot_key_source.type == KeySourceType.engine:
            cmd += ["--engine", context.config.secure_boot_key_source.source]
            options += ["--bind", "/run", "/run"]

        if context.config.secure_boot_key.exists():
            cmd += ["--key", workdir(context.config.secure_boot_key)]
            options += ["--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key)]
        else:
            cmd += ["--key", context.config.secure_boot_key]

        cmd += [workdir(input)]

        run(
            cmd,
            stdin=(
                sys.stdin
                if context.config.secure_boot_key_source.type != KeySourceType.file
                else subprocess.DEVNULL
            ),
            env=context.config.environment,
            sandbox=context.sandbox(
                options=options,
                devices=context.config.secure_boot_key_source.type != KeySourceType.file,
            ),
        )
    else:
        die("One of systemd-sbsign or sbsign is required to use SecureBoot=")

    return output


def find_and_install_shim_binary(
    context: Context,
    name: str,
    signed: Sequence[str],
    unsigned: Sequence[str],
    output: Path,
) -> None:
    if context.config.shim_bootloader == ShimBootloader.signed:
        for pattern in signed:
            for p in context.root.glob(pattern):
                if p.is_symlink() and p.readlink().is_absolute():
                    logging.warning(
                        f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}"
                    )
                    continue

                rel = p.relative_to(context.root)
                if (context.root / output).is_dir():
                    output /= rel.name

                log_step(f"Installing signed {name} EFI binary from /{rel} to /{output}")
                shutil.copy2(p, context.root / output)
                return

        if context.config.bootable == ConfigFeature.enabled:
            die(f"Couldn't find signed {name} EFI binary installed in the image")
    else:
        for pattern in unsigned:
            for p in context.root.glob(pattern):
                if p.is_symlink() and p.readlink().is_absolute():
                    logging.warning(
                        f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}"
                    )
                    continue

                rel = p.relative_to(context.root)
                if (context.root / output).is_dir():
                    output /= rel.name

                if context.config.secure_boot:
                    log_step(f"Signing and installing unsigned {name} EFI binary from /{rel} to /{output}")
                    sign_efi_binary(context, p, context.root / output)
                else:
                    log_step(f"Installing unsigned {name} EFI binary /{rel} to /{output}")
                    shutil.copy2(p, context.root / output)

                return

        if context.config.bootable == ConfigFeature.enabled:
            die(f"Couldn't find unsigned {name} EFI binary installed in the image")


def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]:
    if not (context.root / "usr/lib/modules").exists():
        return

    for kver in sorted(
        (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()),
        key=lambda k: GenericVersion(k.name),
        reverse=True,
    ):
        # Make sure we look for anything that remotely resembles vmlinuz, as the arch specific install
        # scripts in the kernel source tree sometimes do weird stuff. But let's make sure we're not returning
        # UKIs as the UKI on Fedora is named vmlinuz-virt.efi. Also look for uncompressed images (vmlinux) as
        # some architectures ship those. Prefer vmlinuz if both are present.
        for kimg in kver.glob("vmlinuz*"):
            if KernelType.identify(context.config, kimg) != KernelType.uki:
                yield kver.name, kimg
                break
        else:
            for kimg in kver.glob("vmlinux*"):
                if KernelType.identify(context.config, kimg) != KernelType.uki:
                    yield kver.name, kimg
                    break


def install_systemd_boot(context: Context) -> None:
    if not want_efi(context.config):
        return

    if not context.config.bootloader.is_systemd_boot():
        return

    if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto:
        return

    if not context.config.find_binary("bootctl"):
        if context.config.bootable == ConfigFeature.enabled:
            die("An EFI bootable image with systemd-boot was requested but bootctl was not found")
        return

    directory = context.root / "usr/lib/systemd/boot/efi"
    signed = context.config.bootloader.is_signed()
    if not directory.glob("*.efi.signed" if signed else "*.efi"):
        if context.config.bootable == ConfigFeature.enabled:
            die(
                f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}"
                f"systemd-boot binary was not found at {directory.relative_to(context.root)}"
            )
        return

    if context.config.secure_boot and not signed:
        with complete_step("Signing systemd-boot binaries…"):
            for input in itertools.chain(directory.glob("*.efi"), directory.glob("*.EFI")):
                output = directory / f"{input}.signed"
                sign_efi_binary(context, input, output)

    cmd: list[PathString] = [
        "bootctl",
        "install",
        "--root=/buildroot",
        "--install-source=image",
        "--all-architectures",
        "--no-variables",
    ]
    options: list[PathString] = ["--bind", context.root, "/buildroot"]

    bootctlver = systemd_tool_version("bootctl", sandbox=context.sandbox)

    if want_bootctl_auto_enroll := (
        context.config.secure_boot and context.config.secure_boot_auto_enroll and bootctlver >= "257"
    ):
        cmd += ["--secure-boot-auto-enroll=yes"]

    with complete_step("Installing systemd-boot…"):
        run_systemd_sign_tool(
            context.config,
            cmdline=cmd,
            options=options,
            certificate=context.config.secure_boot_certificate if want_bootctl_auto_enroll else None,
            certificate_source=context.config.secure_boot_certificate_source,
            key=context.config.secure_boot_key if want_bootctl_auto_enroll else None,
            key_source=context.config.secure_boot_key_source,
            env={"SYSTEMD_ESP_PATH": "/efi", "SYSTEMD_XBOOTLDR_PATH": "/boot"},
        )
    # TODO: Use --random-seed=no when we can depend on systemd 256.
    Path(context.root / "efi/loader/random-seed").unlink(missing_ok=True)

    if context.config.shim_bootloader != ShimBootloader.none:
        shutil.copy2(
            context.root / f"efi/EFI/systemd/systemd-boot{context.config.architecture.to_efi()}.efi",
            context.root / shim_second_stage_binary(context),
        )

    if context.config.secure_boot and context.config.secure_boot_auto_enroll and bootctlver < "257":
        assert context.config.secure_boot_key
        assert context.config.secure_boot_certificate

        with complete_step("Setting up secure boot auto-enrollment…"):
            keys = context.root / "efi/loader/keys/auto"
            with umask(~0o700):
                keys.mkdir(parents=True, exist_ok=True)

            # sbsiglist expects a DER certificate.
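            # openssl converts the PEM certificate to DER here; sbsiglist then wraps it in an EFI
            # signature list.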
            with umask(~0o600):
                run(
                    [
                        "openssl", "x509",
                        "-outform", "DER",
                        "-in", workdir(context.config.secure_boot_certificate),
                        "-out", workdir(context.workspace / "mkosi.der"),
                    ],
                    sandbox=context.sandbox(
                        options=[
                            "--ro-bind", context.config.secure_boot_certificate,
                            workdir(context.config.secure_boot_certificate),
                            "--bind", context.workspace, workdir(context.workspace),
                        ],
                    ),
                )  # fmt: skip

            with umask(~0o600):
                run(
                    [
                        "sbsiglist",
                        "--owner", "00000000-0000-0000-0000-000000000000",
                        "--type", "x509",
                        "--output", workdir(context.workspace / "mkosi.esl"),
                        workdir(context.workspace / "mkosi.der"),
                    ],
                    sandbox=context.sandbox(
                        options=[
                            "--bind", context.workspace, workdir(context.workspace),
                            "--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"),  # noqa: E501
                        ]
                    ),
                )  # fmt: skip

            # We reuse the key for all secure boot databases to keep things simple.
            for db in ["PK", "KEK", "db"]:
                with umask(~0o600):
                    cmd = [
                        "sbvarsign",
                        "--attr",
                        "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS",
                        "--cert", workdir(context.config.secure_boot_certificate),
                        "--output", workdir(keys / f"{db}.auth"),
                    ]  # fmt: skip
                    options = [
                        "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate),  # noqa: E501
                        "--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"),  # noqa: E501
                        "--bind", keys, workdir(keys),
                    ]  # fmt: skip
                    if context.config.secure_boot_key_source.type == KeySourceType.engine:
                        cmd += ["--engine", context.config.secure_boot_key_source.source]
                        options += ["--bind", "/run", "/run"]

                    if context.config.secure_boot_key.exists():
                        cmd += ["--key", workdir(context.config.secure_boot_key)]
                        options += [
                            "--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key),  # noqa: E501
                        ]  # fmt: skip
                    else:
                        cmd += ["--key", context.config.secure_boot_key]

                    cmd += [db, workdir(context.workspace / "mkosi.esl")]

                    run(
                        cmd,
                        stdin=(
                            sys.stdin
                            if context.config.secure_boot_key_source.type != KeySourceType.file
                            else subprocess.DEVNULL
                        ),
                        sandbox=context.sandbox(
                            options=options,
                            devices=context.config.secure_boot_key_source.type != KeySourceType.file,
                        ),
                    )


def install_shim(context: Context) -> None:
    if not want_efi(context.config):
        return

    if context.config.shim_bootloader == ShimBootloader.none:
        return

    if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto:
        return

    dst = efi_boot_binary(context)
    with umask(~0o700):
        (context.root / dst).parent.mkdir(parents=True, exist_ok=True)

    arch = context.config.architecture.to_efi()

    signed = [
        f"usr/lib/shim/shim{arch}.efi.signed.latest",  # Ubuntu
        f"usr/lib/shim/shim{arch}.efi.signed",  # Debian
        f"boot/efi/EFI/*/shim{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/shim.efi",  # openSUSE
    ]
    unsigned = [
        f"usr/lib/shim/shim{arch}.efi",  # Debian/Ubuntu
        f"usr/share/shim/*/*/shim{arch}.efi",  # Fedora/CentOS
        f"usr/share/shim/shim{arch}.efi",  # Arch
    ]

    find_and_install_shim_binary(context, "shim", signed, unsigned, dst)

    signed = [
        f"usr/lib/shim/mm{arch}.efi.signed",  # Debian
        f"usr/lib/shim/mm{arch}.efi",  # Ubuntu
        f"boot/efi/EFI/*/mm{arch}.efi",  # Fedora/CentOS
        "usr/share/efi/*/MokManager.efi",  # openSUSE
    ]
    unsigned = [
        f"usr/lib/shim/mm{arch}.efi",  # Debian/Ubuntu
        f"usr/share/shim/*/*/mm{arch}.efi",  # Fedora/CentOS
        f"usr/share/shim/mm{arch}.efi",  # Arch
    ]

    find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent)
mkosi-25.3/mkosi/burn.py000066400000000000000000000026021474711424400152370ustar00rootroot00000000000000
SPDX-License-Identifier: LGPL-2.1-or-later import os import sys from mkosi.config import Args, Config, OutputFormat from mkosi.log import complete_step, die from mkosi.run import run from mkosi.user import become_root_cmd def run_burn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp): die(f"{config.output_format} images cannot be burned to disk") if not args.cmdline: die("Please specify a device to burn the image to", hint="For example /dev/disk/by-id/usb-foobar") fname = config.output_dir_or_cwd() / config.output if len(args.cmdline) != 1: die("Expected device argument.") cmd = [ "systemd-repart", "--no-pager", "--pretty=no", "--offline=yes", "--empty=force", "--dry-run=no", "--definitions=/", f"--copy-from={fname}", *args.cmdline, ] with complete_step("Burning 🔥🔥🔥 to medium…", "Burnt. 🔥🔥🔥"): run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, options=["--same-dir"], setup=become_root_cmd(), ), ) mkosi-25.3/mkosi/completion.py000066400000000000000000000204201474711424400164400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import dataclasses import enum import io import shlex from collections.abc import Iterable, Mapping from pathlib import Path from textwrap import indent from typing import Optional, Union from mkosi import config from mkosi.log import die from mkosi.util import StrEnum class CompGen(StrEnum): default = enum.auto() files = enum.auto() dirs = enum.auto() @staticmethod def from_action(action: argparse.Action) -> "CompGen": if isinstance(action.default, Path): if action.default.is_dir(): return CompGen.dirs else: return CompGen.files # TODO: the type of action.type is Union[Callable[[str], Any], FileType] # the type of Path is type, but Path also works in this position, # because the constructor is a callable from str -> Path elif action.type is not None and (isinstance(action.type, type) and issubclass(action.type, Path)): # type: ignore if isinstance(action.default, Path) and action.default.is_dir(): # type: ignore return CompGen.dirs else: return CompGen.files return CompGen.default def to_bash(self) -> str: return f"_mkosi_compgen_{self}" def to_fish(self) -> str: if self == CompGen.files: return "--force-files" elif self == CompGen.dirs: return "--force-files -a '(__fish_complete_directories)'" else: return "-f" def to_zsh(self) -> str: if self == CompGen.files: return ":path:_files -/" elif self == CompGen.dirs: return ":directory:_files -f" else: return "" @dataclasses.dataclass(frozen=True) class CompletionItem: short: Optional[str] long: Optional[str] help: Optional[str] nargs: Union[str, int] choices: list[str] compgen: CompGen def collect_completion_arguments() -> list[CompletionItem]: parser = config.create_argument_parser() options = [ CompletionItem( short=next((s for s in action.option_strings if not s.startswith("--")), None), long=next((s for s in action.option_strings if s.startswith("--")), None), help=action.help, nargs=action.nargs or 0, choices=[str(c) for c in action.choices] if action.choices is not None else [], compgen=CompGen.from_action(action), ) for action in parser._actions if ( action.option_strings and action.help != argparse.SUPPRESS and action.dest not in config.SETTINGS_LOOKUP_BY_DEST ) ] options += [ CompletionItem( short=setting.short, long=setting.long, help=setting.help, nargs=setting.nargs or 1, choices=[str(c) for c in 
setting.choices] if setting.choices is not None else [], compgen=CompGen.default, ) for setting in config.SETTINGS ] return options def finalize_completion_bash(options: list[CompletionItem], resources: Path) -> str: def to_bash_array(name: str, entries: Iterable[str]) -> str: return f"{name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str: return ( f"{name.replace('-', '_')}=(" + " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + ")" ) completion = resources / "completion.bash" options_by_key = {o.short: o for o in options if o.short} | {o.long: o for o in options if o.long} template = completion.read_text() with io.StringIO() as c: c.write(to_bash_array("_mkosi_options", options_by_key.keys())) c.write("\n\n") nargs = to_bash_hasharray( "_mkosi_nargs", {optname: v.nargs for optname, v in options_by_key.items()} ) c.write(nargs) c.write("\n\n") choices = to_bash_hasharray( "_mkosi_choices", {optname: " ".join(v.choices) for optname, v in options_by_key.items() if v.choices}, ) c.write(choices) c.write("\n\n") compgen = to_bash_hasharray( "_mkosi_compgen", { optname: v.compgen.to_bash() for optname, v in options_by_key.items() if v.compgen != CompGen.default }, ) c.write(compgen) c.write("\n\n") c.write(to_bash_array("_mkosi_verbs", [str(v) for v in config.Verb])) definitions = c.getvalue() return template.replace("##VARIABLEDEFINITIONS##", indent(definitions, " " * 4)) def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> str: with io.StringIO() as c: c.write("# SPDX-License-Identifier: LGPL-2.1-or-later\n\n") c.write("complete -c mkosi -f\n") c.write("complete -c mkosi -n '__fish_is_first_token' -a \"") c.write(" ".join(str(v) for v in config.Verb)) c.write('"\n') for option in options: if not option.short and not option.long: continue c.write("complete -c mkosi ") if option.short: c.write(f"-s {option.short.lstrip('-')} ") if option.long: c.write(f"-l {option.long.lstrip('-')} ") if isinstance(option.nargs, int) and option.nargs > 0: c.write("-r ") if option.choices: c.write('-a "') c.write(" ".join(option.choices)) c.write('" ') if option.help is not None: help = option.help.replace("'", "\\'") c.write(f'-d "{help}" ') c.write(option.compgen.to_fish()) c.write("\n") return c.getvalue() def finalize_completion_zsh(options: list[CompletionItem], resources: Path) -> str: def to_zsh_array(name: str, entries: Iterable[str]) -> str: return ( f"declare -a {name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" ) completion = resources / "completion.zsh" with io.StringIO() as c: c.write(completion.read_text()) c.write("\n") c.write(to_zsh_array("_mkosi_verbs", [str(v) for v in config.Verb])) c.write("\n\n") c.write("_arguments -s \\\n") c.write(" '(- *)'{-h,--help}'[Show this help]' \\\n") c.write(" '(- *)--version[Show package version]' \\\n") for option in options: if not option.short and not option.long: continue posix = option.help and "'" in option.help open_quote = "$'" if posix else "'" if option.short and option.long: c.write(f" '({option.short} {option.long})'{{{option.short},{option.long}}}{open_quote}") else: c.write(f" {open_quote}{option.short or option.long}") if option.help: help = option.help.replace("'", r"\'") c.write(f"[{help}]") if option.choices: # TODO: maybe use metavar here? 
At least for me it's not shown, though c.write(":arg:(") c.write(" ".join(option.choices)) c.write(")") c.write(option.compgen.to_zsh()) c.write("' \\\n") c.write(" '*::mkosi verb:_mkosi_verb'\n\n") return c.getvalue() def print_completion(args: config.Args, *, resources: Path) -> None: if not args.cmdline: die( "No shell to generate completion script for specified", hint="Please specify either one of: bash, fish, zsh", ) shell = args.cmdline[0] if shell == "bash": func = finalize_completion_bash elif shell == "fish": func = finalize_completion_fish elif shell == "zsh": func = finalize_completion_zsh else: die( f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh", ) completion_args = collect_completion_arguments() print(func(completion_args, resources)) mkosi-25.3/mkosi/config.py000066400000000000000000005264241474711424400155530ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import base64 import copy import dataclasses import enum import fnmatch import functools import graphlib import inspect import io import json import logging import math import operator import os.path import platform import re import shlex import string import subprocess import sys import tempfile import textwrap import typing import uuid from collections.abc import Collection, Iterable, Iterator, Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Any, Callable, Generic, Optional, TypeVar, Union, cast from mkosi.distributions import Distribution, detect_distribution from mkosi.log import ARG_DEBUG, ARG_DEBUG_SANDBOX, ARG_DEBUG_SHELL, die from mkosi.pager import page from mkosi.run import SandboxProtocol, find_binary, nosandbox, run, sandbox_cmd, workdir from mkosi.sandbox import Style, __version__ from mkosi.user import INVOKING_USER from mkosi.util import ( PathString, StrEnum, SupportsRead, chdir, flatten, is_power_of_2, make_executable, startswith, ) from mkosi.versioncomp import GenericVersion T = TypeVar("T") SE = TypeVar("SE", bound=StrEnum) ConfigParseCallback = Callable[[Optional[str], Optional[T]], Optional[T]] ConfigMatchCallback = Callable[[str, T], bool] ConfigDefaultCallback = Callable[[argparse.Namespace], T] BUILTIN_CONFIGS = ("mkosi-tools", "mkosi-initrd", "mkosi-vm", "mkosi-addon") class Verb(StrEnum): build = enum.auto() clean = enum.auto() summary = enum.auto() cat_config = enum.auto() shell = enum.auto() boot = enum.auto() vm = enum.auto() qemu = enum.auto() ssh = enum.auto() serve = enum.auto() bump = enum.auto() help = enum.auto() genkey = enum.auto() documentation = enum.auto() journalctl = enum.auto() coredumpctl = enum.auto() burn = enum.auto() dependencies = enum.auto() completion = enum.auto() sysupdate = enum.auto() sandbox = enum.auto() def supports_cmdline(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.vm, Verb.qemu, Verb.ssh, Verb.journalctl, Verb.coredumpctl, Verb.burn, Verb.completion, Verb.documentation, Verb.sysupdate, Verb.sandbox, ) def needs_build(self) -> bool: return self in ( Verb.build, Verb.shell, Verb.boot, Verb.vm, Verb.qemu, Verb.serve, Verb.burn, Verb.sysupdate, ) def needs_config(self) -> bool: return self not in ( Verb.help, Verb.genkey, Verb.documentation, Verb.dependencies, Verb.completion, ) class ConfigFeature(StrEnum): auto = enum.auto() enabled = enum.auto() disabled = enum.auto() def to_tristate(self) -> str: if self == ConfigFeature.enabled: return "yes" if self == ConfigFeature.disabled: return 
"no" return "" @dataclasses.dataclass(frozen=True) class ConfigTree: source: Path target: Optional[Path] def with_prefix(self, prefix: PathString = "/") -> tuple[Path, Path]: return ( self.source, Path(prefix) / os.fspath(self.target).lstrip("/") if self.target else Path(prefix), ) def __str__(self) -> str: return f"{self.source}:{self.target}" if self.target else f"{self.source}" @dataclasses.dataclass(frozen=True) class Drive: id: str size: int directory: Optional[Path] options: Optional[str] file_id: str # We use negative numbers for specifying special constants # for VSock CIDs since they're not valid CIDs anyway. class VsockCID(enum.IntEnum): auto = -1 hash = -2 @classmethod def format(cls, cid: int) -> str: if cid == VsockCID.auto: return "auto" if cid == VsockCID.hash: return "hash" return str(cid) class SecureBootSignTool(StrEnum): auto = enum.auto() sbsign = enum.auto() systemd_sbsign = enum.auto() class OutputFormat(StrEnum): confext = enum.auto() cpio = enum.auto() directory = enum.auto() disk = enum.auto() esp = enum.auto() none = enum.auto() portable = enum.auto() sysext = enum.auto() tar = enum.auto() uki = enum.auto() oci = enum.auto() addon = enum.auto() def extension(self) -> str: return { OutputFormat.confext: ".raw", OutputFormat.cpio: ".cpio", OutputFormat.disk: ".raw", OutputFormat.esp: ".raw", OutputFormat.portable: ".raw", OutputFormat.sysext: ".raw", OutputFormat.tar: ".tar", OutputFormat.uki: ".efi", OutputFormat.addon: ".efi", }.get(self, "") # fmt: skip def use_outer_compression(self) -> bool: return self in ( OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk, OutputFormat.sysext, OutputFormat.confext, OutputFormat.portable, ) def is_extension_image(self) -> bool: return self in (OutputFormat.sysext, OutputFormat.confext, OutputFormat.addon) def is_extension_or_portable_image(self) -> bool: return self.is_extension_image() or self == OutputFormat.portable class ManifestFormat(StrEnum): json = enum.auto() # the standard manifest in json format changelog = enum.auto() # human-readable text file with package changelogs class Compression(StrEnum): # fmt: off none = enum.auto() zstd = enum.auto() zst = zstd xz = enum.auto() bz2 = enum.auto() gz = enum.auto() gzip = gz lz4 = enum.auto() lzma = enum.auto() # fmt: on def __bool__(self) -> bool: return self != Compression.none def extension(self) -> str: return {Compression.zstd: ".zst"}.get(self, f".{self}") def oci_media_type_suffix(self) -> str: suffix = { Compression.none: "", Compression.gz: "+gzip", Compression.zstd: "+zstd", }.get(self) # fmt: skip if not suffix: die(f"Compression {self} not supported for OCI layers") return suffix class DocFormat(StrEnum): auto = enum.auto() markdown = enum.auto() man = enum.auto() pandoc = enum.auto() system = enum.auto() @classmethod def all(cls) -> list["DocFormat"]: # this excludes auto and encodes the order in which these should be # checked when searching for docs return [cls.man, cls.pandoc, cls.markdown, cls.system] class Bootloader(StrEnum): none = enum.auto() uki = enum.auto() systemd_boot = enum.auto() grub = enum.auto() uki_signed = enum.auto() systemd_boot_signed = enum.auto() grub_signed = enum.auto() def is_uki(self) -> bool: return self in (Bootloader.uki, Bootloader.uki_signed) def is_systemd_boot(self) -> bool: return self in (Bootloader.systemd_boot, Bootloader.systemd_boot_signed) def is_grub(self) -> bool: return self in (Bootloader.grub, Bootloader.grub_signed) def is_signed(self) -> bool: return self in (Bootloader.uki_signed, 
Bootloader.systemd_boot_signed, Bootloader.grub_signed) class BiosBootloader(StrEnum): none = enum.auto() grub = enum.auto() class ShimBootloader(StrEnum): none = enum.auto() signed = enum.auto() unsigned = enum.auto() class Cacheonly(StrEnum): always = enum.auto() auto = enum.auto() none = auto metadata = enum.auto() never = enum.auto() class Firmware(StrEnum): auto = enum.auto() linux = enum.auto() uefi = enum.auto() uefi_secure_boot = enum.auto() bios = enum.auto() def is_uefi(self) -> bool: return self in (Firmware.uefi, Firmware.uefi_secure_boot) class ConsoleMode(StrEnum): interactive = enum.auto() read_only = enum.auto() native = enum.auto() gui = enum.auto() class Network(StrEnum): interface = enum.auto() user = enum.auto() none = enum.auto() class Vmm(StrEnum): qemu = enum.auto() vmspawn = enum.auto() class Incremental(StrEnum): yes = enum.auto() no = enum.auto() strict = enum.auto() def __bool__(self) -> bool: return self != Incremental.no class BuildSourcesEphemeral(StrEnum): yes = enum.auto() no = enum.auto() buildcache = enum.auto() def __bool__(self) -> bool: return self != BuildSourcesEphemeral.no class Architecture(StrEnum): alpha = enum.auto() arc = enum.auto() arm = enum.auto() arm64 = enum.auto() ia64 = enum.auto() loongarch64 = enum.auto() mips_le = enum.auto() mips64_le = enum.auto() parisc = enum.auto() ppc = enum.auto() ppc64 = enum.auto() ppc64_le = enum.auto() riscv32 = enum.auto() riscv64 = enum.auto() s390 = enum.auto() s390x = enum.auto() tilegx = enum.auto() x86 = enum.auto() x86_64 = enum.auto() @staticmethod def from_uname(s: str) -> "Architecture": a = { "aarch64": Architecture.arm64, "aarch64_be": Architecture.arm64, "armv8l": Architecture.arm, "armv8b": Architecture.arm, "armv7ml": Architecture.arm, "armv7mb": Architecture.arm, "armv7l": Architecture.arm, "armv7b": Architecture.arm, "armv6l": Architecture.arm, "armv6b": Architecture.arm, "armv5tl": Architecture.arm, "armv5tel": Architecture.arm, "armv5tejl": Architecture.arm, "armv5tejb": Architecture.arm, "armv5teb": Architecture.arm, "armv5tb": Architecture.arm, "armv4tl": Architecture.arm, "armv4tb": Architecture.arm, "armv4l": Architecture.arm, "armv4b": Architecture.arm, "alpha": Architecture.alpha, "arc": Architecture.arc, "arceb": Architecture.arc, "x86_64": Architecture.x86_64, "i686": Architecture.x86, "i586": Architecture.x86, "i486": Architecture.x86, "i386": Architecture.x86, "ia64": Architecture.ia64, "parisc64": Architecture.parisc, "parisc": Architecture.parisc, "loongarch64": Architecture.loongarch64, "mips64": Architecture.mips64_le, "mips": Architecture.mips_le, "ppc64le": Architecture.ppc64_le, "ppc64": Architecture.ppc64, "ppc": Architecture.ppc, "riscv64": Architecture.riscv64, "riscv32": Architecture.riscv32, "riscv": Architecture.riscv64, "s390x": Architecture.s390x, "s390": Architecture.s390, "tilegx": Architecture.tilegx, }.get(s) # fmt: skip if not a: die(f"Architecture {s} is not supported") return a def to_efi(self) -> Optional[str]: return { Architecture.x86_64: "x64", Architecture.x86: "ia32", Architecture.arm64: "aa64", Architecture.arm: "arm", Architecture.riscv64: "riscv64", Architecture.loongarch64: "loongarch64", }.get(self) # fmt: skip def to_grub(self) -> Optional[str]: return { Architecture.x86_64: "x86_64", Architecture.x86: "i386", Architecture.arm64: "arm64", Architecture.arm: "arm", }.get(self) # fmt: skip def to_qemu(self) -> str: a = { Architecture.alpha: "alpha", Architecture.arm: "arm", Architecture.arm64: "aarch64", Architecture.loongarch64: "loongarch64", 
Architecture.mips64_le: "mips", Architecture.mips_le: "mips", Architecture.parisc: "hppa", Architecture.ppc: "ppc", Architecture.ppc64: "ppc64", Architecture.ppc64_le: "ppc64", Architecture.riscv32: "riscv32", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.x86: "i386", Architecture.x86_64: "x86_64", }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by QEMU") return a def to_oci(self) -> str: a = { Architecture.arm: "arm", Architecture.arm64: "arm64", Architecture.loongarch64: "loong64", Architecture.mips64_le: "mips64le", Architecture.mips_le: "mipsle", Architecture.ppc: "ppc", Architecture.ppc64: "ppc64", Architecture.ppc64_le: "ppc64le", Architecture.riscv32: "riscv", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.x86: "386", Architecture.x86_64: "amd64", }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by OCI") return a def supports_smbios(self, firmware: Firmware) -> bool: if self.is_x86_variant(): return True return self.is_arm_variant() and firmware.is_uefi() def supports_fw_cfg(self) -> bool: return self.is_x86_variant() or self.is_arm_variant() def supports_smm(self) -> bool: return self.is_x86_variant() def can_kvm(self) -> bool: return self == Architecture.native() or ( Architecture.native() == Architecture.x86_64 and self == Architecture.x86 ) def default_qemu_machine(self) -> str: m = { Architecture.x86: "q35", Architecture.x86_64: "q35", Architecture.arm: "virt", Architecture.arm64: "virt", Architecture.s390: "s390-ccw-virtio", Architecture.s390x: "s390-ccw-virtio", Architecture.ppc: "pseries", Architecture.ppc64: "pseries", Architecture.ppc64_le: "pseries", Architecture.riscv64: "virt", } # fmt: skip if self not in m: die(f"No qemu machine defined for architecture {self}") return m[self] def default_qemu_nic_model(self) -> str: return { Architecture.s390: "virtio", Architecture.s390x: "virtio", }.get(self, "virtio-net-pci") # fmt: skip def is_native(self) -> bool: return self == self.native() def is_x86_variant(self) -> bool: return self in (Architecture.x86, Architecture.x86_64) def is_arm_variant(self) -> bool: return self in (Architecture.arm, Architecture.arm64) @classmethod def native(cls) -> "Architecture": return cls.from_uname(platform.machine()) class ArtifactOutput(StrEnum): uki = enum.auto() kernel = enum.auto() initrd = enum.auto() partitions = enum.auto() @staticmethod def compat_no() -> list["ArtifactOutput"]: return [ ArtifactOutput.uki, ArtifactOutput.kernel, ArtifactOutput.initrd, ] @staticmethod def compat_yes() -> list["ArtifactOutput"]: return [ ArtifactOutput.uki, ArtifactOutput.kernel, ArtifactOutput.initrd, ArtifactOutput.partitions, ] def try_parse_boolean(s: str) -> Optional[bool]: "Parse 1/true/yes/y/t/on as true and 0/false/no/n/f/off/None as false" s_l = s.lower() if s_l in {"1", "true", "yes", "y", "t", "on", "always"}: return True if s_l in {"0", "false", "no", "n", "f", "off", "never"}: return False return None def parse_boolean(s: str) -> bool: value = try_parse_boolean(s) if value is None: die(f"Invalid boolean literal: {s!r}") return value def parse_path( value: str, *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, absolute: bool = False, directory: bool = False, exclude: Sequence[PathString] = (), constants: Sequence[str] = (), ) -> Path: if value in constants: return Path(value) if expandvars: value = os.path.expandvars(value) path = Path(value) if expanduser: path = 
path.expanduser() if required: if not path.exists(): die(f"{value} does not exist") if directory and not path.is_dir(): die(f"{value} is not a directory") if absolute and not path.is_absolute(): die(f"{value} must be an absolute path") for e in exclude: if path.is_relative_to(e): die(f"{path} can not be relative to {e}") if resolve: path = path.resolve() if secret and path.exists(): mode = path.stat().st_mode & 0o777 if mode & 0o007: die( textwrap.dedent(f"""\ Permissions of '{path}' of '{mode:04o}' are too open. When creating secret files use an access mode that restricts access to the owner only. """) ) return path def parse_paths_from_directory( value: str, *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, absolute: bool = False, constants: Sequence[str] = (), ) -> list[Path]: base = os.path.dirname(value) glob = os.path.basename(value) path = parse_path( base, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, absolute=absolute, constants=constants, ) if not path.exists(): return [] if path.exists() and not path.is_dir(): die(f"{path} should be a directory, but isn't.") return sorted(parse_path(os.fspath(p), resolve=resolve, secret=secret) for p in path.glob(glob)) def config_parse_key(value: Optional[str], old: Optional[str]) -> Optional[Path]: if not value: return None return parse_path(value, secret=True) if Path(value).exists() else Path(value) def config_parse_certificate(value: Optional[str], old: Optional[str]) -> Optional[Path]: if not value: return None return parse_path(value) if Path(value).exists() else Path(value) def make_tree_parser( absolute: bool = True, required: bool = False, directory: bool = False, ) -> Callable[[str], ConfigTree]: def parse_tree(value: str) -> ConfigTree: src, sep, tgt = value.partition(":") return ConfigTree( source=parse_path( src, required=required, directory=directory, ), target=parse_path( tgt, required=False, resolve=False, expanduser=False, absolute=absolute, ) if sep else None, ) return parse_tree def config_match_build_sources(match: str, value: list[ConfigTree]) -> bool: return Path(match.lstrip("/")) in [tree.target for tree in value if tree.target] def config_make_list_matcher(parse: Callable[[str], T]) -> ConfigMatchCallback[list[T]]: def config_match_list(match: str, value: list[T]) -> bool: if not match: return len(value) == 0 return parse(match) in value return config_match_list def config_parse_string(value: Optional[str], old: Optional[str]) -> Optional[str]: return value or None def config_make_string_matcher(allow_globs: bool = False) -> ConfigMatchCallback[str]: def config_match_string(match: str, value: str) -> bool: if allow_globs: return fnmatch.fnmatchcase(value, match) else: return match == value return config_match_string def config_match_key_value(match: str, value: dict[str, str]) -> bool: k, sep, v = match.partition("=") if not sep: return k in value return value.get(k, None) == v def config_parse_boolean(value: Optional[str], old: Optional[bool]) -> Optional[bool]: if value is None: return False if not value: return None return parse_boolean(value) def parse_feature(value: str) -> ConfigFeature: try: return ConfigFeature(value) except ValueError: return ConfigFeature.enabled if parse_boolean(value) else ConfigFeature.disabled def config_parse_feature(value: Optional[str], old: Optional[ConfigFeature]) -> Optional[ConfigFeature]: if value is None: return ConfigFeature.auto if not value: return None 
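    # Editorial sketch (not part of mkosi): parse_feature() above accepts the
    # tristate literals as well as any boolean spelling, e.g.:
    #   parse_feature("auto") -> ConfigFeature.auto
    #   parse_feature("yes")  -> ConfigFeature.enabled
    #   parse_feature("0")    -> ConfigFeature.disabled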
return parse_feature(value) def config_match_feature(match: str, value: ConfigFeature) -> bool: return value == parse_feature(match) def config_parse_compression(value: Optional[str], old: Optional[Compression]) -> Optional[Compression]: if not value: return None try: return Compression[value] except KeyError: return Compression.zstd if parse_boolean(value) else Compression.none def config_parse_uuid(value: Optional[str], old: Optional[str]) -> Optional[uuid.UUID]: if not value: return None if value == "random": return uuid.uuid4() try: return uuid.UUID(value) except ValueError: die(f"{value} is not a valid UUID") def config_parse_source_date_epoch(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: timestamp = int(value) except ValueError: die(f"Timestamp {value!r} is not a valid integer") if timestamp < 0: die(f"Source date epoch timestamp cannot be negative (got {value})") return timestamp def config_parse_compress_level(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: level = int(value) except ValueError: die(f"Compression level {value!r} is not a valid integer") if level < 0: die(f"Compression level cannot be negative (got {value})") return level def config_parse_mode(value: Optional[str], old: Optional[int]) -> Optional[int]: if not value: return None try: mode = int(value, base=8) except ValueError: die(f"Access mode {value!r} is not a valid integer in base 8") if mode < 0: die(f"Access mode cannot be negative (got {value})") if mode > 0o1777: die(f"Access mode cannot be greater than 1777 (got {value})") return mode def config_default_compression(namespace: argparse.Namespace) -> Compression: if namespace.output_format in ( OutputFormat.tar, OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp, OutputFormat.addon, ): return Compression.zstd elif namespace.output_format == OutputFormat.oci: return Compression.gz else: return Compression.none def config_default_output(namespace: argparse.Namespace) -> str: output = namespace.image or namespace.image_id or "image" if namespace.image_version: output += f"_{namespace.image_version}" return output def config_default_distribution(namespace: argparse.Namespace) -> Distribution: if d := os.getenv("MKOSI_HOST_DISTRIBUTION"): return Distribution(d) detected = detect_distribution()[0] if not detected: logging.info( "Distribution of your host can't be detected or isn't a supported target. " "Defaulting to Distribution=custom." ) return Distribution.custom return detected def config_default_release(namespace: argparse.Namespace) -> str: hd: Optional[Distribution] hr: Optional[str] if (d := os.getenv("MKOSI_HOST_DISTRIBUTION")) and (r := os.getenv("MKOSI_HOST_RELEASE")): hd, hr = Distribution(d), r else: hd, hr = detect_distribution() # If the configured distribution matches the host distribution, use the same release as the host. 
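    # Editorial note (not part of mkosi): like the distribution default above,
    # the release default can be pinned from the environment, e.g.
    #   MKOSI_HOST_DISTRIBUTION=fedora MKOSI_HOST_RELEASE=41 mkosi summary
    # makes Release= default to "41" when the configured distribution is fedora.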
if namespace.distribution == hd and hr is not None: return hr return cast(str, namespace.distribution.default_release()) def config_default_tools_tree_distribution(namespace: argparse.Namespace) -> Distribution: if d := os.getenv("MKOSI_HOST_DISTRIBUTION"): return Distribution(d).default_tools_tree_distribution() detected = detect_distribution()[0] if not detected: return Distribution.custom return detected.default_tools_tree_distribution() def config_default_repository_key_fetch(namespace: argparse.Namespace) -> bool: def needs_repository_key_fetch(distribution: Distribution) -> bool: return distribution == Distribution.arch or distribution.is_rpm_distribution() if detect_distribution()[0] != Distribution.ubuntu: return False if namespace.tools_tree is None: return needs_repository_key_fetch(namespace.distribution) if namespace.tools_tree != Path("default"): return ( detect_distribution(namespace.tools_tree)[0] == Distribution.ubuntu and needs_repository_key_fetch(namespace.distribution) ) # fmt: skip return ( namespace.tools_tree_distribution == Distribution.ubuntu and needs_repository_key_fetch(namespace.distribution) ) or needs_repository_key_fetch(namespace.tools_tree_distribution) def config_default_source_date_epoch(namespace: argparse.Namespace) -> Optional[int]: for env in namespace.environment: if s := startswith(env, "SOURCE_DATE_EPOCH="): break else: s = os.environ.get("SOURCE_DATE_EPOCH") return config_parse_source_date_epoch(s, None) def config_default_proxy_url(namespace: argparse.Namespace) -> Optional[str]: names = ("http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY") for env in namespace.environment: k, _, v = env.partition("=") if k in names: return cast(str, v) for k, v in os.environ.items(): if k in names: return cast(str, v) return None def make_enum_parser(type: type[SE]) -> Callable[[str], SE]: def parse_enum(value: str) -> SE: try: return type(value) except ValueError: die(f"'{value}' is not a valid {type.__name__}") return parse_enum def config_make_enum_parser(type: type[SE]) -> ConfigParseCallback[SE]: def config_parse_enum(value: Optional[str], old: Optional[SE]) -> Optional[SE]: return make_enum_parser(type)(value) if value else None return config_parse_enum def config_make_enum_parser_with_boolean(type: type[SE], *, yes: SE, no: SE) -> ConfigParseCallback[SE]: def config_parse_enum(value: Optional[str], old: Optional[SE]) -> Optional[SE]: if not value: return None if value in type.values(): return type(value) return yes if parse_boolean(value) else no return config_parse_enum def config_make_enum_matcher(type: type[SE]) -> ConfigMatchCallback[SE]: def config_match_enum(match: str, value: SE) -> bool: return make_enum_parser(type)(match) == value return config_match_enum def config_make_list_parser( *, delimiter: Optional[str] = None, parse: Callable[[str], T] = str, # type: ignore # see mypy#3737 unescape: bool = False, reset: bool = True, ) -> ConfigParseCallback[list[T]]: def config_parse_list(value: Optional[str], old: Optional[list[T]]) -> Optional[list[T]]: new = old.copy() if old else [] if value is None: return [] # Empty strings reset the list. 
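        # Editorial sketch (not part of mkosi): for a list-valued setting such as
        # Packages= parsed with delimiter=",", successive assignments append and
        # an empty assignment resets, e.g.:
        #   Packages=vim,git  -> ["vim", "git"]
        #   Packages=         -> returns None here, dropping what was collected
        #   Packages=curl     -> ["curl"]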
if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter or ''}" lex.commenters = "" values = list(lex) if reset and not values: return None else: if delimiter: value = value.replace(delimiter, "\n") values = value.split("\n") if reset and len(values) == 1 and values[0] == "": return None return new + [parse(v) for v in values if v] return config_parse_list def config_match_version(match: str, value: str) -> bool: version = GenericVersion(value) for sigil, opfunc in { "==": operator.eq, "!=": operator.ne, "<=": operator.le, ">=": operator.ge, ">": operator.gt, "<": operator.lt, }.items(): if (rhs := startswith(match, sigil)) is not None: op = opfunc comp_version = GenericVersion(rhs) break else: # default to equality if no operation is specified op = operator.eq comp_version = GenericVersion(match) # all constraints must be fulfilled if not op(version, comp_version): return False return True def config_make_dict_parser( *, delimiter: Optional[str] = None, parse: Callable[[str], tuple[str, str]], unescape: bool = False, allow_paths: bool = False, reset: bool = True, ) -> ConfigParseCallback[dict[str, str]]: def config_parse_dict(value: Optional[str], old: Optional[dict[str, str]]) -> Optional[dict[str, str]]: new = old.copy() if old else {} if value is None: return {} if allow_paths and value and "=" not in value: if Path(value).is_dir(): for p in sorted(Path(value).iterdir()): if p.is_dir(): continue if os.access(p, os.X_OK): new[p.name] = run([p], stdout=subprocess.PIPE, env=os.environ).stdout else: new[p.name] = p.read_text() elif (p := Path(value)).exists(): if os.access(p, os.X_OK): new[p.name] = run([p], stdout=subprocess.PIPE, env=os.environ).stdout else: new[p.name] = p.read_text() else: die(f"{p} does not exist") return new # Empty strings reset the dict. 
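        # Editorial sketch (not part of mkosi): paired with parse_environment()
        # (defined just below), this accepts explicit pairs as well as bare names
        # resolved from the caller's environment, e.g.:
        #   Environment=FOO=bar PATH  -> {"FOO": "bar", "PATH": os.getenv("PATH", "")}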
if unescape: lex = shlex.shlex(value, posix=True) lex.whitespace_split = True lex.whitespace = f"\n{delimiter or ''}" lex.commenters = "" values = list(lex) if reset and not values: return None else: if delimiter: value = value.replace(delimiter, "\n") values = value.split("\n") if reset and len(values) == 1 and values[0] == "": return None return new | dict(parse(v) for v in values if v) return config_parse_dict def parse_environment(value: str) -> tuple[str, str]: key, sep, value = value.partition("=") key, value = key.strip(), value.strip() value = value if sep else os.getenv(key, "") return (key, value) def parse_key_value(value: str) -> tuple[str, str]: key, _, value = value.partition("=") key, value = key.strip(), value.strip() return (key, value) def make_path_parser( *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, exclude: Sequence[PathString] = (), constants: Sequence[str] = (), ) -> Callable[[str], Path]: return functools.partial( parse_path, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, exclude=exclude, constants=constants, ) def config_make_path_parser( *, required: bool = True, resolve: bool = True, expanduser: bool = True, expandvars: bool = True, secret: bool = False, constants: Sequence[str] = (), ) -> ConfigParseCallback[Path]: def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]: if not value: return None return parse_path( value, required=required, resolve=resolve, expanduser=expanduser, expandvars=expandvars, secret=secret, constants=constants, ) return config_parse_path def is_valid_filename(s: str) -> bool: s = s.strip() return not (s == "." or s == ".." or "/" in s) def config_make_filename_parser(hint: str) -> ConfigParseCallback[str]: def config_parse_filename(value: Optional[str], old: Optional[str]) -> Optional[str]: if not value: return None if not is_valid_filename(value): die( f"{value!r} is not a valid filename.", hint=hint, ) return value return config_parse_filename def match_path_exists(value: str) -> bool: if not value: return False return Path(value).exists() def config_parse_root_password( value: Optional[str], old: Optional[tuple[str, bool]] ) -> Optional[tuple[str, bool]]: if not value: return None value = value.strip() hashed = value.startswith("hashed:") value = value.removeprefix("hashed:") return (value, hashed) def match_systemd_version(value: str) -> bool: if not value: return False version = run(["systemctl", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[1] return config_match_version(value, version) def match_host_architecture(value: str) -> bool: return Architecture(value) == Architecture.native() def parse_bytes(value: str) -> int: if value.endswith("G"): factor = 1024**3 elif value.endswith("M"): factor = 1024**2 elif value.endswith("K"): factor = 1024 else: factor = 1 if factor > 1: value = value[:-1] result = math.ceil(float(value) * factor) if result <= 0: die("Size out of range") rem = result % 4096 if rem != 0: result += 4096 - rem return result def config_parse_bytes(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None return parse_bytes(value) def config_parse_number(value: Optional[str], old: Optional[int] = None) -> Optional[int]: if not value: return None try: return int(value) except ValueError: die(f"{value!r} is not a valid number") def parse_profile(value: str) -> str: if not is_valid_filename(value): die( f"{value!r} is 
not a valid profile",
            hint="Profiles= or --profile= requires a name with no path components.",
        )

    return value


def parse_drive(value: str) -> Drive:
    # Split on every separator so that the component-count check below can
    # trigger and the optional fifth component (file_id) is reachable.
    parts = value.split(":")
    if not parts or not parts[0]:
        die(f"No ID specified for drive '{value}'")
    if len(parts) < 2:
        die(f"Missing size in drive '{value}'")
    if len(parts) > 5:
        die(f"Too many components in drive '{value}'")

    id = parts[0]
    if not is_valid_filename(id):
        die(f"Unsupported path character in drive id '{id}'")

    size = parse_bytes(parts[1])
    directory = parse_path(parts[2]) if len(parts) > 2 and parts[2] else None
    options = parts[3] if len(parts) > 3 and parts[3] else None
    file_id = parts[4] if len(parts) > 4 and parts[4] else id

    return Drive(id=id, size=size, directory=directory, options=options, file_id=file_id)


def config_parse_sector_size(value: Optional[str], old: Optional[int]) -> Optional[int]:
    if not value:
        return None

    try:
        size = int(value)
    except ValueError:
        die(f"'{value}' is not a valid number")

    if size < 512 or size > 4096:
        die(f"Sector size not between 512 and 4096: {size}")

    if not is_power_of_2(size):
        die(f"Sector size not power of 2: {size}")

    return size


def config_parse_vsock_cid(value: Optional[str], old: Optional[int]) -> Optional[int]:
    if not value:
        return None

    if value == "auto":
        return VsockCID.auto

    if value == "hash":
        return VsockCID.hash

    try:
        cid = int(value)
    except ValueError:
        die(f"VSock connection ID '{value}' is not a valid number or one of 'auto' or 'hash'")

    if cid not in range(3, 0xFFFFFFFF):
        die(f"{cid} is not in the valid VSock connection ID range [3, 0xFFFFFFFF)")

    return cid


def config_parse_minimum_version(
    value: Optional[str], old: Optional[GenericVersion]
) -> Optional[GenericVersion]:
    if not value:
        return old

    new = GenericVersion(value)

    if not old:
        return new

    return max(old, new)


def file_run_or_read(file: Path) -> str:
    "Run the specified file and capture its output if it's executable, else read file contents"

    if os.access(file, os.X_OK):
        return run([file.absolute()], stdout=subprocess.PIPE, env=os.environ).stdout

    content = file.read_text()

    if content.startswith("#!/"):
        die(
            f"{file} starts with a shebang ({content.splitlines()[0]})",
            hint="This file should be executable",
        )

    return content


class KeySourceType(StrEnum):
    file = enum.auto()
    engine = enum.auto()
    provider = enum.auto()


@dataclasses.dataclass(frozen=True)
class KeySource:
    type: KeySourceType
    source: str = ""

    def __str__(self) -> str:
        return f"{self.type}:{self.source}" if self.source else str(self.type)


def config_parse_key_source(value: Optional[str], old: Optional[KeySource]) -> Optional[KeySource]:
    if not value:
        return old

    typ, _, source = value.partition(":")
    try:
        type = KeySourceType(typ)
    except ValueError:
        die(f"'{value}' is not a valid key source")

    return KeySource(type=type, source=source)


class CertificateSourceType(StrEnum):
    file = enum.auto()
    provider = enum.auto()


@dataclasses.dataclass(frozen=True)
class CertificateSource:
    type: CertificateSourceType
    source: str = ""

    def __str__(self) -> str:
        return f"{self.type}:{self.source}" if self.source else str(self.type)


def config_parse_certificate_source(
    value: Optional[str],
    old: Optional[CertificateSource],
) -> Optional[CertificateSource]:
    if not value:
        return old

    typ, _, source = value.partition(":")
    try:
        type = CertificateSourceType(typ)
    except ValueError:
        die(f"'{value}' is not a valid certificate source")

    return CertificateSource(type=type, source=source)


def config_parse_artifact_output_list(
    value: Optional[str], old: Optional[list[ArtifactOutput]]
) ->
Optional[list[ArtifactOutput]]: if not value: return [] # Keep for backwards compatibility boolean_value = try_parse_boolean(value) if boolean_value is not None: return ArtifactOutput.compat_yes() if boolean_value else ArtifactOutput.compat_no() list_parser = config_make_list_parser(delimiter=",", parse=make_enum_parser(ArtifactOutput)) return list_parser(value, old) class SettingScope(StrEnum): # Not passed down to subimages local = enum.auto() # Passed down to subimages, cannot be overridden universal = enum.auto() # Passed down to subimages, can be overridden inherit = enum.auto() @dataclasses.dataclass(frozen=True) class ConfigSetting(Generic[T]): dest: str section: str parse: ConfigParseCallback[T] = config_parse_string # type: ignore # see mypy#3737 match: Optional[ConfigMatchCallback[T]] = None name: str = "" default: Optional[T] = None default_factory: Optional[ConfigDefaultCallback[T]] = None default_factory_depends: tuple[str, ...] = tuple() paths: tuple[str, ...] = () recursive_paths: tuple[str, ...] = () path_read_text: bool = False path_secret: bool = False specifier: str = "" scope: SettingScope = SettingScope.local # settings for argparse short: Optional[str] = None long: str = "" choices: Optional[list[str]] = None metavar: Optional[str] = None nargs: Optional[str] = None const: Optional[Any] = None help: Optional[str] = None # backward compatibility compat_names: tuple[str, ...] = () compat_longs: tuple[str, ...] = () def __post_init__(self) -> None: if not self.name: object.__setattr__(self, "name", "".join(x.capitalize() for x in self.dest.split("_") if x)) if not self.long: object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}") @dataclasses.dataclass(frozen=True) class Match: name: str match: Callable[[str], bool] @dataclasses.dataclass(frozen=True) class Specifier: char: str callback: Callable[[argparse.Namespace, Path], str] depends: tuple[str, ...] = tuple() class CustomHelpFormatter(argparse.HelpFormatter): def _format_action_invocation(self, action: argparse.Action) -> str: if not action.option_strings or action.nargs == 0: return super()._format_action_invocation(action) default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) return ", ".join(action.option_strings) + " " + args_string def _split_lines(self, text: str, width: int) -> list[str]: """Wraps text to width, each line separately. If the first line of text ends in a colon, we assume that this is a list of option descriptions, and subindent them. Otherwise, the text is wrapped without indentation. 
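        For example (an editorial illustration): splitting the two-line text
        "Verbs:" / "build shell boot" at width 12 yields
        ["Verbs:", "build shell", "  boot"], the continuation line being
        subindented because the first line ends in a colon.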
""" lines = text.splitlines() subindent = " " if lines[0].endswith(":") else "" return flatten( textwrap.wrap( line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent ) for line in lines ) def parse_chdir(path: str) -> Optional[Path]: if not path: # The current directory should be ignored return None # Immediately change the current directory so that it's taken into # account when parsing the following options that take a relative path try: os.chdir(path) except (FileNotFoundError, NotADirectoryError): die(f"{path} is not a directory!") except OSError as e: die(f"Cannot change the directory to {path}: {e}") # Keep track of the current directory return Path.cwd() class IgnoreAction(argparse.Action): """Argparse action for deprecated options that can be ignored.""" def __init__( self, option_strings: Sequence[str], dest: str, nargs: Union[int, str, None] = None, default: Any = argparse.SUPPRESS, help: Optional[str] = argparse.SUPPRESS, ) -> None: super().__init__(option_strings, dest, nargs=nargs, default=default, help=help) def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None, ) -> None: logging.warning(f"{option_string} is no longer supported") class PagerHelpAction(argparse._HelpAction): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None] = None, option_string: Optional[str] = None, ) -> None: page(parser.format_help(), namespace.pager) parser.exit() def dict_with_capitalised_keys_factory(pairs: list[tuple[str, T]]) -> dict[str, T]: def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_DEST.get(k)) is not None: return s.name return "".join(p.capitalize() for p in k.split("_")) return {key_transformer(k): v for k, v in dict(pairs).items()} @dataclasses.dataclass(frozen=True) class Args: verb: Verb cmdline: list[str] force: int directory: Optional[Path] debug: bool debug_shell: bool debug_workspace: bool debug_sandbox: bool pager: bool genkey_valid_days: str genkey_common_name: str auto_bump: bool doc_format: DocFormat json: bool wipe_build_dir: bool @classmethod def default(cls) -> "Args": """Alternative constructor to generate an all-default Args. This prevents Args being generated with defaults values implicitly. """ with tempfile.TemporaryDirectory() as tempdir: with chdir(tempdir): args, _ = parse_config([]) return args @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Args": return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters}) def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str: """Dump Args as JSON string.""" return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys) @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args": """Instantiate a Args object from a (partial) JSON dump.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError( f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files." 
) def key_transformer(k: str) -> str: return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) for k, v in j.items(): k = key_transformer(k) if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v): die( f"Serialized JSON has unknown field {k} with value {v}", hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} return dataclasses.replace( cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters} ) PACKAGE_GLOBS = ( "*.rpm", "*.pkg.tar*", "*.deb", "*.ddeb", ) @dataclasses.dataclass(frozen=True) class UKIProfile: profile: dict[str, str] cmdline: list[str] def make_simple_config_parser( settings: Sequence[ConfigSetting[object]], valtype: type[T], ) -> Callable[[str], T]: lookup_by_name = {s.name: s for s in settings} lookup_by_dest = {s.dest: s for s in settings} def finalize_value(config: argparse.Namespace, setting: ConfigSetting[object]) -> None: if hasattr(config, setting.dest): return if setting.default_factory: for d in setting.default_factory_depends: finalize_value(config, lookup_by_dest[d]) default = setting.default_factory(config) elif setting.default: default = setting.default else: default = setting.parse(None, None) setattr(config, setting.dest, default) def parse_simple_config(value: str) -> T: path = parse_path(value) config = argparse.Namespace() for section, name, value in parse_ini(path, only_sections=[s.section for s in settings]): if not name and not value: continue if not (s := lookup_by_name.get(name)): die(f"{path.absolute()}: Unknown setting {name}") if section != s.section: logging.warning( f"{path.absolute()}: Setting {name} should be configured in [{s.section}], not " f"[{section}]." ) if name != s.name: logging.warning( f"{path.absolute()}: Setting {name} is deprecated, please use {s.name} instead." ) setattr(config, s.dest, s.parse(value, getattr(config, s.dest, None))) for setting in settings: finalize_value(config, setting) return valtype( **{k: v for k, v in vars(config).items() if k in inspect.signature(valtype).parameters} ) return parse_simple_config @dataclasses.dataclass(frozen=True) class Config: """Type-hinted storage for command line arguments. Only user configuration is stored here while dynamic state exists in Mkosicontext. If a field of the same name exists in both classes always access the value from context. 
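    (Editorial note: instances round-trip through JSON via the to_json() and
    from_json() helpers defined below.)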
""" profiles: list[str] files: list[Path] dependencies: list[str] minimum_version: Optional[GenericVersion] pass_environment: list[str] distribution: Distribution release: str architecture: Architecture mirror: Optional[str] local_mirror: Optional[str] repository_key_check: bool repository_key_fetch: bool repositories: list[str] output_format: OutputFormat manifest_format: list[ManifestFormat] output: str compress_output: Compression compress_level: int output_dir: Optional[Path] output_mode: Optional[int] image_id: Optional[str] image_version: Optional[str] split_artifacts: list[ArtifactOutput] repart_dirs: list[Path] sysupdate_dir: Optional[Path] sector_size: Optional[int] overlay: bool seed: uuid.UUID packages: list[str] build_packages: list[str] volatile_packages: list[str] package_directories: list[Path] volatile_package_directories: list[Path] with_recommends: bool with_docs: bool base_trees: list[Path] skeleton_trees: list[ConfigTree] extra_trees: list[ConfigTree] remove_packages: list[str] remove_files: list[str] clean_package_metadata: ConfigFeature source_date_epoch: Optional[int] configure_scripts: list[Path] sync_scripts: list[Path] prepare_scripts: list[Path] build_scripts: list[Path] postinst_scripts: list[Path] finalize_scripts: list[Path] postoutput_scripts: list[Path] clean_scripts: list[Path] bootable: ConfigFeature bootloader: Bootloader bios_bootloader: BiosBootloader shim_bootloader: ShimBootloader unified_kernel_images: ConfigFeature unified_kernel_image_format: str unified_kernel_image_profiles: list[UKIProfile] initrds: list[Path] initrd_packages: list[str] initrd_volatile_packages: list[str] microcode_host: bool devicetree: Optional[Path] kernel_command_line: list[str] kernel_modules_include: list[str] kernel_modules_exclude: list[str] kernel_modules_include_host: bool kernel_modules_initrd: bool kernel_modules_initrd_include: list[str] kernel_modules_initrd_exclude: list[str] kernel_modules_initrd_include_host: bool locale: Optional[str] locale_messages: Optional[str] keymap: Optional[str] timezone: Optional[str] hostname: Optional[str] root_password: Optional[tuple[str, bool]] root_shell: Optional[str] machine_id: Optional[uuid.UUID] autologin: bool make_initrd: bool ssh: bool selinux_relabel: ConfigFeature secure_boot: bool secure_boot_auto_enroll: bool secure_boot_key: Optional[Path] secure_boot_key_source: KeySource secure_boot_certificate: Optional[Path] secure_boot_certificate_source: CertificateSource secure_boot_sign_tool: SecureBootSignTool verity: ConfigFeature verity_key: Optional[Path] verity_key_source: KeySource verity_certificate: Optional[Path] verity_certificate_source: CertificateSource sign_expected_pcr: ConfigFeature sign_expected_pcr_key: Optional[Path] sign_expected_pcr_key_source: KeySource sign_expected_pcr_certificate: Optional[Path] sign_expected_pcr_certificate_source: CertificateSource passphrase: Optional[Path] checksum: bool sign: bool openpgp_tool: str key: Optional[str] tools_tree: Optional[Path] tools_tree_distribution: Optional[Distribution] tools_tree_release: Optional[str] tools_tree_mirror: Optional[str] tools_tree_repositories: list[str] tools_tree_sandbox_trees: list[ConfigTree] tools_tree_packages: list[str] tools_tree_package_directories: list[Path] tools_tree_certificates: bool extra_search_paths: list[Path] incremental: Incremental cacheonly: Cacheonly sandbox_trees: list[ConfigTree] workspace_dir: Optional[Path] cache_dir: Optional[Path] package_cache_dir: Optional[Path] build_dir: Optional[Path] use_subvolumes: 
ConfigFeature repart_offline: bool history: bool build_sources: list[ConfigTree] build_sources_ephemeral: BuildSourcesEphemeral environment: dict[str, str] environment_files: list[Path] with_tests: bool with_network: bool proxy_url: Optional[str] proxy_exclude: list[str] proxy_peer_certificate: Optional[Path] proxy_client_certificate: Optional[Path] proxy_client_key: Optional[Path] nspawn_settings: Optional[Path] ephemeral: bool credentials: dict[str, str] kernel_command_line_extra: list[str] register: ConfigFeature runtime_trees: list[ConfigTree] runtime_size: Optional[int] runtime_scratch: ConfigFeature runtime_network: Network runtime_build_sources: bool runtime_home: bool unit_properties: list[str] ssh_key: Optional[Path] ssh_certificate: Optional[Path] machine: Optional[str] forward_journal: Optional[Path] vmm: Vmm console: ConsoleMode cpus: int ram: int kvm: ConfigFeature vsock: ConfigFeature vsock_cid: int tpm: ConfigFeature cdrom: bool removable: bool firmware: Firmware firmware_variables: Optional[Path] linux: Optional[Path] drives: list[Drive] qemu_args: list[str] image: Optional[str] def name(self) -> str: return self.image or self.image_id or "default" def machine_or_name(self) -> str: return self.machine or self.name() def output_dir_or_cwd(self) -> Path: return self.output_dir or Path.cwd() def workspace_dir_or_default(self) -> Path: if self.workspace_dir: return self.workspace_dir if ( (cache := INVOKING_USER.cache_dir()) and cache != Path("/var/cache/mkosi") and os.access(cache, os.W_OK) ): return cache return Path("/var/tmp") def package_cache_dir_or_default(self) -> Path: key = f"{self.distribution}~{self.release}~{self.architecture}" if self.mirror: key += f"-{self.mirror.replace('/', '-')}" return self.package_cache_dir or (INVOKING_USER.cache_dir() / key) def tools(self) -> Path: return self.tools_tree or Path("/") @classmethod def default(cls) -> "Config": """Alternative constructor to generate an all-default Config. This prevents Config being generated with defaults values implicitly. 
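        (Editorial note: from_json() below builds on this baseline by overlaying
        the parsed fields onto the default instance with dataclasses.replace().)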
""" with chdir("/proc"): _, [config] = parse_config([]) return config @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Config": return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters}) @property def output_with_format(self) -> str: return self.output + self.output_format.extension() @property def output_with_compression(self) -> str: output = self.output_with_format if self.compress_output and self.output_format.use_outer_compression(): output += self.compress_output.extension() return output @property def output_split_uki(self) -> str: return f"{self.output}.efi" @property def output_split_kernel(self) -> str: return f"{self.output}.vmlinuz" @property def output_split_initrd(self) -> str: return f"{self.output}.initrd" @property def output_nspawn_settings(self) -> str: return f"{self.output}.nspawn" @property def output_checksum(self) -> str: return f"{self.output}.SHA256SUMS" @property def output_signature(self) -> str: return f"{self.output}.SHA256SUMS.gpg" @property def output_manifest(self) -> str: return f"{self.output}.manifest" @property def output_changelog(self) -> str: return f"{self.output}.changelog" @property def outputs(self) -> list[str]: return [ self.output, self.output_with_format, self.output_with_compression, self.output_split_uki, self.output_split_kernel, self.output_split_initrd, self.output_nspawn_settings, self.output_checksum, self.output_signature, self.output_manifest, self.output_changelog, ] def cache_manifest(self) -> dict[str, Any]: return { "distribution": self.distribution, "release": self.release, "mirror": self.mirror, "architecture": self.architecture, "package_manager": self.distribution.package_manager(self).executable(self), "packages": sorted(self.packages), "build_packages": sorted(self.build_packages), "package_directories": [ (p.name, p.stat().st_mtime_ns) for d in self.package_directories for p in sorted(flatten(d.glob(glob) for glob in PACKAGE_GLOBS)) ], "repositories": sorted(self.repositories), "overlay": self.overlay, "prepare_scripts": sorted( base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ), } def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str: """Dump Config as JSON string.""" return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys) @classmethod def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config": """Instantiate a Config object from a (partial) JSON dump.""" if isinstance(s, str): j = json.loads(s) elif isinstance(s, dict): j = s elif hasattr(s, "read"): j = json.load(s) else: raise ValueError( f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files." 
) def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_NAME.get(k)) is not None: return s.dest return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) for k, v in j.items(): k = key_transformer(k) if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v): die( f"Serialized JSON has unknown field {k} with value {v}", hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} return dataclasses.replace( cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters} ) def find_binary(self, *names: PathString, tools: bool = True) -> Optional[Path]: return find_binary(*names, root=self.tools() if tools else Path("/"), extra=self.extra_search_paths) def sandbox( self, *, network: bool = False, devices: bool = False, relaxed: bool = False, tools: bool = True, scripts: Optional[Path] = None, overlay: Optional[Path] = None, options: Sequence[PathString] = (), setup: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: opt: list[PathString] = [*options] if not relaxed: opt += flatten(("--ro-bind", d, d) for d in self.extra_search_paths) if p := self.proxy_peer_certificate: opt += ["--ro-bind", os.fspath(p), "/proxy.cacert"] if p := self.proxy_client_certificate: opt += ["--ro-bind", os.fspath(p), "/proxy.clientcert"] if p := self.proxy_client_key: opt += ["--ro-bind", os.fspath(p), "/proxy.clientkey"] return sandbox_cmd( network=network, devices=devices, relaxed=relaxed, scripts=scripts, tools=self.tools() if tools else Path("/"), overlay=overlay, options=opt, setup=setup, extra=self.extra_search_paths, ) def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple[str, str, str]]: """ We have our own parser instead of using configparser as the latter does not support specifying the same setting multiple times in the same configuration file. """ section: Optional[str] = None setting: Optional[str] = None value: Optional[str] = None for line in textwrap.dedent(path.read_text()).splitlines(): comment = line.find("#") if comment >= 0: line = line[:comment] if not line.strip(): continue # If we have a section, setting and value, any line that's indented is considered part of the # setting's value. if section and setting and value is not None and line[0].isspace(): value = f"{value}\n{line.strip()}" continue # So the line is not indented, that means we either found a new section or a new setting. Either way, # let's yield the previous setting and its value before parsing the new section/setting. if section and setting and value is not None: yield section, setting, value setting = value = None line = line.strip() if line[0] == "[": if line[-1] != "]": die(f"{line} is not a valid section") # Yield the section name with an empty key and value to indicate we've finished the current # section. 
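            # Editorial sketch (not part of mkosi): taken together, a fragment like
            #   [Content]
            #   Packages=vim
            #   Packages=git
            #            curl
            # yields ("Content", "Packages", "vim"), then ("Content", "Packages",
            # "git\ncurl"), and finally ("Content", "", "") -- repeated keys stay
            # separate and indented lines fold into the preceding value.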
if section: yield section, "", "" section = line[1:-1].strip() if not section: die("Section name cannot be empty or whitespace") continue if not section: die(f"Setting {line} is located outside of section") if only_sections and section not in only_sections: continue setting, delimiter, value = line.partition("=") if not delimiter: die(f"Setting {setting} must be followed by '='") if not setting: die(f"Missing setting name before '=' in {line}") setting = setting.strip() value = value.strip() # Make sure we yield any final setting and its value. if section and setting and value is not None: yield section, setting, value if section: yield section, "", "" UKI_PROFILE_SETTINGS: list[ConfigSetting[Any]] = [ ConfigSetting( dest="profile", section="UKIProfile", parse=config_make_dict_parser(parse=parse_key_value), ), ConfigSetting( dest="cmdline", section="UKIProfile", parse=config_make_list_parser(delimiter=" "), ), ] SETTINGS: list[ConfigSetting[Any]] = [ # Include section ConfigSetting( dest="include", short="-I", section="Include", parse=config_make_list_parser( delimiter=",", reset=False, parse=make_path_parser(constants=BUILTIN_CONFIGS), ), help="Include configuration from the specified file or directory", ), # Config section ConfigSetting( dest="profiles", long="--profile", section="Config", help="Build the specified profiles", parse=config_make_list_parser(delimiter=",", parse=parse_profile), match=config_make_list_matcher(parse=parse_profile), scope=SettingScope.universal, compat_names=("Profile",), ), ConfigSetting( dest="dependencies", long="--dependency", section="Config", parse=config_make_list_parser(delimiter=","), help="Specify other images that this image depends on", ), ConfigSetting( dest="minimum_version", section="Config", parse=config_parse_minimum_version, help="Specify the minimum required mkosi version", ), ConfigSetting( dest="configure_scripts", long="--configure-script", metavar="PATH", section="Config", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.configure",), help="Configure script to run before doing anything", ), ConfigSetting( dest="pass_environment", metavar="NAME", section="Config", parse=config_make_list_parser(delimiter=" "), help="Environment variables to pass to subimages", ), # Distribution section ConfigSetting( dest="distribution", short="-d", section="Distribution", specifier="d", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), default_factory=config_default_distribution, choices=Distribution.choices(), help="Distribution to install", scope=SettingScope.universal, ), ConfigSetting( dest="release", short="-r", section="Distribution", specifier="r", parse=config_parse_string, match=config_make_string_matcher(), default_factory=config_default_release, default_factory_depends=("distribution",), help="Distribution release to install", scope=SettingScope.universal, ), ConfigSetting( dest="architecture", section="Distribution", specifier="a", parse=config_make_enum_parser(Architecture), match=config_make_enum_matcher(Architecture), default=Architecture.native(), choices=Architecture.choices(), help="Override the architecture of installation", scope=SettingScope.universal, ), ConfigSetting( dest="mirror", short="-m", section="Distribution", help="Distribution mirror to use", scope=SettingScope.universal, ), ConfigSetting( dest="local_mirror", section="Distribution", help="Use a single local, flat and plain mirror to build the image", scope=SettingScope.universal, ), 
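# Editorial sketch (not used by the code): every ConfigSetting is reachable both
# from configuration files and from the CLI. For the "distribution" entry above,
#   [Distribution]
#   Distribution=fedora
# in mkosi.conf is equivalent to "mkosi -d fedora" on the command line (the long
# option is presumably derived from dest when no explicit long= is given).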
ConfigSetting( dest="repository_key_check", metavar="BOOL", nargs="?", section="Distribution", default=True, parse=config_parse_boolean, help="Controls signature and key checks on repositories", scope=SettingScope.universal, ), ConfigSetting( dest="repository_key_fetch", metavar="BOOL", nargs="?", section="Distribution", default_factory_depends=("distribution", "tools_tree", "tools_tree_distribution"), default_factory=config_default_repository_key_fetch, parse=config_parse_boolean, help="Controls whether distribution GPG keys can be fetched remotely", scope=SettingScope.universal, ), ConfigSetting( dest="repositories", metavar="REPOS", section="Distribution", parse=config_make_list_parser(delimiter=","), match=config_make_list_matcher(parse=str), help="Repositories to use", scope=SettingScope.universal, ), # Output section ConfigSetting( dest="output_format", short="-t", long="--format", name="Format", section="Output", specifier="t", parse=config_make_enum_parser(OutputFormat), match=config_make_enum_matcher(OutputFormat), default=OutputFormat.disk, choices=OutputFormat.choices(), help="Output Format", ), ConfigSetting( dest="manifest_format", metavar="FORMAT", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_enum_parser(ManifestFormat)), help="Manifest Format", ), ConfigSetting( dest="output", short="-o", metavar="NAME", section="Output", specifier="o", parse=config_make_filename_parser( "Output= or --output= requires a filename with no path components. " "Use OutputDirectory= or --output-directory= to configure the output directory." ), default_factory=config_default_output, default_factory_depends=("image_id", "image_version"), help="Output name", ), ConfigSetting( dest="compress_output", metavar="ALG", nargs="?", section="Output", parse=config_parse_compression, default_factory=config_default_compression, default_factory_depends=("distribution", "release", "output_format"), help="Enable whole-output compression (with images or archives)", ), ConfigSetting( dest="compress_level", metavar="LEVEL", section="Output", parse=config_parse_compress_level, default=3, help="Set the compression level to use", ), ConfigSetting( dest="output_dir", short="-O", long="--output-directory", compat_longs=("--output-dir",), metavar="DIR", name="OutputDirectory", section="Output", specifier="O", parse=config_make_path_parser(required=False), paths=("mkosi.output",), help="Output directory", scope=SettingScope.universal, ), ConfigSetting( dest="output_mode", metavar="MODE", section="Output", parse=config_parse_mode, help="Set file system access mode for image", scope=SettingScope.universal, ), ConfigSetting( dest="image_version", match=config_match_version, section="Output", specifier="v", help="Set version for image", paths=("mkosi.version",), path_read_text=True, scope=SettingScope.inherit, ), ConfigSetting( dest="image_id", match=config_make_string_matcher(allow_globs=True), section="Output", specifier="i", help="Set ID for image", scope=SettingScope.inherit, ), ConfigSetting( dest="split_artifacts", nargs="?", section="Output", parse=config_parse_artifact_output_list, default=ArtifactOutput.compat_no(), help="Split artifacts out of the final image", ), ConfigSetting( dest="repart_dirs", long="--repart-directory", compat_longs=("--repart-dir",), metavar="PATH", name="RepartDirectories", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.repart",), help="Directory containing systemd-repart partition definitions", ), 
ConfigSetting( dest="sector_size", section="Output", parse=config_parse_sector_size, help="Set the disk image sector size", scope=SettingScope.inherit, ), ConfigSetting( dest="overlay", metavar="BOOL", nargs="?", section="Output", parse=config_parse_boolean, help="Only output the additions on top of the given base trees", ), ConfigSetting( dest="seed", metavar="UUID", section="Output", parse=config_parse_uuid, default=uuid.uuid4(), paths=("mkosi.seed",), path_read_text=True, help="Set the seed for systemd-repart", ), ConfigSetting( dest="clean_scripts", long="--clean-script", metavar="PATH", section="Output", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.clean",), recursive_paths=("mkosi.clean.d/*",), help="Clean script to run after cleanup", ), # Content section ConfigSetting( dest="packages", short="-p", long="--package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add an additional package to the OS image", ), ConfigSetting( dest="build_packages", long="--build-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Additional packages needed for build scripts", ), ConfigSetting( dest="volatile_packages", long="--volatile-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Packages to install after executing build scripts", ), ConfigSetting( dest="package_directories", long="--package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.packages",), help="Specify a directory containing extra packages", scope=SettingScope.universal, ), ConfigSetting( dest="volatile_package_directories", long="--volatile-package-directory", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="Specify a directory containing extra volatile packages", scope=SettingScope.universal, ), ConfigSetting( dest="with_recommends", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Install recommended packages", ), ConfigSetting( dest="with_docs", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=True, help="Install documentation", ), ConfigSetting( dest="base_trees", long="--base-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help="Use the given tree as base tree (e.g. 
lower sysext layer)", ), ConfigSetting( dest="skeleton_trees", long="--skeleton-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), paths=("mkosi.skeleton", "mkosi.skeleton.tar"), help="Use a skeleton tree to bootstrap the image before installing anything", ), ConfigSetting( dest="extra_trees", long="--extra-tree", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser()), paths=("mkosi.extra", "mkosi.extra.tar"), help="Copy an extra tree on top of image", ), ConfigSetting( dest="remove_packages", long="--remove-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove package from the OS image after installation", ), ConfigSetting( dest="remove_files", metavar="GLOB", section="Content", parse=config_make_list_parser(delimiter=","), help="Remove files from built image", ), ConfigSetting( dest="clean_package_metadata", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Remove package manager database and other files", ), ConfigSetting( dest="source_date_epoch", metavar="TIMESTAMP", section="Content", parse=config_parse_source_date_epoch, default_factory=config_default_source_date_epoch, default_factory_depends=("environment",), help="Set the $SOURCE_DATE_EPOCH timestamp", scope=SettingScope.universal, ), ConfigSetting( dest="sync_scripts", long="--sync-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.sync",), recursive_paths=("mkosi.sync.d/*",), help="Sync script to run before starting the build", ), ConfigSetting( dest="prepare_scripts", long="--prepare-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.prepare", "mkosi.prepare.chroot"), recursive_paths=("mkosi.prepare.d/*",), help="Prepare script to run inside the image before it is cached", compat_names=("PrepareScript",), ), ConfigSetting( dest="build_scripts", long="--build-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.build", "mkosi.build.chroot"), recursive_paths=("mkosi.build.d/*",), help="Build script to run inside image", compat_names=("BuildScript",), ), ConfigSetting( dest="postinst_scripts", long="--postinst-script", metavar="PATH", name="PostInstallationScripts", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.postinst", "mkosi.postinst.chroot"), recursive_paths=("mkosi.postinst.d/*",), help="Postinstall script to run inside image", compat_names=("PostInstallationScript",), ), ConfigSetting( dest="finalize_scripts", long="--finalize-script", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.finalize", "mkosi.finalize.chroot"), recursive_paths=("mkosi.finalize.d/*",), help="Postinstall script to run outside image", compat_names=("FinalizeScript",), ), ConfigSetting( dest="postoutput_scripts", long="--postoutput-script", metavar="PATH", name="PostOutputScripts", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.postoutput",), recursive_paths=("mkosi.postoutput.d/*",), help="Output postprocessing script to run outside image", ), ConfigSetting( dest="bootable", metavar="FEATURE", nargs="?", section="Content", 
parse=config_parse_feature, match=config_match_feature, help="Generate ESP partition with systemd-boot and UKIs for installed kernels", ), ConfigSetting( dest="bootloader", section="Content", parse=config_make_enum_parser(Bootloader), choices=Bootloader.choices(), default=Bootloader.systemd_boot, help="Specify which UEFI bootloader to use", ), ConfigSetting( dest="bios_bootloader", section="Content", parse=config_make_enum_parser(BiosBootloader), choices=BiosBootloader.choices(), default=BiosBootloader.none, help="Specify which BIOS bootloader to use", ), ConfigSetting( dest="shim_bootloader", section="Content", parse=config_make_enum_parser(ShimBootloader), choices=ShimBootloader.choices(), default=ShimBootloader.none, help="Specify whether to use shim", ), ConfigSetting( dest="unified_kernel_images", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Specify whether to use UKIs with grub/systemd-boot in UEFI mode", ), ConfigSetting( dest="unified_kernel_image_format", section="Content", parse=config_make_filename_parser( "UnifiedKernelImageFormat= or --unified-kernel-image-format= " "requires a filename with no path components." ), # The default value is set in `__init__.py` in `install_uki`. # `None` is used to determine if the roothash and boot count format # should be appended to the filename if they are found. # default= help="Specify the format used for the UKI filename", ), ConfigSetting( dest="unified_kernel_image_profiles", long="--uki-profile", metavar="PATH", section="Content", parse=config_make_list_parser( delimiter=",", parse=make_simple_config_parser(UKI_PROFILE_SETTINGS, UKIProfile), ), recursive_paths=("mkosi.uki-profiles/*.conf",), help="Configuration files to generate UKI profiles", ), ConfigSetting( dest="initrds", long="--initrd", metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), help="Add a user-provided initrd to image", ), ConfigSetting( dest="microcode_host", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, default=False, help="Only include the host CPU's microcode", ), ConfigSetting( dest="initrd_packages", long="--initrd-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default initrd", ), ConfigSetting( dest="initrd_volatile_packages", long="--initrd-volatile-package", metavar="PACKAGE", section="Content", parse=config_make_list_parser(delimiter=","), help="Packages to install in the initrd that are not cached", ), ConfigSetting( dest="devicetree", section="Content", parse=config_parse_string, help="Devicetree to be used by the booting kernel", ), ConfigSetting( dest="kernel_command_line", metavar="OPTIONS", section="Content", parse=config_make_list_parser(delimiter=" "), help="Set the kernel command line (only bootable images)", ), ConfigSetting( dest="kernel_modules_include", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="Include the specified kernel modules in the image", ), ConfigSetting( dest="kernel_modules_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="Include the currently loaded modules on the host in the image", ), ConfigSetting( dest="kernel_modules_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="Exclude the specified kernel modules from the image", ), ConfigSetting( dest="kernel_modules_initrd", metavar="BOOL", nargs="?", 
section="Content", parse=config_parse_boolean, default=True, help="When building a bootable image, add an extra initrd containing the kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="When building a kernel modules initrd, include the specified kernel modules", ), ConfigSetting( dest="kernel_modules_initrd_include_host", metavar="BOOL", section="Content", parse=config_parse_boolean, help="When building a kernel modules initrd, include the currently loaded modules " "on the host in the image", ), ConfigSetting( dest="kernel_modules_initrd_exclude", metavar="REGEX", section="Content", parse=config_make_list_parser(delimiter=","), help="When building a kernel modules initrd, exclude the specified kernel modules", ), ConfigSetting( dest="locale", section="Content", parse=config_parse_string, help="Set the system locale", ), ConfigSetting( dest="locale_messages", metavar="LOCALE", section="Content", parse=config_parse_string, help="Set the messages locale", ), ConfigSetting( dest="keymap", metavar="KEYMAP", section="Content", parse=config_parse_string, help="Set the system keymap", ), ConfigSetting( dest="timezone", metavar="TIMEZONE", section="Content", parse=config_parse_string, help="Set the system timezone", ), ConfigSetting( dest="hostname", metavar="HOSTNAME", section="Content", parse=config_parse_string, help="Set the system hostname", ), ConfigSetting( dest="root_password", metavar="PASSWORD", section="Content", parse=config_parse_root_password, paths=("mkosi.rootpw",), path_read_text=True, path_secret=True, help="Set the password for root", ), ConfigSetting( dest="root_shell", metavar="SHELL", section="Content", parse=config_parse_string, help="Set the shell for root", ), ConfigSetting( dest="machine_id", metavar="MACHINE_ID", section="Content", parse=config_parse_uuid, paths=("mkosi.machine-id",), path_read_text=True, help="Set the machine ID to use", ), ConfigSetting( dest="autologin", short="-a", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Enable root autologin", ), ConfigSetting( dest="make_initrd", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Make sure the image can be used as an initramfs", ), ConfigSetting( dest="ssh", metavar="BOOL", nargs="?", section="Content", parse=config_parse_boolean, help="Set up SSH access from the host to the final image via 'mkosi ssh'", ), ConfigSetting( dest="selinux_relabel", name="SELinuxRelabel", metavar="FEATURE", section="Content", parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), # Validation section ConfigSetting( dest="secure_boot", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Sign the resulting kernel/initrd image for UEFI SecureBoot", ), ConfigSetting( dest="secure_boot_auto_enroll", metavar="BOOL", section="Validation", parse=config_parse_boolean, default=True, help="Automatically enroll the secureboot signing key on first boot", ), ConfigSetting( dest="secure_boot_key", metavar="KEY", section="Validation", parse=config_parse_key, paths=("mkosi.key",), help="UEFI SecureBoot private key", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySourceType.file), help="The source to use to retrieve the secure boot signing key", scope=SettingScope.universal, ), 
ConfigSetting( dest="secure_boot_certificate", metavar="PATH", section="Validation", parse=config_parse_certificate, paths=("mkosi.crt",), help="UEFI SecureBoot certificate in X509 format", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_certificate_source", section="Validation", metavar="SOURCE[:PROVIDER]", parse=config_parse_certificate_source, default=CertificateSource(type=CertificateSourceType.file), help="The source to use to retrieve the secure boot signing certificate", scope=SettingScope.universal, ), ConfigSetting( dest="secure_boot_sign_tool", section="Validation", parse=config_make_enum_parser(SecureBootSignTool), default=SecureBootSignTool.auto, choices=SecureBootSignTool.choices(), help="Tool to use for signing PE binaries for secure boot", ), ConfigSetting( dest="verity", section="Validation", metavar="FEATURE", parse=config_parse_feature, help="Configure whether to enforce or disable verity partitions for disk images", ), ConfigSetting( dest="verity_key", metavar="KEY", section="Validation", parse=config_parse_key, paths=("mkosi.key",), help="Private key for signing verity signature", scope=SettingScope.universal, ), ConfigSetting( dest="verity_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySourceType.file), help="The source to use to retrieve the verity signing key", scope=SettingScope.universal, ), ConfigSetting( dest="verity_certificate", metavar="PATH", section="Validation", parse=config_parse_certificate, paths=("mkosi.crt",), help="Certificate for signing verity signature in X509 format", scope=SettingScope.universal, ), ConfigSetting( dest="verity_certificate_source", section="Validation", metavar="SOURCE[:PROVIDER]", parse=config_parse_certificate_source, default=CertificateSource(type=CertificateSourceType.file), help="The source to use to retrieve the verity signing certificate", scope=SettingScope.universal, ), ConfigSetting( dest="sign_expected_pcr", metavar="FEATURE", section="Validation", parse=config_parse_feature, help="Measure the components of the unified kernel image (UKI) and " "embed the PCR signature into the UKI", ), ConfigSetting( dest="sign_expected_pcr_key", metavar="KEY", section="Validation", parse=config_parse_key, paths=("mkosi.key",), help="Private key for signing expected PCR signature", scope=SettingScope.universal, ), ConfigSetting( dest="sign_expected_pcr_key_source", section="Validation", metavar="SOURCE[:ENGINE]", parse=config_parse_key_source, default=KeySource(type=KeySourceType.file), help="The source to use to retrieve the expected PCR signing key", scope=SettingScope.universal, ), ConfigSetting( dest="sign_expected_pcr_certificate", metavar="PATH", section="Validation", parse=config_parse_certificate, paths=("mkosi.crt",), help="Certificate for signing expected PCR signature in X509 format", scope=SettingScope.universal, ), ConfigSetting( dest="sign_expected_pcr_certificate_source", section="Validation", metavar="SOURCE[:PROVIDER]", parse=config_parse_certificate_source, default=CertificateSource(type=CertificateSourceType.file), help="The source to use to retrieve the expected PCR signing certificate", scope=SettingScope.universal, ), ConfigSetting( dest="passphrase", metavar="PATH", section="Validation", parse=config_make_path_parser(required=False, secret=True), paths=("mkosi.passphrase",), help="Path to a file containing the passphrase to use when LUKS encryption is selected", ), ConfigSetting( dest="checksum", metavar="BOOL", nargs="?", 
section="Validation", parse=config_parse_boolean, help="Write SHA256SUMS file", ), ConfigSetting( dest="sign", metavar="BOOL", nargs="?", section="Validation", parse=config_parse_boolean, help="Write and sign SHA256SUMS file", ), ConfigSetting( dest="key", section="Validation", help="GPG key to use for signing", ), ConfigSetting( name="OpenPGPTool", dest="openpgp_tool", section="Validation", default="gpg", help="OpenPGP implementation to use for signing", ), # Build section ConfigSetting( dest="tools_tree", metavar="PATH", section="Build", parse=config_make_path_parser(constants=("default",)), paths=("mkosi.tools",), help="Look up programs to execute inside the given tree", nargs="?", const="default", scope=SettingScope.universal, ), ConfigSetting( dest="tools_tree_distribution", section="Build", parse=config_make_enum_parser(Distribution), match=config_make_enum_matcher(Distribution), choices=Distribution.choices(), default_factory_depends=("distribution",), default_factory=config_default_tools_tree_distribution, help="Set the distribution to use for the default tools tree", ), ConfigSetting( dest="tools_tree_release", metavar="RELEASE", section="Build", parse=config_parse_string, match=config_make_string_matcher(), default_factory_depends=("tools_tree_distribution",), default_factory=lambda ns: d.default_release() if (d := ns.tools_tree_distribution) else None, help="Set the release to use for the default tools tree", ), ConfigSetting( dest="tools_tree_mirror", metavar="MIRROR", section="Build", default_factory_depends=("distribution", "mirror", "tools_tree_distribution"), default_factory=( lambda ns: ns.mirror if ns.mirror and ns.distribution == ns.tools_tree_distribution else None ), help="Set the mirror to use for the default tools tree", ), ConfigSetting( dest="tools_tree_repositories", long="--tools-tree-repository", metavar="REPOS", section="Build", parse=config_make_list_parser(delimiter=","), help="Repositories to use for the default tools tree", ), ConfigSetting( dest="tools_tree_sandbox_trees", long="--tools-tree-sandbox-tree", compat_names=("ToolsTreePackageManagerTrees",), compat_longs=("--tools-tree-package-manager-tree",), metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), help="Sandbox trees for the default tools tree", ), ConfigSetting( dest="tools_tree_packages", long="--tools-tree-package", metavar="PACKAGE", section="Build", parse=config_make_list_parser(delimiter=","), help="Add additional packages to the default tools tree", ), ConfigSetting( dest="tools_tree_package_directories", long="--tools-tree-package-directory", metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), help="Specify a directory containing extra tools tree packages", ), ConfigSetting( dest="tools_tree_certificates", metavar="BOOL", section="Build", parse=config_parse_boolean, help="Use certificates from the tools tree", default=True, scope=SettingScope.universal, ), ConfigSetting( dest="extra_search_paths", long="--extra-search-path", metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(exclude=["/usr"])), help="List of comma-separated paths to look for programs before looking in PATH", scope=SettingScope.universal, ), ConfigSetting( dest="incremental", short="-i", nargs="?", section="Build", parse=config_make_enum_parser_with_boolean(Incremental, yes=Incremental.yes, no=Incremental.no), default=Incremental.no, help="Make use of and 
generate intermediary cache images", scope=SettingScope.universal, choices=Incremental.values(), ), ConfigSetting( dest="cacheonly", long="--cache-only", name="CacheOnly", section="Build", parse=config_make_enum_parser_with_boolean(Cacheonly, yes=Cacheonly.always, no=Cacheonly.auto), default=Cacheonly.auto, help="Only use the package cache when installing packages", choices=Cacheonly.choices(), scope=SettingScope.universal, ), ConfigSetting( dest="sandbox_trees", long="--sandbox-tree", compat_names=("PackageManagerTrees",), compat_longs=("--package-manager-tree",), metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), help="Use a sandbox tree to configure the various tools that mkosi executes", paths=( "mkosi.sandbox", "mkosi.sandbox.tar", "mkosi.pkgmngr", "mkosi.pkgmngr.tar", ), scope=SettingScope.universal, ), ConfigSetting( dest="workspace_dir", long="--workspace-directory", compat_longs=("--workspace-dir",), metavar="DIR", name="WorkspaceDirectory", section="Build", parse=config_make_path_parser(required=False), help="Workspace directory", scope=SettingScope.universal, ), ConfigSetting( dest="cache_dir", long="--cache-directory", compat_longs=("--cache-dir",), metavar="PATH", name="CacheDirectory", section="Build", parse=config_make_path_parser(required=False), paths=("mkosi.cache",), help="Incremental cache directory", scope=SettingScope.universal, ), ConfigSetting( dest="package_cache_dir", long="--package-cache-directory", compat_longs=("--package-cache-dir",), metavar="PATH", name="PackageCacheDirectory", section="Build", parse=config_make_path_parser(required=False), paths=("mkosi.pkgcache",), help="Package cache directory", scope=SettingScope.universal, ), ConfigSetting( dest="build_dir", long="--build-directory", compat_longs=("--build-dir",), metavar="PATH", name="BuildDirectory", section="Build", parse=config_make_path_parser(required=False), paths=("mkosi.builddir",), help="Path to use as persistent build directory", scope=SettingScope.universal, ), ConfigSetting( dest="use_subvolumes", metavar="FEATURE", nargs="?", section="Build", parse=config_parse_feature, help="Use btrfs subvolumes for faster directory operations where possible", scope=SettingScope.universal, ), ConfigSetting( dest="repart_offline", section="Build", parse=config_parse_boolean, help="Build disk images without using loopback devices", default=True, scope=SettingScope.universal, ), ConfigSetting( dest="history", metavar="BOOL", section="Build", parse=config_parse_boolean, help="Whether mkosi can store information about previous builds", ), ConfigSetting( dest="build_sources", metavar="PATH", section="Build", parse=config_make_list_parser( delimiter=",", parse=make_tree_parser( absolute=False, required=True, directory=True, ), ), match=config_match_build_sources, default_factory=lambda ns: [ConfigTree(ns.directory, None)] if ns.directory else [], help="Path for sources to build", scope=SettingScope.universal, ), ConfigSetting( dest="build_sources_ephemeral", nargs="?", section="Build", parse=config_make_enum_parser_with_boolean( BuildSourcesEphemeral, yes=BuildSourcesEphemeral.yes, no=BuildSourcesEphemeral.no ), default=BuildSourcesEphemeral.no, help="Make build sources ephemeral when running scripts", scope=SettingScope.universal, choices=BuildSourcesEphemeral.values(), ), ConfigSetting( dest="environment", short="-E", metavar="NAME[=VALUE]", section="Build", parse=config_make_dict_parser(delimiter=" ", parse=parse_environment, unescape=True), 
match=config_match_key_value, help="Set an environment variable when running scripts", ), ConfigSetting( dest="environment_files", long="--env-file", metavar="PATH", section="Build", parse=config_make_list_parser(delimiter=",", parse=make_path_parser()), paths=("mkosi.env",), help="Environment files to set when running scripts", ), ConfigSetting( dest="with_tests", short="-T", long="--without-tests", nargs="?", const="no", section="Build", parse=config_parse_boolean, default=True, help="Do not run tests as part of build scripts, if supported", scope=SettingScope.universal, ), ConfigSetting( dest="with_network", metavar="BOOL", nargs="?", section="Build", parse=config_parse_boolean, help="Run build and postinst scripts with network access (instead of private network)", scope=SettingScope.universal, ), ConfigSetting( dest="proxy_url", section="Build", default_factory=config_default_proxy_url, default_factory_depends=("environment",), metavar="URL", help="Set the proxy to use", scope=SettingScope.universal, ), ConfigSetting( dest="proxy_exclude", section="Build", metavar="HOST", parse=config_make_list_parser(delimiter=","), help="Don't use the configured proxy for the specified host(s)", scope=SettingScope.universal, ), ConfigSetting( dest="proxy_peer_certificate", section="Build", parse=config_make_path_parser(), paths=( "/etc/pki/tls/certs/ca-bundle.crt", "/etc/ssl/certs/ca-certificates.crt", ), help="Set the proxy peer certificate", scope=SettingScope.universal, ), ConfigSetting( dest="proxy_client_certificate", section="Build", parse=config_make_path_parser(secret=True), help="Set the proxy client certificate", scope=SettingScope.universal, ), ConfigSetting( dest="proxy_client_key", section="Build", default_factory=lambda ns: ns.proxy_client_certificate, default_factory_depends=("proxy_client_certificate",), parse=config_make_path_parser(secret=True), help="Set the proxy client key", scope=SettingScope.universal, ), # Host section ConfigSetting( dest="nspawn_settings", name="NSpawnSettings", long="--settings", metavar="PATH", section="Runtime", parse=config_make_path_parser(), paths=("mkosi.nspawn",), help="Add in .nspawn settings file", ), ConfigSetting( dest="ephemeral", metavar="BOOL", section="Runtime", parse=config_parse_boolean, help=( "If specified, the container/VM is run with a temporary snapshot of the output " "image that is removed immediately when the container/VM terminates" ), nargs="?", ), ConfigSetting( dest="credentials", long="--credential", metavar="NAME=VALUE", section="Runtime", parse=config_make_dict_parser(delimiter=" ", parse=parse_key_value, allow_paths=True, unescape=True), help="Pass a systemd credential to a systemd-nspawn container or a virtual machine", paths=("mkosi.credentials",), ), ConfigSetting( dest="kernel_command_line_extra", metavar="OPTIONS", section="Runtime", parse=config_make_list_parser(delimiter=" "), help="Append extra entries to the kernel command line when booting the image", ), ConfigSetting( dest="runtime_trees", long="--runtime-tree", metavar="SOURCE:[TARGET]", section="Runtime", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)), help="Additional mounts to add when booting the image", ), ConfigSetting( dest="runtime_size", metavar="SIZE", section="Runtime", parse=config_parse_bytes, help="Grow disk images to the specified size before booting them", ), ConfigSetting( dest="runtime_scratch", metavar="FEATURE", section="Runtime", parse=config_parse_feature, help="Mount extra scratch space to /var/tmp", ), 
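# Editorial sketch of the SOURCE:[TARGET] metavar used by RuntimeTrees= above,
# with illustrative values not taken from the source:
#   mkosi --runtime-tree ~/project:/work/project ...
# mounts the given host directory into the booted container/VM.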
ConfigSetting( dest="runtime_network", section="Runtime", parse=config_make_enum_parser(Network), choices=Network.choices(), help="Set networking backend to use when booting the image", default=Network.user, ), ConfigSetting( dest="runtime_build_sources", metavar="BOOL", section="Runtime", parse=config_parse_boolean, help="Mount build sources and build directory in /work when booting the image", ), ConfigSetting( dest="runtime_home", metavar="BOOL", section="Runtime", parse=config_parse_boolean, help="Mount current home directory to /root when booting the image", ), ConfigSetting( dest="unit_properties", long="--unit-property", metavar="PROPERTY", section="Runtime", parse=config_make_list_parser(delimiter=" ", unescape=True), help="Set properties on the scopes spawned by systemd-nspawn or systemd-run", ), ConfigSetting( dest="ssh_key", metavar="PATH", section="Runtime", parse=config_make_path_parser(secret=True), paths=("mkosi.key",), help="Private key for use with mkosi ssh in PEM format", ), ConfigSetting( dest="ssh_certificate", metavar="PATH", section="Runtime", parse=config_make_path_parser(), paths=("mkosi.crt",), help="Certificate for use with mkosi ssh in X509 format", ), ConfigSetting( dest="vmm", name="VirtualMachineMonitor", section="Runtime", choices=Vmm.choices(), parse=config_make_enum_parser(Vmm), default=Vmm.qemu, help="Set the virtual machine monitor to use for mkosi vm", ), ConfigSetting( dest="machine", metavar="NAME", section="Runtime", help="Set the machine name to use when booting the image", ), ConfigSetting( dest="forward_journal", metavar="PATH", section="Runtime", parse=config_make_path_parser(required=False), help="Set the path used to store forwarded machine journals", ), ConfigSetting( dest="sysupdate_dir", long="--sysupdate-directory", compat_longs=("--sysupdate-dir",), metavar="PATH", name="SysupdateDirectory", section="Runtime", parse=config_make_path_parser(), paths=("mkosi.sysupdate",), help="Directory containing systemd-sysupdate transfer definitions", ), ConfigSetting( dest="console", metavar="MODE", nargs="?", section="Runtime", parse=config_make_enum_parser(ConsoleMode), help="Configure the virtual machine console mode to use", default=ConsoleMode.native, ), ConfigSetting( dest="cpus", name="CPUs", metavar="CPUS", section="Runtime", parse=config_parse_number, default=1, help="Configure number of CPUs in virtual machine", compat_longs=("--qemu-smp",), compat_names=("QemuSmp",), ), ConfigSetting( dest="ram", name="RAM", metavar="BYTES", section="Runtime", parse=config_parse_bytes, default=parse_bytes("2G"), help="Configure guest's RAM size", compat_longs=("--qemu-mem",), compat_names=("QemuMem",), ), ConfigSetting( dest="kvm", name="KVM", metavar="FEATURE", nargs="?", section="Runtime", parse=config_parse_feature, help="Configure whether to use KVM or not", compat_longs=("--qemu-kvm",), compat_names=("QemuKvm",), ), ConfigSetting( dest="vsock", name="VSock", metavar="FEATURE", nargs="?", section="Runtime", parse=config_parse_feature, help="Configure whether to use vsock or not", compat_longs=("--qemu-vsock",), compat_names=("QemuVsock",), ), ConfigSetting( dest="vsock_cid", name="VSockCID", long="--vsock-cid", metavar="NUMBER|auto|hash", section="Runtime", parse=config_parse_vsock_cid, default=VsockCID.auto, help="Specify the vsock connection ID to use", compat_longs=("--qemu-vsock-cid",), compat_names=("QemuVsockConnectionId",), ), ConfigSetting( dest="tpm", name="TPM", metavar="FEATURE", nargs="?", section="Runtime", parse=config_parse_feature, 
help="Configure whether to use a virtual tpm or not", compat_longs=("--qemu-swtpm",), compat_names=("QemuSwtpm",), ), ConfigSetting( dest="cdrom", name="CDROM", metavar="BOOLEAN", nargs="?", section="Runtime", parse=config_parse_boolean, help="Attach the image as a CD-ROM to the virtual machine", compat_longs=("--qemu-cdrom",), compat_names=("QemuCdrom",), ), ConfigSetting( dest="removable", metavar="BOOLEAN", nargs="?", section="Runtime", parse=config_parse_boolean, help="Attach the image as a removable drive to the virtual machine", compat_longs=("--qemu-removable",), compat_names=("QemuRemovable",), ), ConfigSetting( dest="firmware", section="Runtime", parse=config_make_enum_parser(Firmware), default=Firmware.auto, help="Select the virtual machine firmware to use", choices=Firmware.choices(), compat_longs=("--qemu-firmware",), compat_names=("QemuFirmware",), ), ConfigSetting( dest="firmware_variables", metavar="PATH", section="Runtime", parse=config_make_path_parser(constants=("custom", "microsoft", "microsoft-mok")), help="Set the path to the firmware variables file to use", compat_longs=("--qemu-firmware-variables",), compat_names=("QemuFirmwareVariables",), ), ConfigSetting( dest="linux", metavar="PATH", section="Runtime", parse=config_make_path_parser(), help="Specify the kernel to use for direct kernel boot", compat_longs=("--qemu-kernel",), compat_names=("QemuKernel",), ), ConfigSetting( dest="drives", long="--drive", metavar="DRIVE", section="Runtime", parse=config_make_list_parser(delimiter=" ", parse=parse_drive), help="Specify drive that mkosi should create and pass to the virtual machine", compat_longs=("--qemu-drive",), compat_names=("QemuDrives",), ), ConfigSetting( dest="qemu_args", metavar="ARGS", section="Runtime", parse=config_make_list_parser(delimiter=" ", unescape=True), # Suppress the command line option because it's already possible to pass qemu args as normal # arguments. 
help=argparse.SUPPRESS, ), ConfigSetting( dest="register", metavar="BOOL", section="Runtime", parse=config_parse_feature, default=ConfigFeature.auto, help="Register booted vm/container with systemd-machined", ), ] SETTINGS_LOOKUP_BY_NAME = {name: s for s in SETTINGS for name in [s.name, *s.compat_names]} SETTINGS_LOOKUP_BY_DEST = {s.dest: s for s in SETTINGS} SETTINGS_LOOKUP_BY_SPECIFIER = {s.specifier: s for s in SETTINGS if s.specifier} MATCHES = ( Match( name="PathExists", match=match_path_exists, ), Match( name="SystemdVersion", match=match_systemd_version, ), Match( name="HostArchitecture", match=match_host_architecture, ), ) MATCH_LOOKUP = {m.name: m for m in MATCHES} SPECIFIERS = ( Specifier( char="C", callback=lambda ns, config: os.fspath(config.resolve().parent), ), Specifier( char="P", callback=lambda ns, config: os.fspath(Path.cwd()), ), Specifier( char="D", callback=lambda ns, config: os.fspath(ns.directory.resolve()), ), Specifier( char="F", callback=lambda ns, config: ns.distribution.filesystem(), depends=("distribution",), ), Specifier( char="I", callback=lambda ns, config: ns.image or "", ), ) SPECIFIERS_LOOKUP_BY_CHAR = {s.char: s for s in SPECIFIERS} # This regular expression can be used to split "AutoBump" -> ["Auto", "Bump"] # and "NSpawnSettings" -> ["NSpawn", "Settings"] # The first part (?<=[a-z]) is a positive look behind for a lower case letter # and (?=[A-Z]) is a lookahead assertion matching an upper case letter but not # consuming it FALLBACK_NAME_TO_DEST_SPLITTER = re.compile("(?<=[a-z])(?=[A-Z])") def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: parser = argparse.ArgumentParser( prog="mkosi", description="Build Bespoke OS Images", # the synopsis below is supposed to be indented by two spaces usage="\n " + textwrap.dedent("""\ mkosi [options…] {b}summary{e} mkosi [options…] {b}cat-config{e} mkosi [options…] {b}build{e} [command line…] mkosi [options…] {b}shell{e} [command line…] mkosi [options…] {b}boot{e} [nspawn settings…] mkosi [options…] {b}vm{e} [vmm parameters…] mkosi [options…] {b}ssh{e} [command line…] mkosi [options…] {b}journalctl{e} [command line…] mkosi [options…] {b}coredumpctl{e} [command line…] mkosi [options…] {b}sysupdate{e} [command line…] mkosi [options…] {b}sandbox{e} [command line…] mkosi [options…] {b}clean{e} mkosi [options…] {b}serve{e} mkosi [options…] {b}burn{e} [device] mkosi [options…] {b}bump{e} mkosi [options…] {b}genkey{e} mkosi [options…] {b}documentation{e} [manual] mkosi [options…] {b}completion{e} [shell] mkosi [options…] {b}dependencies{e} mkosi [options…] {b}help{e} mkosi -h | --help mkosi --version """).format(b=Style.bold, e=Style.reset), add_help=False, allow_abbrev=False, argument_default=argparse.SUPPRESS, formatter_class=CustomHelpFormatter, ) parser.add_argument( "--version", action="version", version="%(prog)s " + __version__, help=argparse.SUPPRESS, ) parser.add_argument( "-f", "--force", action="count", dest="force", default=0, help="Remove existing image file before operation", ) parser.add_argument( "-C", "--directory", type=parse_chdir if chdir else str, default=Path.cwd(), help="Change to specified directory before doing anything", metavar="PATH", ) parser.add_argument( "--debug", help="Turn on debugging output", action="store_true", default=False, ) parser.add_argument( "--debug-shell", help="Spawn an interactive shell in the image if a chroot command fails", action="store_true", default=False, ) parser.add_argument( "--debug-workspace", help="When an error occurs, the workspace 
directory will not be deleted", action="store_true", default=False, ) parser.add_argument( "--debug-sandbox", help="Run mkosi-sandbox with strace", action="store_true", default=False, ) parser.add_argument( "--no-pager", action="store_false", dest="pager", default=True, help="Disable paging for long output", ) parser.add_argument( "--genkey-valid-days", metavar="DAYS", help="Number of days keys should be valid when generating keys", default="730", ) parser.add_argument( "--genkey-common-name", metavar="CN", help="Template for the CN when generating keys", default="mkosi of %u", ) parser.add_argument( "-B", "--auto-bump", help="Automatically bump image version after building", action="store_true", default=False, ) parser.add_argument( "--doc-format", help="The format to show documentation in", default=DocFormat.auto, type=DocFormat, choices=list(DocFormat), ) parser.add_argument( "--json", help="Show summary as JSON", action="store_true", default=False, ) parser.add_argument( "-w", "--wipe-build-dir", help="Remove the build directory before building the image", action="store_true", default=False, ) # These can be removed once mkosi v15 is available in LTS distros and compatibility with <= v14 # is no longer needed in build infrastructure (e.g. OBS). parser.add_argument( "--nspawn-keep-unit", nargs=0, action=IgnoreAction, ) parser.add_argument( "--default", action=IgnoreAction, ) parser.add_argument( "--cache", action=IgnoreAction, ) parser.add_argument( "verb", type=Verb, choices=list(Verb), default=Verb.build, help=argparse.SUPPRESS, ) parser.add_argument( "cmdline", nargs=argparse.REMAINDER, help=argparse.SUPPRESS, ) parser.add_argument( "-h", "--help", action=PagerHelpAction, help=argparse.SUPPRESS, ) last_section: Optional[str] = None for s in SETTINGS: if s.section != last_section: group = parser.add_argument_group(f"{s.section} configuration options") last_section = s.section for long in [s.long, *s.compat_longs]: opts = [s.short, long] if s.short and long == s.long else [long] group.add_argument( # type: ignore *opts, dest=s.dest, choices=s.choices, metavar=s.metavar, nargs=s.nargs, # type: ignore const=s.const, help=s.help if long == s.long else argparse.SUPPRESS, action=ConfigAction, ) return parser def resolve_deps(images: Sequence[Config], include: Sequence[str]) -> list[Config]: graph = {config.image: config.dependencies for config in images} if any((missing := i) not in graph for i in include): die(f"No image found with name {missing}") deps = set() queue = [*include] while queue: if (image := queue.pop(0)) not in deps: deps.add(image) queue.extend(graph[image]) images = [config for config in images if config.image in deps] graph = {config.image: config.dependencies for config in images} try: order = list(graphlib.TopologicalSorter(graph).static_order()) except graphlib.CycleError as e: die(f"Image dependency cycle detected: {' => '.join(e.args[1])}") return sorted(images, key=lambda i: order.index(i.image)) class ConfigAction(argparse.Action): def __call__( self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], option_string: Optional[str] = None, ) -> None: assert option_string is not None if values is None and self.nargs == "?": values = self.const or "yes" s = SETTINGS_LOOKUP_BY_DEST[self.dest] if values is None or isinstance(values, str): values = [values] for v in values: assert isinstance(v, str) or v is None parsed_value = s.parse(v, getattr(namespace, self.dest, None)) if parsed_value is None: setattr(namespace, 
f"{s.dest}_was_none", True) setattr(namespace, s.dest, parsed_value) class ParseContext: def __init__(self, resources: Path = Path("/")) -> None: self.resources = resources # We keep two namespaces around, one for the settings specified on the CLI and one for # the settings specified in configuration files. This is required to implement both [Match] # support and the behavior where settings specified on the CLI always override settings # specified in configuration files. self.cli = argparse.Namespace() self.config = argparse.Namespace( files=[], ) self.defaults = argparse.Namespace() # Compare inodes instead of paths so we can't get tricked by bind mounts and such. self.includes: set[tuple[int, int]] = set() self.only_sections: tuple[str, ...] = tuple() def expand_specifiers(self, text: str, path: Path) -> str: percent = False result: list[str] = [] for c in text: if percent: percent = False if c == "%": result += "%" elif setting := SETTINGS_LOOKUP_BY_SPECIFIER.get(c): if (v := self.finalize_value(setting)) is None: logging.warning( f"{path.absolute()}: Setting {setting.name} specified by specifier '%{c}' " f"in {text} is not yet set, ignoring" ) continue result += str(v) elif specifier := SPECIFIERS_LOOKUP_BY_CHAR.get(c): specifierns = argparse.Namespace() # Some specifier methods might want to access the image name or directory mkosi was # invoked in so let's make sure those are available. setattr(specifierns, "image", getattr(self.config, "image", None)) setattr(specifierns, "directory", self.cli.directory) for d in specifier.depends: setting = SETTINGS_LOOKUP_BY_DEST[d] if (v := self.finalize_value(setting)) is None: logging.warning( f"{path.absolute()}: Setting {setting.name} which specifier '%{c}' in " f"{text} depends on is not yet set, ignoring" ) break setattr(specifierns, d, v) else: result += specifier.callback(specifierns, path) else: logging.warning(f"{path.absolute()}: Unknown specifier '%{c}' found in {text}, ignoring") elif c == "%": percent = True else: result += c if percent: result += "%" return "".join(result) def parse_new_includes(self) -> None: # Parse any includes that were added after yielding. for p in getattr(self.cli, "include", []) + getattr(self.config, "include", []): for c in BUILTIN_CONFIGS: if p == Path(c): path = self.resources / c break else: path = p st = path.stat() if (st.st_dev, st.st_ino) in self.includes: continue self.includes.add((st.st_dev, st.st_ino)) if any(p == Path(c) for c in BUILTIN_CONFIGS): _, [config] = parse_config( ["--directory", "", "--include", os.fspath(path)], only_sections=self.only_sections, ) make_executable( *config.configure_scripts, *config.clean_scripts, *config.sync_scripts, *config.prepare_scripts, *config.build_scripts, *config.postinst_scripts, *config.finalize_scripts, *config.postoutput_scripts, ) with chdir(path if path.is_dir() else Path.cwd()): self.parse_config_one(path if path.is_file() else Path.cwd(), parse_profiles=p.is_dir()) def finalize_value(self, setting: ConfigSetting[T]) -> Optional[T]: # If a value was specified on the CLI, it always takes priority. If the setting is a collection of # values, we merge the value from the CLI with the value from the configuration, making sure that the # value from the CLI always takes priority. 
if (v := cast(Optional[T], getattr(self.cli, setting.dest, None))) is not None: cfg_value = getattr(self.config, setting.dest, None) # We either have no corresponding value in the config files # or the value was assigned the empty string on the CLI # and should thus be treated as a reset and override of the value from the config file. if cfg_value is None or getattr(self.cli, f"{setting.dest}_was_none", False): return v # The isinstance() asserts are pushed down to help mypy/pylance narrow the types. # Mypy still cannot properly infer that the merged collections conform to T # so we ignore the return-value error for it. if isinstance(v, list): assert isinstance(cfg_value, type(v)) return cfg_value + v # type: ignore[return-value] elif isinstance(v, dict): assert isinstance(cfg_value, type(v)) return cfg_value | v # type: ignore[return-value] elif isinstance(v, set): assert isinstance(cfg_value, type(v)) return cfg_value | v # type: ignore[return-value] else: return v # If the setting was assigned the empty string on the CLI, we don't use any value configured in the # configuration file. Additionally, if the setting is a collection of values, we won't use any # default value either if the setting is set to the empty string on the command line. if ( not hasattr(self.cli, setting.dest) and hasattr(self.config, setting.dest) and (v := cast(Optional[T], getattr(self.config, setting.dest))) is not None ): return v if (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and isinstance( setting.parse(None, None), (dict, list, set) ): default = setting.parse(None, None) elif hasattr(self.defaults, setting.dest): default = getattr(self.defaults, setting.dest) elif setting.default_factory: # To determine default values, we need the final values of various settings in a namespace # object, but we don't want to copy the final values into the config namespace object just yet so # we create a new namespace object instead. factoryns = argparse.Namespace( **{ d: self.finalize_value(SETTINGS_LOOKUP_BY_DEST[d]) for d in setting.default_factory_depends } ) # Some default factory methods want to access the image name or directory mkosi was invoked in so # let's make sure those are available. setattr(factoryns, "image", getattr(self.config, "image", None)) setattr(factoryns, "directory", self.cli.directory) default = setting.default_factory(factoryns) elif setting.default is not None: default = setting.default else: default = setting.parse(None, None) setattr(self.defaults, setting.dest, default) return default def match_config(self, path: Path) -> bool: condition_triggered: Optional[bool] = None match_triggered: Optional[bool] = None skip = False # If the config file does not exist, we assume it matches so that we look at the other files in the # directory as well (mkosi.conf.d/ and extra files). 
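# Editorial sketch of the match syntax parsed below: a leading "|" marks a
# trigger condition (any one triggered condition in the section suffices) and a
# leading "!" negates, e.g.
#   [Match]
#   Distribution=|fedora
#   Distribution=|centos
#   Architecture=!x86-64
# (illustrative values; see the "trigger" and "negate" handling further down).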
if not path.exists(): return True for section, k, v in parse_ini(path, only_sections=["Match", "TriggerMatch"]): if not k and not v: if section == "Match" and condition_triggered is False: return False if section == "TriggerMatch": match_triggered = bool(match_triggered) or condition_triggered is not False condition_triggered = None skip = False continue if skip: continue trigger = v.startswith("|") v = v.removeprefix("|") negate = v.startswith("!") v = v.removeprefix("!") v = self.expand_specifiers(v, path) if s := SETTINGS_LOOKUP_BY_NAME.get(k): if not s.match: die(f"{k} cannot be used in [{section}]") if k != s.name: logging.warning( f"{path.absolute()}: Setting {k} is deprecated, please use {s.name} instead." ) # If we encounter a setting that has not been explicitly configured yet, we assign the # default value first so that we can match on default values for settings. if (value := self.finalize_value(s)) is None: result = False else: result = s.match(v, value) elif m := MATCH_LOOKUP.get(k): result = m.match(v) else: die(f"{k} cannot be used in [{section}]") if negate: result = not result if not trigger and not result: if section == "TriggerMatch": skip = True condition_triggered = False continue return False if trigger: condition_triggered = bool(condition_triggered) or result return match_triggered is not False def parse_config_one(self, path: Path, parse_profiles: bool = False, parse_local: bool = False) -> bool: s: Optional[ConfigSetting[object]] # Hint to mypy that we might assign None extras = path.is_dir() assert path.is_absolute() if path.is_dir(): path /= "mkosi.conf" if not self.match_config(path): return False if extras: if parse_local: if ( ((localpath := path.parent / "mkosi.local") / "mkosi.conf").exists() or (localpath := path.parent / "mkosi.local.conf").exists() ): # fmt: skip with chdir(localpath if localpath.is_dir() else Path.cwd()): self.parse_config_one(localpath if localpath.is_file() else Path.cwd()) # Local configuration should override other file based # configuration but not the CLI itself so move the finalized # values to the CLI namespace. 
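# Editorial note: the net precedence implemented by this shuffle is therefore
# CLI > mkosi.local(.conf) > other configuration files > built-in defaults.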
for s in SETTINGS: if hasattr(self.config, s.dest): setattr(self.cli, s.dest, self.finalize_value(s)) delattr(self.config, s.dest) for s in SETTINGS: if ( s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None ): continue if self.only_sections and s.section not in self.only_sections: continue for f in s.paths: extra = parse_path( f, secret=s.path_secret, required=False, resolve=False, expanduser=False, expandvars=False, ) if extra.exists(): setattr( self.config, s.dest, s.parse( file_run_or_read(extra).rstrip("\n") if s.path_read_text else f, getattr(self.config, s.dest, None), ), ) for f in s.recursive_paths: recursive_extras = parse_paths_from_directory( f, secret=s.path_secret, required=False, resolve=False, expanduser=False, expandvars=False, ) for e in recursive_extras: if e.exists(): setattr( self.config, s.dest, s.parse(os.fspath(e), getattr(self.config, s.dest, None)), ) if path.exists(): logging.debug(f"Loading configuration file {path}") files = getattr(self.config, "files") files += [path] for section, k, v in parse_ini( path, only_sections=self.only_sections or {s.section for s in SETTINGS} | {"Host"}, ): if not k and not v: continue name = k.removeprefix("@") if name != k: logging.warning( f"{path.absolute()}: The '@' specifier is deprecated, please use {name} instead of " f"{k}" ) if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): die(f"{path.absolute()}: Unknown setting {name}") if ( s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None ): die(f"{path.absolute()}: Setting {name} cannot be configured in subimage {image}") if section != s.section: logging.warning( f"{path.absolute()}: Setting {name} should be configured in [{s.section}], not " f"[{section}]." ) if name != s.name: logging.warning( f"{path.absolute()}: Setting {name} is deprecated, please use {s.name} instead." ) v = self.expand_specifiers(v, path) setattr(self.config, s.dest, s.parse(v, getattr(self.config, s.dest, None))) self.parse_new_includes() if extras and (path.parent / "mkosi.conf.d").exists(): for p in sorted((path.parent / "mkosi.conf.d").iterdir()): p = p.absolute() if p.is_dir() or p.suffix == ".conf": with chdir(p if p.is_dir() else Path.cwd()): self.parse_config_one(p if p.is_file() else Path.cwd()) if parse_profiles: for profile in self.finalize_value(SETTINGS_LOOKUP_BY_DEST["profiles"]) or []: for p in (Path(profile), Path(f"{profile}.conf")): p = Path.cwd() / "mkosi.profiles" / p if p.exists(): with chdir(p if p.is_dir() else Path.cwd()): self.parse_config_one(p if p.is_file() else Path.cwd()) return True def have_history(args: Args) -> bool: return ( args.verb.needs_build() and args.verb != Verb.build and not args.force and Path(".mkosi-private/history/latest.json").exists() ) def parse_config( argv: Sequence[str] = (), *, resources: Path = Path("/"), only_sections: Sequence[str] = (), ) -> tuple[Args, tuple[Config, ...]]: argv = list(argv) # Make sure the verb command gets explicitly passed. Insert a -- before the positional verb argument # otherwise it might be considered as an argument of a parameter with nargs='?'. For example mkosi -i # summary would be treated as -i=summary. for verb in Verb: try: v_i = argv.index(verb.value) except ValueError: continue # Hack to make sure mkosi -C build works. 
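# Editorial recap of the scoping above: SettingScope.universal values may not be
# set by subimages at all, while settings picked up from paths=(...) files are
# only applied when no subimage is being parsed or the setting is not universal.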
if argv[v_i - 1] in ("-C", "--directory"): continue if v_i > 0 and argv[v_i - 1] != "--": argv.insert(v_i, "--") break else: argv += ["--", "build"] context = ParseContext(resources) # The "image" field does not directly map to a setting but is required to determine some default values # for settings, so let's set it on the config namespace immediately so it's available. setattr(context.config, "image", None) # First, we parse the command line arguments into a separate namespace. argparser = create_argument_parser() argparser.parse_args(argv, context.cli) args = load_args(context.cli) # If --debug was passed, apply it as soon as possible. if ARG_DEBUG.get(): logging.getLogger().setLevel(logging.DEBUG) # Do the same for help. if args.verb == Verb.help: PagerHelpAction.__call__(None, argparser, context.cli) # type: ignore if not args.verb.needs_config(): return args, () if have_history(args): try: prev = Config.from_json(Path(".mkosi-private/history/latest.json").read_text()) except ValueError: die( "Unable to parse history from .mkosi-private/history/latest.json", hint="Build with -f to generate a new history file from scratch", ) # If we're operating on a previously built image (vm, boot, shell, ...), we're not rebuilding the # image and the configuration of the latest build is available, we load the config that was used to # build the previous image from there instead of parsing configuration files, except for the Host # section settings which we allow changing without requiring a rebuild of the image. for s in SETTINGS: if s.section in ("Include", "Runtime"): continue if hasattr(context.cli, s.dest) and getattr(context.cli, s.dest) != getattr(prev, s.dest): logging.warning( f"Ignoring {s.long} from the CLI. Run with -f to rebuild the image with this setting" ) setattr(context.cli, s.dest, getattr(prev, s.dest)) if hasattr(context.config, s.dest): delattr(context.config, s.dest) context.only_sections = ("Include", "Runtime", "Host") else: context.only_sections = tuple(only_sections) prev = None context.parse_new_includes() # One of the specifiers needs access to the directory, so make sure it is available. setattr(context.config, "directory", args.directory) setattr(context.config, "files", []) # Parse the global configuration unless the user explicitly asked us not to. if args.directory is not None: context.parse_config_one(Path.cwd(), parse_profiles=True, parse_local=True) config = copy.deepcopy(context.config) # After we've finished parsing the configuration, we'll have values in both namespaces (context.cli, # context.config). To be able to parse the values from a single namespace, we merge the final values of # each setting into one namespace. for s in SETTINGS: setattr(config, s.dest, context.finalize_value(s)) if prev: return args, (load_config(config),) images = [] # If Dependencies= was not explicitly specified on the CLI or in the configuration, # we want to default to all subimages. However, if a subimage has a [Match] section # and does not successfully match, we don't want to add it to the default dependencies. # To make this work, we can't use default_factory as it is evaluated too early, so # we check here to see if dependencies were explicitly provided and if not we gather # the list of default dependencies while we parse the subimages. 
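# (Illustrative, hypothetical layout.) e.g. with mkosi.images/initrd.conf and
# mkosi.images/system/ present and no explicit Dependencies= anywhere, the default image ends
# up depending on both "initrd" and "system", minus any subimage whose [Match] section fails.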
dependencies: Optional[list[str]] = ( None if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies") else [] ) if args.directory is not None and Path("mkosi.images").exists(): # For the subimages in mkosi.images/, we want settings that are marked as # "universal" to override whatever settings are specified in the subimage # configuration files. We achieve this by making it appear like these settings # were specified on the CLI by copying them to the CLI namespace. Any settings # that are not marked as "universal" are deleted from the CLI namespace. for s in SETTINGS: if s.scope == SettingScope.universal: setattr(context.cli, s.dest, copy.deepcopy(getattr(config, s.dest))) elif hasattr(context.cli, s.dest): delattr(context.cli, s.dest) setattr( context.cli, "environment", { name: getattr(config, "environment")[name] for name in getattr(config, "pass_environment", {}) if name in getattr(config, "environment", {}) }, ) for p in sorted(Path("mkosi.images").iterdir()): p = p.absolute() if not p.is_dir() and not p.suffix == ".conf": continue name = p.name.removesuffix(".conf") if not name: die(f"{p} is not a valid image name") context.config = argparse.Namespace() setattr(context.config, "image", name) setattr(context.config, "directory", args.directory) setattr(context.config, "files", []) # Settings that are marked as "inherit" are passed down to subimages but can # be overridden, so we copy these to the config namespace so that they'll be # overridden if the setting is explicitly configured by the subimage. for s in SETTINGS: if s.scope == SettingScope.inherit and hasattr(config, s.dest): setattr(context.config, s.dest, copy.deepcopy(getattr(config, s.dest))) # Allow subimage configuration to include everything again. context.includes = set() context.defaults = argparse.Namespace() with chdir(p if p.is_dir() else Path.cwd()): if not context.parse_config_one( p if p.is_file() else Path.cwd(), parse_profiles=p.is_dir(), parse_local=True, ): continue # Consolidate all settings into one namespace again. 
for s in SETTINGS: setattr(context.config, s.dest, context.finalize_value(s)) images += [context.config] if dependencies is not None: dependencies += [name] if dependencies is not None: setattr(config, "dependencies", dependencies) main = load_config(config) subimages = [load_config(ns) for ns in images] subimages = resolve_deps(subimages, main.dependencies) return args, tuple(subimages + [main]) def finalize_term() -> str: term = os.getenv("TERM", "unknown") if term == "unknown": term = "vt220" if sys.stderr.isatty() else "dumb" return term if sys.stderr.isatty() else "dumb" def load_environment(args: argparse.Namespace) -> dict[str, str]: env = { "SYSTEMD_TMPFILES_FORCE_SUBVOL": "0", "SYSTEMD_ASK_PASSWORD_KEYRING_TIMEOUT_SEC": "infinity", "SYSTEMD_ASK_PASSWORD_KEYRING_TYPE": "session", "TERM": finalize_term(), } if args.image is not None: env["SUBIMAGE"] = args.image if args.image_id is not None: env["IMAGE_ID"] = args.image_id if args.image_version is not None: env["IMAGE_VERSION"] = args.image_version if args.source_date_epoch is not None: env["SOURCE_DATE_EPOCH"] = str(args.source_date_epoch) if args.proxy_url is not None: for e in ("http_proxy", "https_proxy"): env[e] = args.proxy_url env[e.upper()] = args.proxy_url if args.proxy_exclude: env["no_proxy"] = ",".join(args.proxy_exclude) env["NO_PROXY"] = ",".join(args.proxy_exclude) if args.proxy_peer_certificate: env["GIT_PROXY_SSL_CAINFO"] = "/proxy.cacert" if args.proxy_client_certificate: env["GIT_PROXY_SSL_CERT"] = "/proxy.clientcert" if args.proxy_client_key: env["GIT_PROXY_SSL_KEY"] = "/proxy.clientkey" if dnf := os.getenv("MKOSI_DNF"): env["MKOSI_DNF"] = dnf if gnupghome := os.getenv("GNUPGHOME"): env["GNUPGHOME"] = gnupghome env |= dict( parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines() ) env |= args.environment return env def load_args(args: argparse.Namespace) -> Args: if args.cmdline and not args.verb.supports_cmdline(): die(f"Arguments after verb are not supported for {args.verb}.") if args.debug: ARG_DEBUG.set(args.debug) if args.debug_shell: ARG_DEBUG_SHELL.set(args.debug_shell) if args.debug_sandbox: ARG_DEBUG_SANDBOX.set(args.debug_sandbox) return Args.from_namespace(args) def load_config(config: argparse.Namespace) -> Config: # Make sure we don't modify the input namespace. config = copy.deepcopy(config) if ( config.build_dir and config.build_dir.name != f"{config.distribution}~{config.release}~{config.architecture}" ): config.build_dir /= f"{config.distribution}~{config.release}~{config.architecture}" if config.sign: config.checksum = True config.environment = load_environment(config) if config.overlay and not config.base_trees: die("--overlay can only be used with --base-tree") if config.incremental and not config.cache_dir: die("A cache directory must be configured in order to use --incremental") # For unprivileged builds we need the userxattr OverlayFS mount option, which is only available # in Linux v5.11 and later. 
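# (Aside.) e.g. a rootless build using BuildScripts= or BaseTrees= on a 5.10 kernel would only
# fail later when the overlay is mounted, so the check below turns that into an early error.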
if ( (config.build_scripts or config.base_trees) and GenericVersion(platform.release()) < GenericVersion("5.11") and os.geteuid() != 0 ): die("This unprivileged build configuration requires at least Linux v5.11") return Config.from_namespace(config) def yes_no(b: bool) -> str: return "yes" if b else "no" def none_to_na(s: Optional[object]) -> str: return "n/a" if s is None else str(s) def none_to_random(s: Optional[object]) -> str: return "random" if s is None else str(s) def none_to_none(s: Optional[object]) -> str: return "none" if s is None else str(s) def none_to_default(s: Optional[object]) -> str: return "default" if s is None else str(s) def line_join_list(array: Iterable[object]) -> str: return "\n ".join(str(item) for item in array) if array else "none" def format_bytes(num_bytes: int) -> str: if num_bytes >= 1024**3: return f"{num_bytes / 1024**3:0.1f}G" if num_bytes >= 1024**2: return f"{num_bytes / 1024**2:0.1f}M" if num_bytes >= 1024: return f"{num_bytes / 1024:0.1f}K" return f"{num_bytes}B" def format_bytes_or_none(num_bytes: Optional[int]) -> str: return format_bytes(num_bytes) if num_bytes is not None else "none" def format_octal(oct_value: int) -> str: return f"{oct_value:>04o}" def format_octal_or_default(oct_value: Optional[int]) -> str: return format_octal(oct_value) if oct_value is not None else "default" def bold(s: Any) -> str: return f"{Style.bold}{s}{Style.reset}" def cat_config(images: Sequence[Config]) -> str: c = io.StringIO() for n, config in enumerate(images): if n > 0: print(file=c) print(bold(f"### IMAGE: {config.image or 'default'}"), file=c) for path in config.files: # Display the paths as relative to ., if underneath. if path.is_relative_to(Path.cwd()): path = path.relative_to(Path.cwd()) print(f"{Style.blue}# {path}{Style.reset}", file=c) print(path.read_text(), file=c) return c.getvalue() def summary(config: Config) -> str: maniformats = (" ".join(i.name for i in config.manifest_format)) or "(none)" env = [f"{k}={v}" for k, v in config.environment.items()] summary = f"""\ {bold(f"IMAGE: {config.image or 'default'}")} """ if not config.image: summary += f"""\ {bold("CONFIG")}: Profiles: {line_join_list(config.profiles)} Dependencies: {line_join_list(config.dependencies)} Minimum Version: {none_to_none(config.minimum_version)} Configure Scripts: {line_join_list(config.configure_scripts)} Pass Environment: {line_join_list(config.pass_environment)} {bold("DISTRIBUTION")}: Distribution: {bold(config.distribution)} Release: {bold(none_to_na(config.release))} Architecture: {config.architecture} Mirror: {none_to_default(config.mirror)} Local Mirror (build): {none_to_none(config.local_mirror)} Repo Signature/Key check: {yes_no(config.repository_key_check)} Fetch Repository Keys: {yes_no(config.repository_key_fetch)} Repositories: {line_join_list(config.repositories)} """ summary += f"""\ {bold("OUTPUT")}: Output Format: {config.output_format} Manifest Formats: {maniformats} Output: {bold(config.output_with_compression)} Compression: {config.compress_output} Compression Level: {config.compress_level} Output Directory: {config.output_dir_or_cwd()} Output Mode: {format_octal_or_default(config.output_mode)} Image ID: {config.image_id} Image Version: {config.image_version} Split Artifacts: {line_join_list(config.split_artifacts)} Repart Directories: {line_join_list(config.repart_dirs)} Sector Size: {none_to_default(config.sector_size)} Overlay: {yes_no(config.overlay)} Seed: {none_to_random(config.seed)} Clean Scripts: {line_join_list(config.clean_scripts)} 
{bold("CONTENT")}: Packages: {line_join_list(config.packages)} Build Packages: {line_join_list(config.build_packages)} Volatile Packages: {line_join_list(config.volatile_packages)} Package Directories: {line_join_list(config.package_directories)} Volatile Package Directories: {line_join_list(config.volatile_package_directories)} With Documentation: {yes_no(config.with_docs)} Base Trees: {line_join_list(config.base_trees)} Skeleton Trees: {line_join_list(config.skeleton_trees)} Extra Trees: {line_join_list(config.extra_trees)} Remove Packages: {line_join_list(config.remove_packages)} Remove Files: {line_join_list(config.remove_files)} Clean Package Manager Metadata: {config.clean_package_metadata} Source Date Epoch: {none_to_none(config.source_date_epoch)} Sync Scripts: {line_join_list(config.sync_scripts)} Prepare Scripts: {line_join_list(config.prepare_scripts)} Build Scripts: {line_join_list(config.build_scripts)} Postinstall Scripts: {line_join_list(config.postinst_scripts)} Finalize Scripts: {line_join_list(config.finalize_scripts)} Postoutput Scripts: {line_join_list(config.postoutput_scripts)} Bootable: {config.bootable} Bootloader: {config.bootloader} BIOS Bootloader: {config.bios_bootloader} Shim Bootloader: {config.shim_bootloader} Unified Kernel Images: {config.unified_kernel_images} Unified Kernel Image Format: {config.unified_kernel_image_format} Unified Kernel Image Profiles: {line_join_list(config.unified_kernel_image_profiles)} Initrds: {line_join_list(config.initrds)} Initrd Packages: {line_join_list(config.initrd_packages)} Initrd Volatile Packages: {line_join_list(config.initrd_volatile_packages)} Devicetree: {none_to_none(config.devicetree)} Kernel Command Line: {line_join_list(config.kernel_command_line)} Kernel Modules Include: {line_join_list(config.kernel_modules_include)} Kernel Modules Exclude: {line_join_list(config.kernel_modules_exclude)} Kernel Modules Include Host: {yes_no(config.kernel_modules_include_host)} Kernel Modules Initrd: {yes_no(config.kernel_modules_initrd)} Kernel Modules Initrd Include: {line_join_list(config.kernel_modules_initrd_include)} Kernel Modules Initrd Exclude: {line_join_list(config.kernel_modules_initrd_exclude)} Kernel Modules Initrd Include Host: {yes_no(config.kernel_modules_initrd_include_host)} Locale: {none_to_default(config.locale)} Locale Messages: {none_to_default(config.locale_messages)} Keymap: {none_to_default(config.keymap)} Timezone: {none_to_default(config.timezone)} Hostname: {none_to_default(config.hostname)} Root Password: {("(set)" if config.root_password else "(default)")} Root Shell: {none_to_default(config.root_shell)} Machine ID: {none_to_none(config.machine_id)} Autologin: {yes_no(config.autologin)} Make Initrd: {yes_no(config.make_initrd)} SSH: {yes_no(config.ssh)} SELinux Relabel: {config.selinux_relabel} """ if config.output_format.is_extension_or_portable_image() or config.output_format in ( OutputFormat.disk, OutputFormat.uki, OutputFormat.esp, ): summary += f"""\ {bold("VALIDATION")}: UEFI SecureBoot: {yes_no(config.secure_boot)} UEFI SecureBoot AutoEnroll: {yes_no(config.secure_boot_auto_enroll)} SecureBoot Signing Key: {none_to_none(config.secure_boot_key)} SecureBoot Signing Key Source: {config.secure_boot_key_source} SecureBoot Certificate: {none_to_none(config.secure_boot_certificate)} SecureBoot Certificate Source: {config.secure_boot_certificate_source} SecureBoot Sign Tool: {config.secure_boot_sign_tool} Verity: {config.verity} Verity Signing Key: {none_to_none(config.verity_key)} Verity Signing 
Key Source: {config.verity_key_source} Verity Certificate: {none_to_none(config.verity_certificate)} Verity Certificate Source: {config.verity_certificate_source} Sign Expected PCRs: {config.sign_expected_pcr} Expected PCRs Signing Key: {none_to_none(config.sign_expected_pcr_key)} Expected PCRs Key Source: {config.sign_expected_pcr_key_source} Expected PCRs Certificate: {none_to_none(config.sign_expected_pcr_certificate)} Expected PCRs Certificate Source: {config.sign_expected_pcr_certificate_source} Passphrase: {none_to_none(config.passphrase)} Checksum: {yes_no(config.checksum)} Sign: {yes_no(config.sign)} OpenPGP Tool: {config.openpgp_tool} GPG Key: ({"default" if config.key is None else config.key}) """ if not config.image: summary += f"""\ {bold("BUILD CONFIGURATION")}: Tools Tree: {config.tools_tree} Tools Tree Distribution: {none_to_none(config.tools_tree_distribution)} Tools Tree Release: {none_to_none(config.tools_tree_release)} Tools Tree Mirror: {none_to_default(config.tools_tree_mirror)} Tools Tree Repositories: {line_join_list(config.tools_tree_repositories)} Tools Tree Sandbox Trees: {line_join_list(config.tools_tree_sandbox_trees)} Tools Tree Packages: {line_join_list(config.tools_tree_packages)} Tools Tree Package Directories: {line_join_list(config.tools_tree_package_directories)} Tools Tree Certificates: {yes_no(config.tools_tree_certificates)} Extra Search Paths: {line_join_list(config.extra_search_paths)} Incremental: {config.incremental} Use Only Package Cache: {config.cacheonly} Sandbox Trees: {line_join_list(config.sandbox_trees)} Workspace Directory: {config.workspace_dir_or_default()} Cache Directory: {none_to_none(config.cache_dir)} Package Cache Directory: {none_to_default(config.package_cache_dir)} Build Directory: {none_to_none(config.build_dir)} Use Subvolumes: {config.use_subvolumes} Repart Offline: {yes_no(config.repart_offline)} Save History: {yes_no(config.history)} Build Sources: {line_join_list(config.build_sources)} Build Sources Ephemeral: {config.build_sources_ephemeral} Script Environment: {line_join_list(env)} Environment Files: {line_join_list(config.environment_files)} Run Tests in Build Scripts: {yes_no(config.with_tests)} Scripts With Network: {yes_no(config.with_network)} Proxy URL: {none_to_none(config.proxy_url)} Proxy Peer Certificate: {none_to_none(config.proxy_peer_certificate)} Proxy Client Certificate: {none_to_none(config.proxy_client_certificate)} Proxy Client Key: {none_to_none(config.proxy_client_key)} {bold("HOST CONFIGURATION")}: NSpawn Settings: {none_to_none(config.nspawn_settings)} Ephemeral: {config.ephemeral} Credentials: {line_join_list(config.credentials.keys())} Extra Kernel Command Line: {line_join_list(config.kernel_command_line_extra)} Runtime Trees: {line_join_list(config.runtime_trees)} Runtime Size: {format_bytes_or_none(config.runtime_size)} Runtime Scratch: {config.runtime_scratch} Runtime Network: {config.runtime_network} Runtime Build Sources: {config.runtime_build_sources} Runtime Home or Working Directory: {yes_no(config.runtime_home)} Unit Properties: {line_join_list(config.unit_properties)} SSH Signing Key: {none_to_none(config.ssh_key)} SSH Certificate: {none_to_none(config.ssh_certificate)} Machine: {config.machine_or_name()} Forward Journal: {none_to_none(config.forward_journal)} Register guest with machined: {config.register} Virtual Machine Monitor: {config.vmm} Console: {config.console} CPU Cores: {config.cpus} RAM: {format_bytes(config.ram)} KVM: {config.kvm} VSock: {config.vsock} VSock Connection ID: 
{VsockCID.format(config.vsock_cid)} TPM: {config.tpm} CD-ROM: {yes_no(config.cdrom)} Firmware: {config.firmware} Firmware Variables: {none_to_none(config.firmware_variables)} Linux: {none_to_none(config.linux)} QEMU Extra Arguments: {line_join_list(config.qemu_args)} """ return summary class JsonEncoder(json.JSONEncoder): def default(self, o: Any) -> Any: if isinstance(o, StrEnum): return str(o) elif isinstance(o, GenericVersion): return str(o) elif isinstance(o, os.PathLike): return os.fspath(o) elif isinstance(o, uuid.UUID): return str(o) elif isinstance(o, (Args, Config)): return o.to_dict() return super().default(o) E = TypeVar("E", bound=StrEnum) def json_type_transformer(refcls: Union[type[Args], type[Config]]) -> Callable[[str, Any], Any]: fields_by_name = {field.name: field for field in dataclasses.fields(refcls)} def path_transformer(path: str, fieldtype: type[Path]) -> Path: return Path(path) def optional_path_transformer(path: Optional[str], fieldtype: type[Optional[Path]]) -> Optional[Path]: return Path(path) if path is not None else None def path_list_transformer(pathlist: list[str], fieldtype: type[list[Path]]) -> list[Path]: return [Path(p) for p in pathlist] def uuid_transformer(uuidstr: str, fieldtype: type[uuid.UUID]) -> uuid.UUID: return uuid.UUID(uuidstr) def optional_uuid_transformer( uuidstr: Optional[str], fieldtype: type[Optional[uuid.UUID]] ) -> Optional[uuid.UUID]: return uuid.UUID(uuidstr) if uuidstr is not None else None def root_password_transformer( rootpw: Optional[list[Union[str, bool]]], fieldtype: type[Optional[tuple[str, bool]]] ) -> Optional[tuple[str, bool]]: if rootpw is None: return None return (cast(str, rootpw[0]), cast(bool, rootpw[1])) def config_tree_transformer( trees: list[dict[str, Any]], fieldtype: type[ConfigTree] ) -> list[ConfigTree]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in trees: assert "Source" in d assert "Target" in d ret.append( ConfigTree( source=Path(d["Source"]), target=Path(d["Target"]) if d["Target"] is not None else None, ) ) return ret def enum_transformer(enumval: str, fieldtype: type[E]) -> E: return fieldtype(enumval) def optional_enum_transformer(enumval: Optional[str], fieldtype: type[Optional[E]]) -> Optional[E]: return typing.get_args(fieldtype)[0](enumval) if enumval is not None else None def enum_list_transformer(enumlist: list[str], fieldtype: type[list[E]]) -> list[E]: enumtype = fieldtype.__args__[0] # type: ignore return [enumtype[e] for e in enumlist] def config_drive_transformer(drives: list[dict[str, Any]], fieldtype: type[Drive]) -> list[Drive]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in drives: assert "Id" in d assert "Size" in d ret.append( Drive( id=d["Id"], size=d["Size"] if isinstance(d["Size"], int) else parse_bytes(d["Size"]), directory=Path(d["Directory"]) if d.get("Directory") else None, options=d.get("Options"), file_id=d.get("FileId", d["Id"]), ) ) return ret def generic_version_transformer( version: Optional[str], fieldtype: type[Optional[GenericVersion]], ) -> Optional[GenericVersion]: return GenericVersion(version) if version is not None else None def certificate_source_transformer( certificate_source: dict[str, Any], fieldtype: type[CertificateSource] ) -> CertificateSource: assert "Type" in certificate_source return CertificateSource( type=CertificateSourceType(certificate_source["Type"]), source=certificate_source.get("Source", ""), ) def key_source_transformer(keysource: dict[str, Any], fieldtype: 
type[KeySource]) -> KeySource: assert "Type" in keysource return KeySource(type=KeySourceType(keysource["Type"]), source=keysource.get("Source", "")) def uki_profile_transformer( profiles: list[dict[str, Any]], fieldtype: type[UKIProfile], ) -> list[UKIProfile]: return [UKIProfile(profile=profile["Profile"], cmdline=profile["Cmdline"]) for profile in profiles] # The type of this should be # dict[ # type, # Callable[a stringy JSON object (str, null, list or dict of str), type of the key], type of the key # ] # though this seems impossible to express, since e.g. mypy will make this a # builtins.dict[builtins.object, builtins.function] # whereas pyright gives the type of the dict keys as the proper union of # all functions in the dict. We therefore squash all the types here to Any # to shut up the type checkers and rely on the tests. transformers: dict[Any, Callable[[Any, Any], Any]] = { Path: path_transformer, Optional[Path]: optional_path_transformer, list[Path]: path_list_transformer, uuid.UUID: uuid_transformer, Optional[uuid.UUID]: optional_uuid_transformer, Optional[tuple[str, bool]]: root_password_transformer, list[ConfigTree]: config_tree_transformer, Architecture: enum_transformer, BiosBootloader: enum_transformer, ShimBootloader: enum_transformer, Bootloader: enum_transformer, Compression: enum_transformer, ConfigFeature: enum_transformer, Distribution: enum_transformer, OutputFormat: enum_transformer, Firmware: enum_transformer, SecureBootSignTool: enum_transformer, Incremental: enum_transformer, BuildSourcesEphemeral: enum_transformer, Optional[Distribution]: optional_enum_transformer, list[ManifestFormat]: enum_list_transformer, Verb: enum_transformer, DocFormat: enum_transformer, list[Drive]: config_drive_transformer, GenericVersion: generic_version_transformer, Cacheonly: enum_transformer, Network: enum_transformer, KeySource: key_source_transformer, Vmm: enum_transformer, list[UKIProfile]: uki_profile_transformer, list[ArtifactOutput]: enum_list_transformer, CertificateSource: certificate_source_transformer, ConsoleMode: enum_transformer, } def json_transformer(key: str, val: Any) -> Any: fieldtype: Optional[dataclasses.Field[Any]] = fields_by_name.get(key) # It is unlikely that the type of a field will be None only, so let's not bother with a different # sentinel value if fieldtype is None: raise ValueError(f"{refcls} has no field {key}") transformer = transformers.get(fieldtype.type) if transformer is not None: try: return transformer(val, fieldtype.type) except (ValueError, IndexError, AssertionError) as e: raise ValueError( f"Unable to parse {val!r} for attribute {key!r} for {refcls.__name__}" ) from e return val return json_transformer def want_selinux_relabel( config: Config, root: Path, fatal: bool = True ) -> Optional[tuple[Path, str, Path, Path]]: if config.selinux_relabel == ConfigFeature.disabled: return None selinux = root / "etc/selinux/config" if not selinux.exists(): if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config") return None policy = run( ["sh", "-c", f". 
{workdir(selinux)} && echo $SELINUXTYPE"], sandbox=config.sandbox(options=["--ro-bind", selinux, workdir(selinux)]), stdout=subprocess.PIPE, ).stdout.strip() if not policy: if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config") return None if not (setfiles := config.find_binary("setfiles")): if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but setfiles is not installed") return None fc = root / "etc/selinux" / policy / "contexts/files/file_contexts" if not fc.exists(): if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux file contexts not found in {fc}") return None binpolicydir = root / "etc/selinux" / policy / "policy" # The policy file is named policy.XX where XX is the policy version that indicates what features are # available. We check for string.digits instead of using isdecimal() as the latter checks for more than # just digits. policies = [ p for p in binpolicydir.glob("*") if p.suffix and all(c in string.digits for c in p.suffix[1:]) ] if not policies: if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux binary policy not found in {binpolicydir}") return None binpolicy = sorted(policies, key=lambda p: GenericVersion(p.name), reverse=True)[0] return setfiles, policy, fc, binpolicy def systemd_tool_version(*tool: PathString, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: return GenericVersion( run( [*tool, "--version"], stdout=subprocess.PIPE, sandbox=sandbox(), ) .stdout.split()[2] .strip("()") .removeprefix("v") ) mkosi-25.3/mkosi/context.py000066400000000000000000000037451474711424400157660ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from typing import Optional from mkosi.config import Args, Config from mkosi.util import PathString class Context: """State related properties.""" def __init__( self, args: Args, config: Config, *, workspace: Path, resources: Path, keyring_dir: Path, metadata_dir: Path, package_dir: Optional[Path] = None, ) -> None: self.args = args self.config = config self.workspace = workspace self.resources = resources self.keyring_dir = keyring_dir self.metadata_dir = metadata_dir self.package_dir = package_dir or (self.workspace / "packages") self.package_dir.mkdir(exist_ok=True) self.staging.mkdir() self.sandbox_tree.mkdir() self.repository.mkdir() self.artifacts.mkdir() self.install_dir.mkdir() @property def root(self) -> Path: return self.workspace / "root" @property def staging(self) -> Path: return self.workspace / "staging" @property def sandbox_tree(self) -> Path: return self.workspace / "sandbox" @property def repository(self) -> Path: return self.workspace / "repository" @property def artifacts(self) -> Path: return self.workspace / "artifacts" @property def install_dir(self) -> Path: return self.workspace / "dest" def sandbox( self, *, network: bool = False, devices: bool = False, scripts: Optional[Path] = None, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return self.config.sandbox( network=network, devices=devices, scripts=scripts, overlay=self.sandbox_tree, options=options, ) mkosi-25.3/mkosi/curl.py000066400000000000000000000021401474711424400152330ustar00rootroot00000000000000# SPDX-License-Identifier: 
LGPL-2.1-or-later from pathlib import Path from mkosi.config import Config from mkosi.mounts import finalize_certificate_mounts from mkosi.run import run, workdir def curl(config: Config, url: str, output_dir: Path) -> None: run( [ "curl", "--location", "--output-dir", workdir(output_dir), "--remote-name", "--no-progress-meter", "--fail", *(["--proxy", config.proxy_url] if config.proxy_url else []), *(["--noproxy", ",".join(config.proxy_exclude)] if config.proxy_exclude else []), *(["--proxy-capath", "/proxy.cacert"] if config.proxy_peer_certificate else []), *(["--proxy-cert", "/proxy.clientcert"] if config.proxy_client_certificate else []), *(["--proxy-key", "/proxy.clientkey"] if config.proxy_client_key else []), url, ], sandbox=config.sandbox( network=True, options=["--bind", output_dir, workdir(output_dir), *finalize_certificate_mounts(config)], ), ) # fmt: skip mkosi-25.3/mkosi/distributions/000077500000000000000000000000001474711424400166215ustar00rootroot00000000000000mkosi-25.3/mkosi/distributions/__init__.py000066400000000000000000000142651474711424400207420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import enum import importlib import urllib.parse from collections.abc import Sequence from pathlib import Path from typing import TYPE_CHECKING, Optional, cast from mkosi.util import StrEnum, read_env_file if TYPE_CHECKING: from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.installer import PackageManager class PackageType(StrEnum): none = enum.auto() rpm = enum.auto() deb = enum.auto() pkg = enum.auto() class DistributionInstaller: @classmethod def pretty_name(cls) -> str: raise NotImplementedError @classmethod def package_manager(cls, config: "Config") -> type["PackageManager"]: raise NotImplementedError @classmethod def keyring(cls, context: "Context") -> None: pass @classmethod def setup(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install(cls, context: "Context") -> None: raise NotImplementedError @classmethod def install_packages(cls, context: "Context", packages: Sequence[str]) -> None: raise NotImplementedError @classmethod def remove_packages(cls, context: "Context", packages: Sequence[str]) -> None: raise NotImplementedError @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def architecture(cls, arch: "Architecture") -> str: raise NotImplementedError @classmethod def package_type(cls) -> PackageType: return PackageType.none @classmethod def default_release(cls) -> str: return "" @classmethod def default_tools_tree_distribution(cls) -> Optional["Distribution"]: return None @classmethod def grub_prefix(cls) -> str: return "grub" class Distribution(StrEnum): # Please consult docs/distribution-policy.md and contact one # of the mkosi maintainers before implementing a new distribution. 
fedora = enum.auto() debian = enum.auto() kali = enum.auto() ubuntu = enum.auto() arch = enum.auto() opensuse = enum.auto() mageia = enum.auto() centos = enum.auto() rhel = enum.auto() rhel_ubi = enum.auto() openmandriva = enum.auto() rocky = enum.auto() alma = enum.auto() azure = enum.auto() custom = enum.auto() def is_centos_variant(self) -> bool: return self in ( Distribution.centos, Distribution.alma, Distribution.rocky, Distribution.rhel, Distribution.rhel_ubi, ) def is_apt_distribution(self) -> bool: return self in (Distribution.debian, Distribution.ubuntu, Distribution.kali) def is_rpm_distribution(self) -> bool: return self in ( Distribution.azure, Distribution.fedora, Distribution.opensuse, Distribution.mageia, Distribution.centos, Distribution.rhel, Distribution.rhel_ubi, Distribution.openmandriva, Distribution.rocky, Distribution.alma, ) def pretty_name(self) -> str: return self.installer().pretty_name() def package_manager(self, config: "Config") -> type["PackageManager"]: return self.installer().package_manager(config) def keyring(self, context: "Context") -> None: return self.installer().keyring(context) def setup(self, context: "Context") -> None: return self.installer().setup(context) def install(self, context: "Context") -> None: return self.installer().install(context) def install_packages(self, context: "Context", packages: Sequence[str]) -> None: return self.installer().install_packages(context, packages) def remove_packages(self, context: "Context", packages: Sequence[str]) -> None: return self.installer().remove_packages(context, packages) def filesystem(self) -> str: return self.installer().filesystem() def architecture(self, arch: "Architecture") -> str: return self.installer().architecture(arch) def package_type(self) -> PackageType: return self.installer().package_type() def default_release(self) -> str: return self.installer().default_release() def default_tools_tree_distribution(self) -> "Distribution": return self.installer().default_tools_tree_distribution() or self def grub_prefix(self) -> str: return self.installer().grub_prefix() def createrepo(self, context: "Context") -> None: return self.installer().package_manager(context.config).createrepo(context) def installer(self) -> type[DistributionInstaller]: modname = str(self).replace("-", "_") mod = importlib.import_module(f"mkosi.distributions.{modname}") installer = getattr(mod, "Installer") assert issubclass(installer, DistributionInstaller) return cast(type[DistributionInstaller], installer) def detect_distribution(root: Path = Path("/")) -> tuple[Optional[Distribution], Optional[str]]: try: os_release = read_env_file(root / "etc/os-release") except FileNotFoundError: try: os_release = read_env_file(root / "usr/lib/os-release") except FileNotFoundError: return None, None dist_id = os_release.get("ID", "linux") dist_id_like = os_release.get("ID_LIKE", "").split() version_id = os_release.get("VERSION_ID", None) version_codename = os_release.get("VERSION_CODENAME", None) quirks = { "azurelinux": Distribution.azure, } d: Optional[Distribution] = None for the_id in [dist_id, *dist_id_like]: d = Distribution.__members__.get(the_id, quirks.get(the_id)) if d is not None: break if d and d.is_apt_distribution() and version_codename: version_id = version_codename return d, version_id def join_mirror(mirror: str, link: str) -> str: # urljoin() behaves weirdly if the base does not end with a / or the path starts with a / so fix them up # as needed. 
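# (Illustrative aside.) Without the fix-ups below, urljoin("https://example.com/pub", "dists")
# would yield "https://example.com/dists", dropping the final path component, and a link with a
# leading "/" would be treated as absolute and discard the mirror's path entirely.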
if not mirror.endswith("/"): mirror = f"{mirror}/" link = link.removeprefix("/") return urllib.parse.urljoin(mirror, link) mkosi-25.3/mkosi/distributions/alma.py000066400000000000000000000022301474711424400201020ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "AlmaLinux" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-AlmaLinux-{context.config.release}", f"https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{context.config.release}", ), ) @classmethod def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]: if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/$releasever/{repo.lower()}" return [RpmRepository(repo, url, cls.gpgurls(context))] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-25.3/mkosi/distributions/arch.py000066400000000000000000000100131474711424400201030ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import tempfile from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.archive import extract_tar from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distributions import DistributionInstaller, PackageType from mkosi.installer import PackageManager from mkosi.installer.pacman import Pacman, PacmanRepository from mkosi.log import complete_step, die from mkosi.util import sort_packages class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Arch Linux" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.pkg @classmethod def default_release(cls) -> str: return "rolling" @classmethod def package_manager(cls, config: "Config") -> type[PackageManager]: return Pacman @classmethod def keyring(cls, context: Context) -> None: if context.config.repository_key_fetch: with ( complete_step(f"Downloading {cls.pretty_name()} keyring"), tempfile.TemporaryDirectory() as d, ): curl( context.config, "https://archlinux.org/packages/core/any/archlinux-keyring/download", Path(d), ) extract_tar( next(Path(d).iterdir()), context.sandbox_tree, dirs=["usr/share/pacman/keyrings"], sandbox=context.sandbox, ) Pacman.keyring(context) @classmethod def setup(cls, context: Context) -> None: Pacman.setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: Pacman.invoke( context, "--sync", ["--needed", "--assume-installed", "initramfs", *sort_packages(packages)], apivfs=apivfs, ) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Pacman.invoke(context, "--remove", ["--nosave", "--recursive", *packages], apivfs=True) @classmethod def repositories(cls, context: Context) -> Iterable[PacmanRepository]: if context.config.local_mirror: yield PacmanRepository("core", context.config.local_mirror) else: if 
context.config.architecture.is_arm_variant(): url = f"{context.config.mirror or 'http://mirror.archlinuxarm.org'}/$arch/$repo" else: url = f"{context.config.mirror or 'https://geo.mirror.pkgbuild.com'}/$repo/os/$arch" # Testing repositories have to go before regular ones to take precedence. repos = [ repo for repo in ( "core-testing", "core-testing-debug", "extra-testing", "extra-testing-debug", "core-debug", "extra-debug", "multilib-testing", "multilib", ) if repo in context.config.repositories ] + ["core", "extra"] if context.config.architecture.is_arm_variant(): repos += ["alarm"] for repo in repos: yield PacmanRepository(repo, url) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", Architecture.arm: "armv7h", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Arch Linux") return a mkosi-25.3/mkosi/distributions/azure.py # SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import ( fedora, join_mirror, ) from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die class Installer(fedora.Installer): @classmethod def pretty_name(cls) -> str: return "Azure Linux" @classmethod def default_release(cls) -> str: return "3.0" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def setup(cls, context: Context) -> None: setup_rpm(context, dbpath="/var/lib/rpm") Dnf.setup(context, list(cls.repositories(context)), filelists=False) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem", "azurelinux-release"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = ( find_rpm_gpgkey( context, "MICROSOFT-RPM-GPG-KEY", "https://raw.githubusercontent.com/rpm-software-management/distribution-gpg-keys/main/keys/azure-linux/MICROSOFT-RPM-GPG-KEY", ), ) if context.config.local_mirror: yield RpmRepository("base", f"baseurl={context.config.local_mirror}", gpgurls) return mirror = context.config.mirror or "https://packages.microsoft.com/azurelinux" url = join_mirror(mirror, context.config.release) for repo in ("base", "extended", "ms-oss", "ms-non-oss", "cloud-native", "nvidia"): yield RpmRepository( repo, f"baseurl={url}/prod/{repo}/$basearch", gpgurls, ) repo = "NVIDIA" if repo == "nvidia" else repo yield RpmRepository( f"{repo}-preview", f"baseurl={url}/preview/{repo}/$basearch", gpgurls, enabled=False, ) for repo in ("base", "cloud-native", "extended"): yield RpmRepository( f"{repo}-debuginfo", f"baseurl={url}/prod/{repo}/debuginfo/$basearch", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-preview-debuginfo", f"baseurl={url}/preview/{repo}/debuginfo/$basearch", gpgurls, enabled=False, ) for repo in ("base", "cloud-native", "extended", "ms-oss"): yield RpmRepository( f"{repo}-source", f"baseurl={url}/prod/{repo}/srpms", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"baseurl={url}/preview/{repo}/srpms", gpgurls, enabled=False, ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "aarch64", Architecture.x86_64: "x86_64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") return a
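# (Illustrative aside, not part of the upstream file.) How join_mirror() composes the
# repository base URLs above, using a hypothetical mirror:
#
#     join_mirror("https://example.com/azurelinux", "3.0")    # -> "https://example.com/azurelinux/3.0"
#     join_mirror("https://example.com/azurelinux/", "/3.0")  # -> "https://example.com/azurelinux/3.0"
#
# Both spellings normalize to the same URL because join_mirror() appends the missing trailing
# slash and strips the leading one before delegating to urllib.parse.urljoin().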
mkosi-25.3/mkosi/distributions/centos.py000066400000000000000000000362051474711424400204740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable, Sequence from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distributions import ( Distribution, DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer import PackageManager from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die from mkosi.versioncomp import GenericVersion CENTOS_SIG_REPO_PRIORITY = 50 class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "CentOS" @classmethod def filesystem(cls) -> str: return "xfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "10" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.fedora @classmethod def package_manager(cls, config: "Config") -> type[PackageManager]: return Dnf @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def dbpath(cls, context: Context) -> str: # The Hyperscale SIG uses /usr/lib/sysimage/rpm in its rebuild of rpm for C9S that's shipped in the # hyperscale-packages-experimental repository. if ( GenericVersion(context.config.release) > 9 or "hyperscale-packages-experimental" in context.config.repositories ): return "/usr/lib/sysimage/rpm" return "/var/lib/rpm" @classmethod def setup(cls, context: Context) -> None: if GenericVersion(context.config.release) <= 8: die(f"{cls.pretty_name()} Stream 8 or earlier variants are not supported") setup_rpm(context, dbpath=cls.dbpath(context)) Dnf.setup(context, list(cls.repositories(context))) (context.sandbox_tree / "etc/dnf/vars/stream").write_text(f"{context.config.release}-stream\n") @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["basesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: Dnf.invoke(context, "install", packages, apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Dnf.invoke(context, "remove", packages, apivfs=True) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.ppc64_le: "ppc64le", Architecture.s390x: "s390x", Architecture.arm64: "aarch64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") return a @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: # First, start with the names of the appropriate keys in /etc/pki/rpm-gpg. if context.config.release == "9": rel = "RPM-GPG-KEY-centosofficial" else: rel = "RPM-GPG-KEY-centosofficial-SHA256" one = find_rpm_gpgkey(context, rel, required=False) # Next, follow up with the names of the appropriate keys in /usr/share/distribution-gpg-keys. if context.config.release == "9": rel = "RPM-GPG-KEY-CentOS-Official" else: rel = "RPM-GPG-KEY-CentOS-Official-SHA256" # The key in /usr/share/distribution-gpg-keys is only required if we didn't find one in # /etc/pki/rpm-gpg. two = find_rpm_gpgkey(context, rel, f"https://www.centos.org/keys/{rel}", required=bool(one)) # Finally, look up the key for the SIG-Extras repository. 
sig = find_rpm_gpgkey( context, "RPM-GPG-KEY-CentOS-SIG-Extras", "https://www.centos.org/keys/RPM-GPG-KEY-CentOS-SIG-Extras", ) return tuple(key for key in (one, two, sig) if key is not None) @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) elif mirror := context.config.mirror: if repo == "extras": yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'SIGs/$stream/{repo}/$basearch/extras-common')}", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{repo}/source/extras-common')}", cls.gpgurls(context), enabled=False, ) else: yield RpmRepository( repo.lower(), f"baseurl={join_mirror(mirror, f'$stream/{repo}/$basearch/os')}", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"baseurl={join_mirror(mirror, f'$stream/{repo}/$basearch/debug/tree')}", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"baseurl={join_mirror(mirror, f'$stream/{repo}/source/tree')}", cls.gpgurls(context), enabled=False, ) else: url = "metalink=https://mirrors.centos.org/metalink" if repo == "extras": yield RpmRepository( repo.lower(), f"{url}?arch=$basearch&repo=centos-extras-sig-extras-common-$stream", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-source", f"{url}?arch=source&repo=centos-extras-sig-extras-common-source-$stream", cls.gpgurls(context), enabled=False, ) else: yield RpmRepository( repo.lower(), f"{url}?arch=$basearch&repo=centos-{repo.lower()}-$stream", cls.gpgurls(context), ) yield RpmRepository( f"{repo.lower()}-debuginfo", f"{url}?arch=$basearch&repo=centos-{repo.lower()}-debug-$stream", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"{repo.lower()}-source", f"{url}?arch=source&repo=centos-{repo.lower()}-source-$stream", cls.gpgurls(context), enabled=False, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: yield from cls.repository_variants(context, "AppStream") return yield from cls.repository_variants(context, "BaseOS") yield from cls.repository_variants(context, "AppStream") yield from cls.repository_variants(context, "extras") yield from cls.repository_variants(context, "CRB") yield from cls.epel_repositories(context) yield from cls.sig_repositories(context) @classmethod def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-EPEL-{context.config.release}", f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{context.config.release}", ), ) if context.config.local_mirror: return if mirror := context.config.mirror: # epel-next does not exist anymore since EPEL 10. repodirs = [ ("epel", "epel"), ("epel-testing", "epel/testing"), ] if int(context.config.release) < 10: repodirs += [ ("epel-next", "epel/next"), ("epel-next-testing", "epel/testing/next"), ] for repo, dir in repodirs: # For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror # URL and path we were given. Since this doesn't work for all scenarios, we also allow # overriding the mirror via an environment variable. 
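# (Hypothetical example.) With Mirror=https://mirror.example.com/centos-stream, the default
# below becomes join_mirror(mirror, "../fedora") == "https://mirror.example.com/fedora"; set
# EPEL_MIRROR in the environment if EPEL lives at a different path on your mirror.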
url = context.config.environment.get("EPEL_MIRROR", join_mirror(mirror, "../fedora")) yield RpmRepository( repo, f"baseurl={url}/{dir}/$releasever/Everything/$basearch", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-debuginfo", f"baseurl={url}/{dir}/$releasever/Everything/$basearch/debug", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"baseurl={url}/{dir}/$releasever/Everything/source/tree", gpgurls, enabled=False, ) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" # epel-next does not exist anymore since EPEL 10. repos = ["epel"] if int(context.config.release) < 10: repos += ["epel-next"] for repo in repos: yield RpmRepository( repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False, ) yield RpmRepository( f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False, ) yield RpmRepository( "epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False, ) yield RpmRepository( "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False, ) yield RpmRepository( "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False, ) # epel-next does not exist anymore since EPEL 10. if int(context.config.release) < 10: yield RpmRepository( "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False, ) yield RpmRepository( "epel-next-testing-debuginfo", f"{url}&repo=epel-testing-next-debug-$releasever", gpgurls, enabled=False, ) yield RpmRepository( "epel-next-testing-source", f"{url}&repo=epel-testing-next-source-$releasever", gpgurls, enabled=False, ) @classmethod def sig_repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: return sigs = ( ( "hyperscale", (f"packages-{c}" for c in ("main", "experimental", "facebook", "hotfixes", "spin", "intel")), ("RPM-GPG-KEY-CentOS-SIG-HyperScale",), ), ) for sig, components, keys in sigs: gpgurls = tuple( find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys ) for c in components: if mirror := context.config.mirror: yield RpmRepository( f"{sig}-{c}", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}')}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}/debug')}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-source", f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/source/{c}')}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) else: url = "metalink=https://mirrors.centos.org/metalink" yield RpmRepository( f"{sig}-{c}", f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-$stream", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-debuginfo", f"{url}?arch=$basearch&repo=centos-{sig}-sig-{c}-debug-$stream", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-source", f"{url}?arch=source&repo=centos-{sig}-sig-{c}-source-$stream", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( f"{sig}-{c}-testing", f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) yield RpmRepository( 
f"{sig}-{c}-testing-debuginfo", f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}", gpgurls, enabled=False, priority=CENTOS_SIG_REPO_PRIORITY, ) mkosi-25.3/mkosi/distributions/custom.py000066400000000000000000000022301474711424400205020ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Sequence from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distributions import DistributionInstaller from mkosi.installer import PackageManager from mkosi.log import die class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Custom" @classmethod def architecture(cls, arch: Architecture) -> str: return str(arch) @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: return PackageManager @classmethod def setup(cls, context: Context) -> None: pass @classmethod def install(cls, context: Context) -> None: pass @classmethod def install_packages(cls, context: Context, packages: Sequence[str]) -> None: if packages: die("Installing packages is not supported for custom distributions'") @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: if packages: die("Removing packages is not supported for custom distributions") mkosi-25.3/mkosi/distributions/debian.py000066400000000000000000000301571474711424400204230ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import tempfile from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.archive import extract_tar from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.distributions import DistributionInstaller, PackageType from mkosi.installer import PackageManager from mkosi.installer.apt import Apt, AptRepository from mkosi.log import die from mkosi.run import run, workdir from mkosi.sandbox import umask class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Debian" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def package_type(cls) -> PackageType: return PackageType.deb @classmethod def default_release(cls) -> str: return "testing" @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: return Apt @classmethod def repositories(cls, context: Context, local: bool = True) -> Iterable[AptRepository]: types = ("deb", "deb-src") components = ("main", *context.config.repositories) if context.config.local_mirror and local: yield AptRepository( types=("deb",), url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return mirror = context.config.mirror or "http://deb.debian.org/debian" signedby = Path("/usr/share/keyrings/debian-archive-keyring.gpg") yield AptRepository( types=types, url=mirror, suite=context.config.release, components=components, signedby=signedby, ) # Debug repos are typically not mirrored. url = "http://deb.debian.org/debian-debug" yield AptRepository( types=types, url=url, suite=f"{context.config.release}-debug", components=components, signedby=signedby, ) if context.config.release in ("unstable", "sid"): return yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-updates", components=components, signedby=signedby, ) yield AptRepository( types=types, # Security updates repos are never mirrored. 
url="http://security.debian.org/debian-security", suite=f"{context.config.release}-security", components=components, signedby=signedby, ) @classmethod def setup(cls, context: Context) -> None: Apt.setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: # Instead of using debootstrap, we replicate its core functionality here. Because dpkg does not have # an option to delay running pre-install maintainer scripts when it installs a package, it's # impossible to use apt directly to bootstrap a Debian chroot since dpkg will try to run a maintainer # script which depends on some basic tool to be available in the chroot from a deb which hasn't been # unpacked yet, causing the script to fail. To avoid these issues, we have to extract all the # essential debs first, and only then run the maintainer scripts for them. # First, we set up merged usr. This list is taken from # https://salsa.debian.org/installer-team/debootstrap/-/blob/master/functions#L1369. subdirs = ["bin", "sbin", "lib"] + { "amd64" : ["lib32", "lib64", "libx32"], "i386" : ["lib64", "libx32"], "mips" : ["lib32", "lib64"], "mipsel" : ["lib32", "lib64"], "mips64el" : ["lib32", "lib64", "libo32"], "loong64" : ["lib32", "lib64"], "powerpc" : ["lib64"], "ppc64" : ["lib32", "lib64"], "ppc64el" : ["lib64"], "s390x" : ["lib32"], "sparc" : ["lib64"], "sparc64" : ["lib32", "lib64"], "x32" : ["lib32", "lib64", "libx32"], }.get(context.config.distribution.architecture(context.config.architecture), []) # fmt: skip with umask(~0o755): for d in subdirs: (context.root / d).symlink_to(f"usr/{d}") (context.root / f"usr/{d}").mkdir(parents=True, exist_ok=True) # Next, we invoke apt-get install to download all the essential packages. With # DPkg::Pre-Install-Pkgs, we specify a shell command that will receive the list of packages that will # be installed on stdin. By configuring Debug::pkgDpkgPm=1, apt-get install will not actually # execute any dpkg commands, so all it does is download the essential debs and tell us their full in # the apt cache without actually installing them. with tempfile.NamedTemporaryFile(mode="r") as f: Apt.invoke( context, "install", [ "-oDebug::pkgDPkgPm=1", f"-oDPkg::Pre-Install-Pkgs::=cat >{workdir(Path(f.name))}", "?essential", "base-files", ], options=["--bind", f.name, workdir(Path(f.name))], ) essential = f.read().strip().splitlines() # Now, extract the debs to the chroot by first extracting the sources tar file out of the deb and # then extracting the tar file into the chroot. for deb in essential: # If a deb path is in the form of "/var/cache/apt/", we transform it to the corresponding # path in mkosi's package cache directory. If it's relative to /repository, we transform it to # the corresponding path in mkosi's local package repository. Otherwise, we use the path as is. 
if Path(deb).is_relative_to("/var/cache"): path = context.config.package_cache_dir_or_default() / Path(deb).relative_to("/var") elif Path(deb).is_relative_to("/repository"): path = context.repository / Path(deb).relative_to("/repository") else: path = Path(deb) with open(path, "rb") as i, tempfile.NamedTemporaryFile() as o: run( ["dpkg-deb", "--fsys-tarfile", "/dev/stdin"], stdin=i, stdout=o, sandbox=context.sandbox(), ) extract_tar( Path(o.name), context.root, log=False, options=( [f"--exclude=./{glob}" for glob in Apt.documentation_exclude_globs] if not context.config.with_docs else [] ), sandbox=context.sandbox, ) # Finally, run apt to properly install packages in the chroot without having to worry that maintainer # scripts won't find basic tools that they depend on. cls.install_packages( context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential] ) fixup_os_release(context) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: # Debian policy is to start daemons by default. The policy-rc.d script can be used to choose which ones # to start. Let's install one that denies all daemon startups. # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information. # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be # managed by the admin. policyrcd = context.root / "usr/sbin/policy-rc.d" with umask(~0o755): policyrcd.parent.mkdir(parents=True, exist_ok=True) with umask(~0o644): policyrcd.write_text("#!/bin/sh\nexit 101\n") Apt.invoke(context, "install", packages, apivfs=apivfs) install_apt_sources(context, cls.repositories(context, local=False)) policyrcd.unlink() # systemd-gpt-auto-generator is disabled by default in Ubuntu: # https://git.launchpad.net/ubuntu/+source/systemd/tree/debian/systemd.links?h=ubuntu/noble-proposed. # Let's make sure it is enabled by default in our images. (context.root / "etc/systemd/system-generators/systemd-gpt-auto-generator").unlink(missing_ok=True) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Apt.invoke(context, "purge", packages, apivfs=True) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "arm64", Architecture.arm: "armhf", Architecture.alpha: "alpha", Architecture.x86_64: "amd64", Architecture.x86: "i386", Architecture.ia64: "ia64", Architecture.loongarch64: "loong64", Architecture.mips64_le: "mips64el", Architecture.mips_le: "mipsel", Architecture.parisc: "hppa", Architecture.ppc64_le: "ppc64el", Architecture.ppc64: "ppc64", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.s390: "s390", }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by {cls.pretty_name()}") return a def install_apt_sources(context: Context, repos: Iterable[AptRepository]) -> None: if not (context.root / "usr/bin/apt").exists(): return sources = context.root / f"etc/apt/sources.list.d/{context.config.release}.sources" if not sources.exists(): with sources.open("w") as f: for repo in repos: f.write(str(repo)) def fixup_os_release(context: Context) -> None: if context.config.release not in ("unstable", "sid"): return # Debian being Debian means we need to special-case the handling of os-release. Fix the content to actually # match what we are building, and set up a diversion so that dpkg doesn't overwrite it on package # updates. Upstream bug report: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008735.
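# A sketch of the effect, with illustrative values: on a sid image whose os-release still carries the # codename of the most recently branched release, a line such as VERSION_CODENAME=trixie is rewritten to # VERSION_CODENAME=sid, and the packaged /etc/os-release is diverted to /etc/os-release.dpkg so the # fixed-up copy survives package upgrades.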
for candidate in ["etc/os-release", "usr/lib/os-release", "usr/lib/initrd-release"]: osrelease = context.root / candidate newosrelease = osrelease.with_suffix(".new") if not osrelease.is_file(): continue if osrelease.is_symlink() and candidate != "etc/os-release": continue with osrelease.open("r") as old, newosrelease.open("w") as new: for line in old.readlines(): if line.startswith("VERSION_CODENAME="): new.write("VERSION_CODENAME=sid\n") else: new.write(line) # On dpkg distributions we cannot simply overwrite /etc/os-release as it is owned by a package. We # need to set up a diversion first, so that it is not overwritten by package updates. We do this for # /etc/os-release as that will be overwritten on package updates and has precedence over # /usr/lib/os-release, and ignore the latter and assume that if a usr-only image is built then the # package manager will not run on it. if candidate == "etc/os-release": run( [ "dpkg-divert", "--quiet", "--root=/buildroot", "--local", "--add", "--rename", "--divert", f"/{candidate}.dpkg", f"/{candidate}", ], sandbox=context.sandbox(options=["--bind", context.root, "/buildroot"]), ) newosrelease.rename(osrelease) mkosi-25.3/mkosi/distributions/fedora.py000066400000000000000000000213051474711424400204340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import re import tempfile from collections.abc import Iterable, Sequence from pathlib import Path from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distributions import ( DistributionInstaller, PackageType, join_mirror, ) from mkosi.installer import PackageManager from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.log import die from mkosi.util import startswith, tuplify @tuplify def find_fedora_rpm_gpgkeys(context: Context) -> Iterable[str]: key1 = find_rpm_gpgkey( context, key=f"RPM-GPG-KEY-fedora-{context.config.release}-primary", required=False ) key2 = find_rpm_gpgkey( context, key=f"RPM-GPG-KEY-fedora-{context.config.release}-secondary", required=False ) if key1: # During branching, there is always a kerfuffle with the key transition. # For Rawhide, try to load the N+1 key, just in case our local configuration # still indicates that Rawhide==N, but really Rawhide==N+1. if context.config.release == "rawhide" and (rhs := startswith(key1, "file://")): path = Path(rhs).resolve() if m := re.match(r"RPM-GPG-KEY-fedora-(\d+)-(primary|secondary)", path.name): version = int(m.group(1)) if key3 := find_rpm_gpgkey(context, key=f"RPM-GPG-KEY-fedora-{version + 1}-primary"): # We yield the resolved path for key1, to make it clear that it's # for version N, and the other key is for version N+1. key1 = path.as_uri() yield key3 yield key1 if key2: yield key2 if not key1 and not key2: if not context.config.repository_key_fetch: die( "Fedora GPG keys not found in /usr/share/distribution-gpg-keys", hint="Make sure the distribution-gpg-keys package is installed", ) if context.config.release == "rawhide": # https://fedoraproject.org/fedora.gpg is always outdated when the rawhide key changes. Instead, # let's fetch it from distribution-gpg-keys on github, which is generally up-to-date. keys = "https://raw.githubusercontent.com/rpm-software-management/distribution-gpg-keys/main/keys/fedora" # The rawhide key is a symlink and github doesn't redirect those to the actual file for some # reason, so we fetch the file and read the release it points to ourselves.
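# Illustrative example (the version number is hypothetical): the fetched RPM-GPG-KEY-fedora-rawhide-primary # file may simply contain the name of the versioned key it links to, e.g. "RPM-GPG-KEY-fedora-42-primary", # which is then appended to the base URL below to form the actual key URL.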
with tempfile.TemporaryDirectory() as d: curl(context.config, f"{keys}/RPM-GPG-KEY-fedora-rawhide-primary", Path(d)) key = (Path(d) / "RPM-GPG-KEY-fedora-rawhide-primary").read_text() keyurl = f"{keys}/{key}" else: keyurl = "https://fedoraproject.org/fedora.gpg" yield keyurl class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> str: return "Fedora Linux" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "41" @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: return Dnf @classmethod def setup(cls, context: Context) -> None: setup_rpm(context) Dnf.setup( context, list(cls.repositories(context)), filelists=False, metadata_expire="6h" if context.config.release == "rawhide" else None, ) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["basesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: Dnf.invoke(context, "install", packages, apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Dnf.invoke(context, "remove", packages, apivfs=True) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = find_fedora_rpm_gpgkeys(context) if context.config.local_mirror: yield RpmRepository("fedora", f"baseurl={context.config.local_mirror}", gpgurls) return if context.config.release == "eln": mirror = ( context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" ) for repo in ("Appstream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" yield RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls) yield RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False) elif m := context.config.mirror: directory = "development" if context.config.release == "rawhide" else "releases" url = f"baseurl={join_mirror(m, f'linux/{directory}/$releasever/Everything')}" yield RpmRepository("fedora", f"{url}/$basearch/os", gpgurls) yield RpmRepository("fedora-debuginfo", f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository("fedora-source", f"{url}/source/tree", gpgurls, enabled=False) if context.config.release != "rawhide": url = f"baseurl={join_mirror(m, 'linux/updates/$releasever/Everything')}" yield RpmRepository("updates", f"{url}/$basearch", gpgurls) yield RpmRepository("updates-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False) yield RpmRepository("updates-source", f"{url}/source/tree", gpgurls, enabled=False) url = f"baseurl={join_mirror(m, 'linux/updates/testing/$releasever/Everything')}" yield RpmRepository("updates-testing", f"{url}/$basearch", gpgurls, enabled=False) yield RpmRepository( "updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False ) yield RpmRepository("updates-testing-source", f"{url}/source/tree", gpgurls, enabled=False) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" yield RpmRepository("fedora", f"{url}&repo=fedora-$releasever", gpgurls) yield RpmRepository( "fedora-debuginfo", f"{url}&repo=fedora-debug-$releasever", gpgurls, enabled=False ) yield RpmRepository( "fedora-source", 
f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False ) if context.config.release != "rawhide": yield RpmRepository("updates", f"{url}&repo=updates-released-f$releasever", gpgurls) yield RpmRepository( "updates-debuginfo", f"{url}&repo=updates-released-debug-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False ) yield RpmRepository( "updates-testing-debuginfo", f"{url}&repo=updates-testing-debug-f$releasever", gpgurls, enabled=False, ) yield RpmRepository( "updates-testing-source", f"{url}&repo=updates-testing-source-f$releasever", gpgurls, enabled=False, ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "aarch64", Architecture.mips64_le: "mips64el", Architecture.mips_le: "mipsel", Architecture.ppc64_le: "ppc64le", Architecture.riscv64: "riscv64", Architecture.s390x: "s390x", Architecture.x86_64: "x86_64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Fedora") return a mkosi-25.3/mkosi/distributions/kali.py000066400000000000000000000033301474711424400201120ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import Distribution, debian from mkosi.installer.apt import AptRepository from mkosi.log import die class Installer(debian.Installer): @classmethod def pretty_name(cls) -> str: return "Kali Linux" @classmethod def default_release(cls) -> str: return "kali-rolling" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.kali @classmethod def repositories(cls, context: Context, local: bool = True) -> Iterable[AptRepository]: if context.config.local_mirror and local: yield AptRepository( types=("deb",), url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return yield AptRepository( types=("deb", "deb-src"), url=context.config.mirror or "http://http.kali.org/kali", suite=context.config.release, components=("main", *context.config.repositories), signedby=Path("/usr/share/keyrings/kali-archive-keyring.gpg"), ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.arm64: "arm64", Architecture.arm: "armhf", Architecture.x86_64: "amd64", Architecture.x86: "i386", }.get(arch) if not a: die(f"Architecture {arch} is not supported by {cls.pretty_name()}") return a mkosi-25.3/mkosi/distributions/mageia.py000066400000000000000000000041151474711424400204170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import fedora, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(fedora.Installer): @classmethod def pretty_name(cls) -> str: return "Mageia" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def default_release(cls) -> str: return "cauldron" @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: gpgurls = ( find_rpm_gpgkey( context, 
"RPM-GPG-KEY-Mageia", "https://mirrors.kernel.org/mageia/distrib/$releasever/$basearch/media/core/release/media_info/pubkey", ), ) if context.config.local_mirror: yield RpmRepository("core-release", f"baseurl={context.config.local_mirror}", gpgurls) return if context.config.mirror: url = ( f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}" ) yield RpmRepository("core-release", f"{url}/release", gpgurls) yield RpmRepository("core-updates", f"{url}/updates/", gpgurls) else: url = "mirrorlist=https://www.mageia.org/mirrorlist/?release=$releasever&arch=$basearch§ion=core" yield RpmRepository("core-release", f"{url}&repo=release", gpgurls) yield RpmRepository("core-updates", f"{url}&repo=updates", gpgurls) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Mageia") return a mkosi-25.3/mkosi/distributions/openmandriva.py000066400000000000000000000035361474711424400216650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.config import Architecture from mkosi.context import Context from mkosi.distributions import fedora, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(fedora.Installer): @classmethod def pretty_name(cls) -> str: return "OpenMandriva" @classmethod def filesystem(cls) -> str: return "ext4" @classmethod def default_release(cls) -> str: return "cooker" @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: mirror = context.config.mirror or "http://mirror.openmandriva.org" gpgurls = ( find_rpm_gpgkey( context, "RPM-GPG-KEY-OpenMandriva", "https://raw.githubusercontent.com/OpenMandrivaAssociation/openmandriva-repos/master/RPM-GPG-KEY-OpenMandriva", ), ) if context.config.local_mirror: yield RpmRepository("main-release", f"baseurl={context.config.local_mirror}", gpgurls) return url = f"baseurl={join_mirror(mirror, '$releasever/repository/$basearch/main')}" yield RpmRepository("main-release", f"{url}/release", gpgurls) yield RpmRepository("main-updates", f"{url}/updates", gpgurls) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", Architecture.riscv64: "riscv64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by OpenMandriva") return a mkosi-25.3/mkosi/distributions/opensuse.py000066400000000000000000000240641474711424400210420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import tempfile from collections.abc import Iterable, Sequence from pathlib import Path from xml.etree import ElementTree from mkosi.config import Architecture, Config from mkosi.context import Context from mkosi.curl import curl from mkosi.distributions import DistributionInstaller, PackageType, join_mirror from mkosi.installer import PackageManager from mkosi.installer.dnf import Dnf from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey, setup_rpm from mkosi.installer.zypper import Zypper from mkosi.log import die from mkosi.mounts import finalize_certificate_mounts from mkosi.run import run from mkosi.util import sort_packages class Installer(DistributionInstaller): @classmethod def pretty_name(cls) -> 
str: return "openSUSE" @classmethod def filesystem(cls) -> str: return "btrfs" @classmethod def package_type(cls) -> PackageType: return PackageType.rpm @classmethod def default_release(cls) -> str: return "tumbleweed" @classmethod def grub_prefix(cls) -> str: return "grub2" @classmethod def package_manager(cls, config: Config) -> type[PackageManager]: if config.find_binary("zypper"): return Zypper else: return Dnf @classmethod def setup(cls, context: Context) -> None: setup_rpm(context, dbbackend="ndb") zypper = context.config.find_binary("zypper") if zypper: Zypper.setup(context, list(cls.repositories(context))) else: Dnf.setup(context, list(cls.repositories(context))) @classmethod def install(cls, context: Context) -> None: cls.install_packages(context, ["filesystem"], apivfs=False) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: if context.config.find_binary("zypper"): Zypper.invoke( context, "install", [ "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", *sort_packages(packages), ], apivfs=apivfs, ) # fmt: skip else: Dnf.invoke(context, "install", sort_packages(packages), apivfs=apivfs) @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: if context.config.find_binary("zypper"): Zypper.invoke(context, "remove", ["--clean-deps", *sort_packages(packages)], apivfs=True) else: Dnf.invoke(context, "remove", packages, apivfs=True) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(id="local-mirror", url=f"baseurl={context.config.local_mirror}", gpgurls=()) return zypper = context.config.find_binary("zypper") mirror = context.config.mirror or "https://download.opensuse.org" if context.config.release == "tumbleweed" or context.config.release.isdigit(): gpgkeys = tuple( p for key in ("RPM-GPG-KEY-openSUSE-Tumbleweed", "RPM-GPG-KEY-openSUSE") if (p := find_rpm_gpgkey(context, key, required=False)) ) if not gpgkeys and not context.config.repository_key_fetch: die( "openSUSE GPG keys not found in /usr/share/distribution-gpg-keys", hint="Make sure the distribution-gpg-keys package is installed", ) if zypper and gpgkeys: run( [ "rpm", "--root=/buildroot", "--import", *(key.removeprefix("file://") for key in gpgkeys), ], sandbox=context.sandbox( options=[ "--bind", context.root, "/buildroot", *finalize_certificate_mounts(context.config), ], ), ) # fmt: skip if context.config.release == "tumbleweed": if context.config.architecture == Architecture.x86_64: subdir = "" else: subdir = f"ports/{cls.architecture(context.config.architecture)}" else: if context.config.architecture != Architecture.x86_64: die(f"Old snapshots are only supported for x86-64 on {cls.pretty_name()}") subdir = f"history/{context.config.release}" for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/tumbleweed/repo/{repo}") yield RpmRepository( id=repo, url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), enabled=repo == "oss", ) if context.config.release == "tumbleweed": for d in ("debug", "source"): url = join_mirror(mirror, f"{subdir}/{d}/tumbleweed/repo/{repo}") yield RpmRepository( id=f"{repo}-{d}", url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), enabled=False, ) if context.config.release == "tumbleweed": url = join_mirror(mirror, f"{subdir}/update/tumbleweed") yield RpmRepository( id="oss-update", 
url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), ) url = join_mirror(mirror, f"{subdir}/update/tumbleweed-non-oss") yield RpmRepository( id="non-oss-update", url=f"baseurl={url}", gpgurls=gpgkeys or (fetch_gpgurls(context, url) if not zypper else ()), enabled=False, ) else: if ( context.config.release in ("current", "stable", "leap") and context.config.architecture != Architecture.x86_64 ): die( f"{cls.pretty_name()} only supports current and stable releases " "for the x86-64 architecture", hint="Specify either tumbleweed or a specific leap release such as 15.6", ) if context.config.release in ("current", "stable", "leap"): release = "openSUSE-current" else: release = f"leap/{context.config.release}" if context.config.architecture == Architecture.x86_64: subdir = "" else: subdir = f"ports/{cls.architecture(context.config.architecture)}" for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/distribution/{release}/repo/{repo}") yield RpmRepository( id=repo, url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=repo == "oss", ) for d in ("debug", "source"): for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/{d}/distribution/{release}/repo/{repo}") yield RpmRepository( id=f"{repo}-{d}", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=False, ) if context.config.release in ("current", "stable", "leap"): url = join_mirror(mirror, f"{subdir}/update/openSUSE-current") yield RpmRepository( id="oss-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), ) url = join_mirror(mirror, f"{subdir}/update/openSUSE-non-oss-current") yield RpmRepository( id="non-oss-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=False, ) else: for repo in ("oss", "non-oss"): url = join_mirror(mirror, f"{subdir}/update/{release}/{repo}") yield RpmRepository( id=f"{repo}-update", url=f"baseurl={url}", gpgurls=fetch_gpgurls(context, url) if not zypper else (), enabled=repo == "oss", ) @classmethod def architecture(cls, arch: Architecture) -> str: a = { Architecture.x86_64: "x86_64", Architecture.arm64: "aarch64", }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by openSUSE") return a def fetch_gpgurls(context: Context, repourl: str) -> tuple[str, ...]: gpgurls = [f"{repourl}/repodata/repomd.xml.key"] with tempfile.TemporaryDirectory() as d: curl(context.config, f"{repourl}/repodata/repomd.xml", Path(d)) xml = (Path(d) / "repomd.xml").read_text() root = ElementTree.fromstring(xml) tags = root.find("{http://linux.duke.edu/metadata/repo}tags") if not tags: die("repomd.xml missing element") for child in tags.iter("{http://linux.duke.edu/metadata/repo}content"): if child.text and child.text.startswith("gpg-pubkey"): gpgkey = child.text.partition("?")[0] gpgurls += [f"{repourl}{gpgkey}"] return tuple(gpgurls) mkosi-25.3/mkosi/distributions/rhel.py000066400000000000000000000075731474711424400201410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from typing import Any, Optional from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey from mkosi.log import die class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "RHEL" @staticmethod def gpgurls(context: Context) -> 
tuple[str, ...]: major = int(float(context.config.release)) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{major}-release", "https://access.redhat.com/security/data/fd431d51.txt", ), ) @staticmethod def sslcacert(context: Context) -> Optional[Path]: if context.config.mirror: return None p = Path("etc/rhsm/ca/redhat-uep.pem") if (context.sandbox_tree / p).exists(): p = context.sandbox_tree / p elif (Path("/") / p).exists(): p = Path("/") / p else: die("redhat-uep.pem certificate not found in host system or sandbox tree") return p @staticmethod def sslclientkey(context: Context) -> Optional[Path]: if context.config.mirror: return None pattern = "etc/pki/entitlement/*-key.pem" p = next((p for p in sorted(context.sandbox_tree.glob(pattern))), None) if not p: p = next((p for p in Path("/").glob(pattern)), None) if not p: die("Entitlement key not found in host system or sandbox tree") return p @staticmethod def sslclientcert(context: Context) -> Optional[Path]: if context.config.mirror: return None pattern = "etc/pki/entitlement/*.pem" p = next((p for p in sorted(context.sandbox_tree.glob(pattern)) if "key" not in p.name), None) if not p: p = next((p for p in sorted(Path("/").glob(pattern)) if "key" not in p.name), None) if not p: die("Entitlement certificate not found in host system or sandbox tree") return p @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) else: mirror = context.config.mirror or "https://cdn.redhat.com/content/dist/" common: dict[str, Any] = dict( gpgurls=cls.gpgurls(context), sslcacert=cls.sslcacert(context), sslclientcert=cls.sslclientcert(context), sslclientkey=cls.sslclientkey(context), ) v = context.config.release major = int(float(v)) yield RpmRepository( f"rhel-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/os')}", enabled=True, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/debug')}", enabled=False, **common, ) yield RpmRepository( f"rhel-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'rhel{major}/{v}/$basearch/{repo}/source')}", enabled=False, **common, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: yield from cls.repository_variants(context, "baseos") yield from cls.repository_variants(context, "appstream") yield from cls.repository_variants(context, "codeready-builder") yield from cls.epel_repositories(context) mkosi-25.3/mkosi/distributions/rhel_ubi.py000066400000000000000000000041401474711424400207630ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "RHEL UBI" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: major = int(float(context.config.release)) return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-redhat{major}-release", "https://access.redhat.com/security/data/fd431d51.txt", ), ) @classmethod def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]: if context.config.local_mirror: yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context)) else: mirror = 
context.config.mirror or "https://cdn-ubi.redhat.com/content/public/ubi/dist/" v = context.config.release yield RpmRepository( f"ubi-{v}-{repo}-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/os')}", cls.gpgurls(context), ) yield RpmRepository( f"ubi-{v}-{repo}-debug-rpms", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/debug')}", cls.gpgurls(context), enabled=False, ) yield RpmRepository( f"ubi-{v}-{repo}-source", f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/source')}", cls.gpgurls(context), enabled=False, ) @classmethod def repositories(cls, context: Context) -> Iterable[RpmRepository]: yield from cls.repository_variants(context, "baseos") yield from cls.repository_variants(context, "appstream") yield from cls.repository_variants(context, "codeready-builder") yield from cls.epel_repositories(context) mkosi-25.3/mkosi/distributions/rocky.py000066400000000000000000000022441474711424400203240ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from mkosi.context import Context from mkosi.distributions import centos, join_mirror from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey class Installer(centos.Installer): @classmethod def pretty_name(cls) -> str: return "Rocky Linux" @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: return ( find_rpm_gpgkey( context, f"RPM-GPG-KEY-Rocky-{context.config.release}", f"https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-{context.config.release}", ), ) @classmethod def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]: if context.config.mirror: url = f"baseurl={join_mirror(context.config.mirror, f'$releasever/{repo}/$basearch/os')}" else: url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo={repo}-$releasever" return [RpmRepository(repo, url, cls.gpgurls(context))] @classmethod def sig_repositories(cls, context: Context) -> list[RpmRepository]: return [] mkosi-25.3/mkosi/distributions/ubuntu.py000066400000000000000000000043511474711424400205200ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterable from pathlib import Path from mkosi.context import Context from mkosi.distributions import Distribution, debian from mkosi.installer.apt import AptRepository class Installer(debian.Installer): @classmethod def pretty_name(cls) -> str: return "Ubuntu" @classmethod def default_release(cls) -> str: return "noble" @classmethod def default_tools_tree_distribution(cls) -> Distribution: return Distribution.debian @classmethod def repositories(cls, context: Context, local: bool = True) -> Iterable[AptRepository]: types = ("deb", "deb-src") components = ( "main", *context.config.repositories, ) if context.config.local_mirror and local: yield AptRepository( types=("deb",), url=context.config.local_mirror, suite=context.config.release, components=("main",), signedby=None, ) return if context.config.architecture.is_x86_variant(): mirror = context.config.mirror or "http://archive.ubuntu.com/ubuntu" else: mirror = context.config.mirror or "http://ports.ubuntu.com" signedby = Path("/usr/share/keyrings/ubuntu-archive-keyring.gpg") yield AptRepository( types=types, url=mirror, suite=context.config.release, components=components, signedby=signedby, ) yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-updates", components=components, signedby=signedby, ) # Security updates repos are never mirrored. But !x86 are on the ports server. 
if context.config.architecture.is_x86_variant(): mirror = "http://security.ubuntu.com/ubuntu" else: mirror = "http://ports.ubuntu.com" yield AptRepository( types=types, url=mirror, suite=f"{context.config.release}-security", components=components, signedby=signedby, ) mkosi-25.3/mkosi/documentation.py000066400000000000000000000034061474711424400171450ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import logging import subprocess from pathlib import Path from mkosi.config import DocFormat from mkosi.log import die from mkosi.pager import page from mkosi.run import find_binary, run def show_docs( manual: str, formats: list[DocFormat], *, man_chapter: int = 1, resources: Path, pager: bool = True, ) -> None: while formats: form = formats.pop(0) try: if form == DocFormat.man: man = resources / f"man/{manual}.{man_chapter}" if not man.exists(): raise FileNotFoundError() run(["man", "--local-file", man]) return elif form == DocFormat.pandoc: if not find_binary("pandoc"): logging.warning("pandoc is not available") continue pandoc = run( ["pandoc", "-t", "man", "-s", resources / f"man/{manual}.{man_chapter}.md"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, log=False, ) run(["man", "--local-file", "-"], input=pandoc.stdout) return elif form == DocFormat.markdown: page((resources / f"man/{manual}.{man_chapter}.md").read_text(), pager) return elif form == DocFormat.system: run(["man", str(man_chapter), manual], log=False) return except (FileNotFoundError, subprocess.CalledProcessError) as e: if not formats: if isinstance(e, FileNotFoundError): die(f"The mkosi package does not contain the man page {manual!r}.") raise e mkosi-25.3/mkosi/initrd.py000066400000000000000000000237041474711424400155700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import contextlib import dataclasses import logging import os import platform import shutil import sys import tempfile from pathlib import Path from typing import Optional, cast import mkosi.resources from mkosi.config import DocFormat, OutputFormat from mkosi.documentation import show_docs from mkosi.log import log_notice, log_setup from mkosi.run import find_binary, run, uncaught_exception_handler from mkosi.sandbox import __version__, umask from mkosi.tree import copy_tree from mkosi.util import PathString, mandatory_variable, resource_path @dataclasses.dataclass(frozen=True) class KernelInstallContext: command: str kernel_version: str entry_dir: Path kernel_image: Path initrds: list[Path] staging_area: Path layout: str image_type: str initrd_generator: Optional[str] uki_generator: Optional[str] verbose: bool @staticmethod def parse(*, name: str, description: str) -> "KernelInstallContext": parser = argparse.ArgumentParser( description=description, allow_abbrev=False, usage=f"{name} COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE…", ) parser.add_argument( "command", metavar="COMMAND", help="The action to perform.
Only 'add' is supported.", ) parser.add_argument( "kernel_version", metavar="KERNEL_VERSION", help="Kernel version string", ) parser.add_argument( "entry_dir", metavar="ENTRY_DIR", type=Path, nargs="?", help="Type#1 entry directory (ignored)", ) parser.add_argument( "kernel_image", metavar="KERNEL_IMAGE", type=Path, nargs="?", help="Kernel image", ) parser.add_argument( "initrds", metavar="INITRD…", type=Path, nargs="*", help="Initrd files", ) parser.add_argument( "--version", action="version", version=f"mkosi {__version__}", ) args = parser.parse_args() return KernelInstallContext( command=args.command, kernel_version=args.kernel_version, entry_dir=args.entry_dir, kernel_image=args.kernel_image, initrds=args.initrds, staging_area=Path(mandatory_variable("KERNEL_INSTALL_STAGING_AREA")), layout=mandatory_variable("KERNEL_INSTALL_LAYOUT"), image_type=mandatory_variable("KERNEL_INSTALL_IMAGE_TYPE"), initrd_generator=os.getenv("KERNEL_INSTALL_INITRD_GENERATOR"), uki_generator=os.getenv("KERNEL_INSTALL_UKI_GENERATOR"), verbose=int(os.getenv("KERNEL_INSTALL_VERBOSE", 0)) > 0, ) def process_crypttab(staging_dir: str) -> list[str]: cmdline = [] # Generate crypttab with all the x-initrd.attach entries if Path("/etc/crypttab").exists(): try: crypttab = [ line for line in Path("/etc/crypttab").read_text().splitlines() if ( len(entry := line.split()) >= 4 and not entry[0].startswith("#") and "x-initrd.attach" in entry[3] ) ] if crypttab: with (Path(staging_dir) / "crypttab").open("w") as f: f.write("# Automatically generated by mkosi-initrd\n") f.write("\n".join(crypttab)) cmdline += ["--extra-tree", f"{staging_dir}/crypttab:/etc/crypttab"] except PermissionError: logging.warning("Permission denied to access /etc/crypttab, the initrd may be unbootable") return cmdline def initrd_finalize(staging_dir: str, output: str, output_dir: str) -> None: if output_dir: with umask(~0o700) if os.getuid() == 0 else cast(umask, contextlib.nullcontext()): Path(output_dir).mkdir(parents=True, exist_ok=True) else: output_dir = str(Path.cwd()) log_notice(f"Copying {staging_dir}/{output} to {output_dir}/{output}") # mkosi symlinks the expected output image, so dereference it copy_tree( Path(f"{staging_dir}/{output}").resolve(), Path(f"{output_dir}/{output}"), ) def initrd_common_args(parser: argparse.ArgumentParser) -> None: parser.add_argument( "--kernel-version", metavar="KERNEL_VERSION", help="Kernel version string", default=platform.uname().release, ) parser.add_argument( "-O", "--output-dir", metavar="DIR", help="Output directory", default="", ) parser.add_argument( "--debug", help="Turn on debugging output", action="store_true", default=False, ) parser.add_argument( "--debug-shell", help="Spawn debug shell if a sandboxed command fails", action="store_true", default=False, ) parser.add_argument( "-D", "--show-documentation", help="Show the man page", action="store_true", default=False, ) parser.add_argument( "--version", action="version", version=f"mkosi {__version__}", ) def include_system_config(name: str) -> list[str]: cmdline = [] for d in ("/usr/lib", "/usr/local/lib", "/run", "/etc"): p = Path(d) / name if p.exists(): cmdline += ["--include", os.fspath(p)] return cmdline @uncaught_exception_handler() def main() -> None: log_setup() parser = argparse.ArgumentParser( prog="mkosi-initrd", description="Build initrds or unified kernel images for the current system using mkosi", allow_abbrev=False, usage="mkosi-initrd [options...]", ) parser.add_argument( "-o", "--output", metavar="NAME", help="Output name", 
default="initrd", ) parser.add_argument( "--kernel-image", metavar="KERNEL_IMAGE", help="Kernel image", type=Path, ) parser.add_argument( "-t", "--format", choices=[str(OutputFormat.cpio), str(OutputFormat.uki), str(OutputFormat.directory)], help="Output format (CPIO archive, UKI or local directory)", default="cpio", ) initrd_common_args(parser) args = parser.parse_args() if args.show_documentation: with resource_path(mkosi.resources) as r: show_docs("mkosi-initrd", DocFormat.all(), resources=r) return with ( tempfile.TemporaryDirectory() as staging_dir, tempfile.TemporaryDirectory() as sandbox_tree, ): cmdline: list[PathString] = [ "mkosi", "--force", "--directory", "", "--format", args.format, "--output", args.output, "--output-directory", staging_dir, "--extra-tree", f"/usr/lib/modules/{args.kernel_version}:/usr/lib/modules/{args.kernel_version}", "--extra-tree=/usr/lib/firmware:/usr/lib/firmware", "--remove-files=/usr/lib/firmware/*-ucode", "--kernel-modules-exclude=.*", "--kernel-modules-include=host", "--build-sources", "", "--include=mkosi-initrd", ] # fmt: skip if args.kernel_image: cmdline += [ "--extra-tree", f"{args.kernel_image}:/usr/lib/modules/{args.kernel_version}/vmlinuz", ] # fmt: skip if args.debug: cmdline += ["--debug"] if args.debug_shell: cmdline += ["--debug-shell"] if os.getuid() == 0: cmdline += [ "--workspace-dir=/var/tmp", "--package-cache-dir=/var", "--cache-only=metadata", ] if args.format != OutputFormat.directory.value: cmdline += ["--output-mode=600"] cmdline += include_system_config("mkosi-initrd") # Make sure we don't use any of mkosi's default repositories. for p in ( "yum.repos.d/mkosi.repo", "apt/sources.list.d/mkosi.sources", "zypp/repos.d/mkosi.repo", "pacman.conf", ): (Path(sandbox_tree) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) (Path(sandbox_tree) / "etc" / p).touch() # Copy in the host's package manager configuration. for p in ( "dnf", "yum.repos.d/", "apt", "zypp", "pacman.conf", "pacman.d/", ): if not (Path("/etc") / p).exists(): continue (Path(sandbox_tree) / "etc" / p).parent.mkdir(parents=True, exist_ok=True) if (Path("/etc") / p).resolve().is_file(): shutil.copy2(Path("/etc") / p, Path(sandbox_tree) / "etc" / p) else: shutil.copytree( Path("/etc") / p, Path(sandbox_tree) / "etc" / p, ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True, ) cmdline += ["--sandbox-tree", sandbox_tree] cmdline += process_crypttab(staging_dir) if Path("/etc/kernel/cmdline").exists(): cmdline += ["--kernel-command-line", Path("/etc/kernel/cmdline").read_text()] # Resolve dnf binary to determine which version the host uses by default # (to avoid preferring dnf5 if the host uses dnf4) # as there's a much bigger chance that it has a populated dnf cache directory. 
run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env={"MKOSI_DNF": dnf.resolve().name} if (dnf := find_binary("dnf")) else {}, ) initrd_finalize(staging_dir, args.output, args.output_dir) if __name__ == "__main__": main() mkosi-25.3/mkosi/installer/000077500000000000000000000000001474711424400157145ustar00rootroot00000000000000mkosi-25.3/mkosi/installer/__init__.py000066400000000000000000000165321474711424400200340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from mkosi.config import Config, ConfigFeature, OutputFormat from mkosi.context import Context from mkosi.mounts import finalize_certificate_mounts from mkosi.run import apivfs_options, finalize_interpreter, finalize_passwd_symlinks, find_binary from mkosi.tree import rmtree from mkosi.util import PathString, flatten, startswith class PackageManager: @classmethod def executable(cls, config: Config) -> str: return "custom" @classmethod def subdir(cls, config: Config) -> Path: return Path("custom") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [] @classmethod def state_subdirs(cls, state: Path) -> list[Path]: return [] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return {} @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. # systemd's chroot detection doesn't work when unprivileged so tell it explicitly. "SYSTEMD_IN_CHROOT": "1", } if "SYSTEMD_HWDB_UPDATE_BYPASS" not in context.config.environment: env["SYSTEMD_HWDB_UPDATE_BYPASS"] = "1" if ( "KERNEL_INSTALL_BYPASS" not in context.config.environment and context.config.bootable != ConfigFeature.disabled ): env["KERNEL_INSTALL_BYPASS"] = "1" else: env |= { "BOOT_ROOT": "/boot", # Required to make 90-loaderentry.install put the right paths into the bootloader entry. "BOOT_MNT": "/boot", # Hack to tell dracut to not create a hostonly initrd when it's invoked by kernel-install. "hostonly_l": "no", } return context.config.environment | env @classmethod def env_cmd(cls, context: Context) -> list[PathString]: return ["env", *([f"{k}={v}" for k, v in cls.finalize_environment(context).items()])] @classmethod def mounts(cls, context: Context) -> list[PathString]: mounts = [ *finalize_certificate_mounts(context.config), "--bind", context.repository, "/repository", ] # fmt: skip if context.config.local_mirror and (mirror := startswith(context.config.local_mirror, "file://")): mounts += ["--ro-bind", mirror, mirror] subdir = context.config.distribution.package_manager(context.config).subdir(context.config) for d in ("cache", "lib"): src = context.metadata_dir / d / subdir mounts += ["--bind", src, Path("/var") / d / subdir] # If we're not operating on the configured package cache directory, we're operating on a snapshot # of the repository metadata. To make sure any downloaded packages are still cached in the # configured package cache directory in this scenario, we mount in the relevant directories from # the configured package cache directory. 
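# Illustrative example (the repository hash is hypothetical), assuming dnf5 is in use: a download directory # such as <package-cache-dir>/cache/libdnf5/fedora-1a2b3c/packages is bind-mounted over the snapshot's # /var/cache/libdnf5/fedora-1a2b3c/packages so that freshly downloaded packages still land in the # persistent cache.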
if d == "cache" and context.metadata_dir != context.config.package_cache_dir_or_default(): caches = context.config.distribution.package_manager(context.config).cache_subdirs(src) mounts += flatten( ( "--bind", context.config.package_cache_dir_or_default() / d / subdir / p.relative_to(src), Path("/var") / d / subdir / p.relative_to(src), ) for p in caches if ( context.config.package_cache_dir_or_default() / d / subdir / p.relative_to(src) ).exists() ) return mounts @classmethod def options(cls, *, root: PathString, apivfs: bool = True) -> list[PathString]: return [ *(apivfs_options() if apivfs else []), "--become-root", "--suppress-chown", # Make sure /etc/machine-id is not overwritten by any package manager post install scripts. "--ro-bind-try", Path(root) / "etc/machine-id", "/buildroot/etc/machine-id", # Nudge gpg to create its sockets in /run by making sure /run/user/0 exists. "--dir", "/run/user/0", # Some package managers (e.g. dpkg) read from the host's /etc/passwd instead of the buildroot's # /etc/passwd so we symlink /etc/passwd from the buildroot to make sure it gets used. *(finalize_passwd_symlinks("/buildroot") if apivfs else []), ] # fmt: skip @classmethod def apivfs_script_cmd(cls, context: Context) -> list[PathString]: return [ finalize_interpreter(bool(context.config.tools_tree)), "-SI", "/sandbox.py", "--bind", "/", "/", "--same-dir", "--bind", "/var/tmp", "/buildroot/var/tmp", *apivfs_options(), *cls.options(root="/buildroot"), ] # fmt: skip @classmethod def sandbox( cls, context: Context, *, apivfs: bool, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return context.sandbox( network=True, options=[ "--bind", context.root, "/buildroot", *cls.mounts(context), *cls.options(root=context.root, apivfs=apivfs), *options, ], ) # fmt: skip @classmethod def sync(cls, context: Context, force: bool) -> None: pass @classmethod def createrepo(cls, context: Context) -> None: pass def clean_package_manager_metadata(context: Context) -> None: """ Remove package manager metadata Try them all regardless of the distro: metadata is only removed if the package manager is not present in the image. """ subdir = context.config.distribution.package_manager(context.config).subdir(context.config) if context.config.clean_package_metadata == ConfigFeature.disabled: return if context.config.clean_package_metadata == ConfigFeature.auto and context.config.output_format in ( OutputFormat.directory, OutputFormat.tar, ): return # If cleaning is not explicitly requested, keep the repository metadata if we're building a directory or # tar image (which are often used as a base tree for extension images and thus should retain package # manager metadata) or if the corresponding package manager is installed in the image. 
executable = context.config.distribution.package_manager(context.config).executable(context.config) remove = [] for tool, paths in ( ("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]), ("dnf5", ["usr/lib/sysimage/libdnf5"]), ("dpkg", ["var/lib/dpkg"]), (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"]), ): # fmt: skip if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary( tool, root=context.root ): remove += [context.root / p for p in paths if (context.root / p).exists()] rmtree(*remove, sandbox=context.sandbox) mkosi-25.3/mkosi/installer/apt.py000066400000000000000000000243761474711424400170660ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import textwrap from collections.abc import Sequence from pathlib import Path from typing import Final, Optional from mkosi.config import PACKAGE_GLOBS, Config, ConfigFeature from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.log import die from mkosi.run import CompletedProcess, run, workdir from mkosi.sandbox import umask from mkosi.util import _FILE, PathString @dataclasses.dataclass(frozen=True) class AptRepository: types: tuple[str, ...] url: str suite: str components: tuple[str, ...] signedby: Optional[Path] def __str__(self) -> str: return textwrap.dedent( f"""\ Types: {" ".join(self.types)} URIs: {self.url} Suites: {self.suite} Components: {" ".join(self.components)} {"Signed-By" if self.signedby else "Trusted"}: {self.signedby or "yes"} """ ) class Apt(PackageManager): documentation_exclude_globs: Final[list[str]] = [ "usr/share/doc/*", "usr/share/man/*", "usr/share/groff/*", "usr/share/gtk-doc/*", "usr/share/info/*", ] @classmethod def executable(cls, config: Config) -> str: return "apt-get" @classmethod def subdir(cls, config: Config) -> Path: return Path("apt") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [cache / "archives"] @classmethod def dpkg_cmd(cls, command: str) -> list[PathString]: return [ command, "--admindir=/buildroot/var/lib/dpkg", "--root=/buildroot", ] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: cmd = cls.apivfs_script_cmd(context) return { **{ command: cmd + cls.env_cmd(context) + cls.cmd(context, command) for command in ( "apt", "apt-cache", "apt-cdrom", "apt-config", "apt-extracttemplates", "apt-get", "apt-key", "apt-mark", "apt-sortpkgs", ) }, **{ command: cmd + cls.dpkg_cmd(command) for command in ( "dpkg", "dpkg-query", ) }, "mkosi-install": ["apt-get", "install"], "mkosi-upgrade": ["apt-get", "upgrade"], "mkosi-remove": ["apt-get", "purge"], "mkosi-reinstall": ["apt-get", "install", "--reinstall"], } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: (context.sandbox_tree / "etc/apt").mkdir(exist_ok=True, parents=True) (context.sandbox_tree / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True) (context.sandbox_tree / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True) (context.sandbox_tree / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True) with umask(~0o755): # TODO: Drop once apt 2.5.4 is widely available. 
(context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True) (context.root / "var/lib/dpkg/status").touch() (context.root / "var/lib/dpkg/available").touch() # We have a special apt.conf outside of the sandbox tree that only configures "Dir::Etc" that we pass # to APT_CONFIG to tell apt it should read config files from /etc/apt in case this is overridden by # distributions. This is required because apt parses CLI configuration options after parsing its # configuration files and as such we can't use CLI options to tell apt where to look for # configuration files. config = context.sandbox_tree / "etc/apt.conf" if not config.exists(): config.write_text( textwrap.dedent( """\ Dir::Etc "/etc/apt"; """ ) ) sources = context.sandbox_tree / "etc/apt/sources.list.d/mkosi.sources" if not sources.exists(): for repo in repositories: if repo.signedby and not (context.config.tools() / str(repo.signedby).lstrip("/")).exists(): die( f"Keyring for repo {repo.url} not found at {repo.signedby}", hint="Make sure the right keyring package (e.g. debian-archive-keyring, " "kali-archive-keyring or ubuntu-keyring) is installed", ) with sources.open("w") as f: for repo in repositories: f.write(str(repo)) @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "APT_CONFIG": "/etc/apt.conf", "DEBIAN_FRONTEND": "noninteractive", "DEBCONF_INTERACTIVE_SEEN": "true", } if "INITRD" not in context.config.environment and context.config.bootable != ConfigFeature.disabled: env["INITRD"] = "No" return super().finalize_environment(context) | env @classmethod def cmd(cls, context: Context, command: str = "apt-get") -> list[PathString]: debarch = context.config.distribution.architecture(context.config.architecture) cmdline: list[PathString] = [ command, "-o", f"APT::Architecture={debarch}", "-o", f"APT::Architectures={debarch}", "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}", "-o", "APT::Immediate-Configure=off", "-o", "APT::Get::Assume-Yes=true", "-o", "APT::Get::AutomaticRemove=true", "-o", "APT::Get::Allow-Change-Held-Packages=true", "-o", "APT::Get::Allow-Remove-Essential=true", "-o", "APT::Sandbox::User=root", "-o", "Acquire::AllowReleaseInfoChange=true", "-o", "Acquire::Check-Valid-Until=false", "-o", "Dir::Cache=/var/cache/apt", "-o", "Dir::State=/var/lib/apt", "-o", "Dir::Log=/var/log/apt", "-o", "Dir::State::Status=/buildroot/var/lib/dpkg/status", "-o", f"Dir::Bin::DPkg={context.config.find_binary('dpkg')}", "-o", "Debug::NoLocking=true", "-o", "DPkg::Options::=--root=/buildroot", "-o", "DPkg::Options::=--force-unsafe-io", "-o", "DPkg::Options::=--force-architecture", "-o", "DPkg::Options::=--force-depends", "-o", "DPkg::Options::=--no-debsig", "-o", "DPkg::Use-Pty=false", "-o", "DPkg::Install::Recursive::Minimum=1000", "-o", "pkgCacheGen::ForceEssential=,", ] # fmt: skip if not context.config.repository_key_check: cmdline += [ "-o", "Acquire::AllowInsecureRepositories=true", "-o", "Acquire::AllowDowngradeToInsecureRepositories=true", "-o", "APT::Get::AllowUnauthenticated=true", ] # fmt: skip if not context.config.with_docs: cmdline += [ f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs ] cmdline += ["--option=DPkg::Options::=--path-include=/usr/share/doc/*/copyright"] if context.config.proxy_url: cmdline += [ "-o", f"Acquire::http::Proxy={context.config.proxy_url}", "-o", f"Acquire::https::Proxy={context.config.proxy_url}", ] # fmt: skip return cmdline @classmethod def invoke( cls, context: Context, 
operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, options: Sequence[PathString] = (), stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs, options=options), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def sync(cls, context: Context, force: bool) -> None: cls.invoke(context, "update") @classmethod def createrepo(cls, context: Context) -> None: if not (conf := context.repository / "conf/distributions").exists(): conf.parent.mkdir(exist_ok=True) conf.write_text( textwrap.dedent( f"""\ Origin: mkosi Label: mkosi Architectures: {context.config.distribution.architecture(context.config.architecture)} Codename: mkosi Components: main Description: mkosi local repository """ ) ) run( [ "reprepro", "--ignore=extension", "includedeb", "mkosi", *(d.name for glob in PACKAGE_GLOBS for d in context.repository.glob(glob) if "deb" in glob), ], sandbox=context.sandbox( options=[ "--bind", context.repository, workdir(context.repository), "--chdir", workdir(context.repository), ], ), ) # fmt: skip (context.sandbox_tree / "etc/apt/sources.list.d").mkdir(parents=True, exist_ok=True) (context.sandbox_tree / "etc/apt/sources.list.d/mkosi-local.sources").write_text( textwrap.dedent( """\ Enabled: yes Types: deb URIs: file:///repository Suites: mkosi Components: main Trusted: yes """ ) ) cls.invoke( context, "update", arguments=[ "-o", "Dir::Etc::sourcelist=sources.list.d/mkosi-local.sources", "-o", "Dir::Etc::sourceparts=-", "-o", "APT::Get::List-Cleanup=0", ], ) # fmt: skip mkosi-25.3/mkosi/installer/dnf.py000066400000000000000000000222741474711424400170440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import textwrap from collections.abc import Sequence from pathlib import Path from typing import Optional from mkosi.config import Cacheonly, Config from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.installer.rpm import RpmRepository, rpm_cmd from mkosi.log import ARG_DEBUG from mkosi.run import CompletedProcess, run, workdir from mkosi.util import _FILE, PathString class Dnf(PackageManager): @classmethod def executable(cls, config: Config) -> str: # Allow the user to override autodetection with an environment variable dnf = config.environment.get("MKOSI_DNF") return Path(dnf or config.find_binary("dnf5") or "dnf").name @classmethod def subdir(cls, config: Config) -> Path: return Path("libdnf5" if cls.executable(config) == "dnf5" else "dnf") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [ p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name ] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "dnf": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(), "mkosi-install": ["dnf", "install"], "mkosi-upgrade": ["dnf", "upgrade"], "mkosi-remove": ["dnf", "remove"], "mkosi-reinstall": ["dnf", "reinstall"], } # fmt: skip @classmethod def setup( cls, context: Context, repositories: Sequence[RpmRepository], filelists: bool = True, metadata_expire: Optional[str] = None, ) -> None: (context.sandbox_tree / "etc/dnf/vars").mkdir(parents=True, exist_ok=True) (context.sandbox_tree / "etc/yum.repos.d").mkdir(parents=True, exist_ok=True) config = context.sandbox_tree / "etc/dnf/dnf.conf" if not config.exists(): config.parent.mkdir(exist_ok=True, 
parents=True) with config.open("w") as f: # Make sure we download filelists so all dependencies can be resolved. # See https://bugzilla.redhat.com/show_bug.cgi?id=2180842 if cls.executable(context.config) == "dnf5" and filelists: f.write("[main]\noptional_metadata_types=filelists\n") # The versionlock plugin will fail if enabled without a configuration file so lets' write a noop # configuration file to make it happy which can be overridden by users. versionlock = context.sandbox_tree / "etc/dnf/plugins/versionlock.conf" if not versionlock.exists(): versionlock.parent.mkdir(parents=True, exist_ok=True) versionlock.write_text( textwrap.dedent( """\ [main] enabled=0 locklist=/dev/null """ ) ) repofile = context.sandbox_tree / "etc/yum.repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} """ ) ) if repo.sslcacert: f.write(f"sslcacert={repo.sslcacert}\n") if repo.sslclientcert: f.write(f"sslclientcert={repo.sslclientcert}\n") if repo.sslclientkey: f.write(f"sslclientkey={repo.sslclientkey}\n") if repo.priority: f.write(f"priority={repo.priority}\n") if metadata_expire: f.write(f"metadata_expire={metadata_expire}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { "RPM_FORCE_DEBIAN": "1", } @classmethod def cmd( cls, context: Context, cached_metadata: bool = True, ) -> list[PathString]: dnf = cls.executable(context.config) cmdline: list[PathString] = [ dnf, "--assumeyes", "--best", f"--releasever={context.config.release}", "--installroot=/buildroot", "--setopt=keepcache=1", "--setopt=logdir=/var/log", f"--setopt=cachedir=/var/cache/{cls.subdir(context.config)}", f"--setopt=persistdir=/var/lib/{cls.subdir(context.config)}", f"--setopt=install_weak_deps={int(context.config.with_recommends)}", "--setopt=check_config_file_age=0", "--disable-plugin=*" if dnf == "dnf5" else "--disableplugin=*", ] # fmt: skip for plugin in ("builddep", "versionlock"): cmdline += ["--enable-plugin", plugin] if dnf == "dnf5" else ["--enableplugin", plugin] if ARG_DEBUG.get(): cmdline += ["--setopt=debuglevel=10"] if not context.config.repository_key_check: cmdline += ["--nogpgcheck"] if context.config.repositories: opt = "--enable-repo" if dnf == "dnf5" else "--enablerepo" cmdline += [f"{opt}={repo}" for repo in context.config.repositories] if context.config.cacheonly == Cacheonly.always: cmdline += ["--cacheonly"] elif cached_metadata: cmdline += ["--setopt=metadata_expire=never"] if dnf == "dnf5": cmdline += ["--setopt=cacheonly=metadata"] if not context.config.architecture.is_native(): cmdline += [ f"--forcearch={context.config.distribution.architecture(context.config.architecture)}" ] if not context.config.with_docs: cmdline += ["--no-docs" if dnf == "dnf5" else "--nodocs"] if dnf == "dnf5": cmdline += ["--use-host-config"] else: cmdline += [ "--config=/etc/dnf/dnf.conf", "--setopt=reposdir=/etc/yum.repos.d", "--setopt=varsdir=/etc/dnf/vars", ] if context.config.proxy_url: cmdline += [f"--setopt=proxy={context.config.proxy_url}"] if context.config.proxy_peer_certificate: cmdline += ["--setopt=proxy_sslcacert=/proxy.cacert"] if context.config.proxy_client_certificate: cmdline += 
["--setopt=proxy_sslclientcert=/proxy.clientcert"] if context.config.proxy_client_key: cmdline += ["--setopt=proxy_sslclientkey=/proxy.clientkey"] return cmdline @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, cached_metadata: bool = True, ) -> CompletedProcess: try: return run( cls.cmd(context, cached_metadata=cached_metadata) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) finally: # dnf interprets the log directory relative to the install root so there's nothing we can do but # to remove the log files from the install root afterwards. if (context.root / "var/log").exists(): for p in (context.root / "var/log").iterdir(): if any(p.name.startswith(prefix) for prefix in ("dnf", "hawkey", "yum")): p.unlink() @classmethod def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> None: cls.invoke( context, "makecache", arguments=[*(["--refresh"] if force else []), *arguments], cached_metadata=False, ) @classmethod def createrepo(cls, context: Context) -> None: run( ["createrepo_c", workdir(context.repository)], sandbox=context.sandbox(options=["--bind", context.repository, workdir(context.repository)]), ) (context.sandbox_tree / "etc/yum.repos.d/mkosi-local.repo").write_text( textwrap.dedent( """\ [mkosi] name=mkosi baseurl=file:///repository gpgcheck=0 metadata_expire=never priority=10 """ ) ) cls.sync(context, force=True, arguments=["--disablerepo=*", "--enablerepo=mkosi"]) mkosi-25.3/mkosi/installer/pacman.py000066400000000000000000000216761474711424400175410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import shutil import textwrap from collections.abc import Sequence from contextlib import AbstractContextManager from pathlib import Path from mkosi.config import Config from mkosi.context import Context from mkosi.distributions import detect_distribution from mkosi.installer import PackageManager from mkosi.log import complete_step from mkosi.run import CompletedProcess, run, workdir from mkosi.sandbox import umask from mkosi.tree import copy_tree from mkosi.util import _FILE, PathString from mkosi.versioncomp import GenericVersion @dataclasses.dataclass(frozen=True) class PacmanRepository: id: str url: str class Pacman(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "pacman" @classmethod def subdir(cls, config: Config) -> Path: return Path("pacman") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [cache / "pkg"] @classmethod def state_subdirs(cls, state: Path) -> list[Path]: return [state / "local"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "pacman": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "mkosi-install": ["pacman", "--sync", "--needed"], "mkosi-upgrade": ["pacman", "--sync", "--sysupgrade", "--needed"], "mkosi-remove": ["pacman", "--remove", "--recursive", "--nosave"], "mkosi-reinstall": ["pacman", "--sync"], } # fmt: skip @classmethod def mounts(cls, context: Context) -> list[PathString]: mounts = [ *super().mounts(context), # pacman writes downloaded packages to the first writable cache directory. We don't want it to # write to our local repository directory so we expose it as a read-only directory to pacman. 
"--ro-bind", context.repository, "/var/cache/pacman/mkosi", "--ro-bind", context.keyring_dir, "/etc/pacman.d/gnupg", ] # fmt: skip if (context.root / "var/lib/pacman/local").exists(): # pacman reuses the same directory for the sync databases and the local database containing the # list of installed packages. The former should go in the cache directory, the latter should go # in the image, so we bind mount the local directory from the image to make sure that happens. mounts += ["--bind", context.root / "var/lib/pacman/local", "/var/lib/pacman/local"] return mounts @classmethod def setup(cls, context: Context, repositories: Sequence[PacmanRepository]) -> None: if context.config.repository_key_check: sig_level = "Required DatabaseOptional" else: # If we are using a single local mirror built on the fly there # will be no signatures sig_level = "Never" with umask(~0o755): (context.root / "var/lib/pacman/local").mkdir(parents=True, exist_ok=True) (context.sandbox_tree / "etc/mkosi-local.conf").touch() config = context.sandbox_tree / "etc/pacman.conf" if config.exists(): # If DownloadUser is specified, remove it as the user won't be available in the sandbox. lines = config.read_text().splitlines() lines = [line for line in lines if not line.strip().startswith("DownloadUser")] config.write_text("\n".join(lines)) return config.parent.mkdir(exist_ok=True, parents=True) with config.open("w") as f: f.write( textwrap.dedent( f"""\ [options] SigLevel = {sig_level} LocalFileSigLevel = Optional ParallelDownloads = 5 Architecture = {context.config.distribution.architecture(context.config.architecture)} """ ) ) if not context.config.with_docs: f.write( textwrap.dedent( """\ NoExtract = usr/share/doc/* NoExtract = usr/share/man/* NoExtract = usr/share/groff/* NoExtract = usr/share/gtk-doc/* NoExtract = usr/share/info/* """ ) ) # This has to go first so that our local repository always takes precedence over any other ones. f.write("Include = /etc/mkosi-local.conf\n") if any((context.sandbox_tree / "etc/pacman.d/").glob("*.conf")): f.write( textwrap.dedent( """\ Include = /etc/pacman.d/*.conf """ ) ) for repo in repositories: f.write( textwrap.dedent( f"""\ [{repo.id}] Server = {repo.url} """ ) ) @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "pacman", "--root=/buildroot", "--logfile=/dev/null", "--dbpath=/var/lib/pacman", # Make sure pacman looks at our local repository first by putting it as the first cache # directory. We mount it read-only so the second directory will still be used for writing new # cache entries. "--cachedir=/var/cache/pacman/mkosi", "--cachedir=/var/cache/pacman/pkg", "--hookdir=/buildroot/etc/pacman.d/hooks", "--arch", context.config.distribution.architecture(context.config.architecture), "--color", "auto", "--noconfirm", ] # fmt: skip @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def keyring(cls, context: Context) -> None: def sandbox() -> AbstractContextManager[list[PathString]]: return cls.sandbox( context, apivfs=False, # By default the keyring is mounted read-only so we override the read-only mount with a # writable mount to make it writable for the following pacman-key commands. 
options=["--bind", context.keyring_dir, "/etc/pacman.d/gnupg"], ) if ( (d := detect_distribution(context.config.tools())[0]) and d.is_apt_distribution() and (context.sandbox_tree / "usr/share/pacman/keyrings").exists() ): # pacman on Debian/Ubuntu looks for keyrings in /usr/share/keyrings so make sure all sandbox # trees keyrings are available in that location as well. (context.sandbox_tree / "usr/share").mkdir(parents=True, exist_ok=True) copy_tree( context.sandbox_tree / "usr/share/pacman/keyrings", context.sandbox_tree / "usr/share/keyrings", dereference=True, sandbox=context.sandbox, ) with complete_step("Populating pacman keyring"): run(["pacman-key", "--init"], sandbox=sandbox()) run(["pacman-key", "--populate"], sandbox=sandbox()) @classmethod def sync(cls, context: Context, force: bool) -> None: cls.invoke(context, "--sync", ["--refresh", *(["--refresh"] if force else [])]) @classmethod def createrepo(cls, context: Context) -> None: run( [ "repo-add", "--quiet", workdir(context.repository / "mkosi.db.tar"), *sorted( (workdir(p) for p in context.repository.glob("*.pkg.tar*")), key=lambda p: GenericVersion(Path(p).name), ), ], sandbox=context.sandbox(options=["--bind", context.repository, workdir(context.repository)]), ) (context.sandbox_tree / "etc/mkosi-local.conf").write_text( textwrap.dedent( """\ [mkosi] Server = file:///i/dont/exist SigLevel = Never Usage = Install Search Upgrade """ ) ) # pacman can't sync a single repository, so we go behind its back and do it ourselves. shutil.move(context.repository / "mkosi.db.tar", context.metadata_dir / "lib/pacman/sync/mkosi.db") mkosi-25.3/mkosi/installer/rpm.py000066400000000000000000000106171474711424400170710ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import subprocess import textwrap from pathlib import Path from typing import Literal, Optional, overload from mkosi.context import Context from mkosi.distributions import Distribution from mkosi.log import die from mkosi.run import run from mkosi.util import PathString @dataclasses.dataclass(frozen=True) class RpmRepository: id: str url: str gpgurls: tuple[str, ...] enabled: bool = True sslcacert: Optional[Path] = None sslclientkey: Optional[Path] = None sslclientcert: Optional[Path] = None priority: Optional[int] = None @overload def find_rpm_gpgkey( context: Context, key: str, fallback: Optional[str] = None, *, required: Literal[True] = True, ) -> str: ... @overload def find_rpm_gpgkey( context: Context, key: str, fallback: Optional[str] = None, *, required: bool, ) -> Optional[str]: ... 
def find_rpm_gpgkey( context: Context, key: str, fallback: Optional[str] = None, *, required: bool = True, ) -> Optional[str]: root = context.config.tools() if context.config.tools_tree_certificates else Path("/") if gpgpath := next((root / "usr/share/distribution-gpg-keys").rglob(key), None): return (Path("/") / gpgpath.relative_to(root)).as_uri() if gpgpath := next(Path(context.sandbox_tree / "etc/pki/rpm-gpg").rglob(key), None): return (Path("/") / gpgpath.relative_to(context.sandbox_tree)).as_uri() if fallback and context.config.repository_key_fetch: return fallback if required: die( f"{key} GPG key not found in /usr/share/distribution-gpg-keys", hint="Make sure the distribution-gpg-keys package is installed", ) return None def setup_rpm( context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm", dbbackend: Optional[str] = None, ) -> None: confdir = context.sandbox_tree / "etc/rpm" confdir.mkdir(parents=True, exist_ok=True) if not (confdir / "macros.lang").exists() and context.config.locale: (confdir / "macros.lang").write_text(f"%_install_langs {context.config.locale}") if not (confdir / "macros.dbpath").exists(): (confdir / "macros.dbpath").write_text(f"%_dbpath {dbpath}") if dbbackend: (confdir / "macros.db_backend").write_text(f"%_db_backend {dbbackend}") plugindir = Path( run( ["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(), stdout=subprocess.PIPE, ).stdout.strip() ) if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists(): with (confdir / "macros.disable-plugins").open("w") as f: for plugin in plugindir.iterdir(): f.write(f"%__transaction_{plugin.stem} %{{nil}}\n") if context.config.distribution == Distribution.opensuse or ( context.config.distribution.is_centos_variant() and context.config.release == "9" ): # Write an rpm sequoia policy that makes sure "sha1.second_preimage_resistance = always" is # configured and makes sure that a minimal config is in place to make sure builds succeed. # TODO: Remove when distributions GPG keys are accepted by the default rpm-sequoia config everywhere. 
p = context.sandbox_tree / "etc/crypto-policies/back-ends/rpm-sequoia.config" p.parent.mkdir(parents=True, exist_ok=True) prev = p.read_text() if p.exists() else "" with p.open("w") as f: for line in prev.splitlines(keepends=True): if line.startswith("sha1.second_preimage_resistance"): f.write('sha1.second_preimage_resistance = "always"\n') else: f.write(line) if not any(line.startswith("[hash_algorithms]") for line in prev.splitlines()): f.write( textwrap.dedent( """ [hash_algorithms] sha1.second_preimage_resistance = "always" sha224 = "always" sha256 = "always" sha384 = "always" sha512 = "always" default_disposition = "never" """ ) ) def rpm_cmd() -> list[PathString]: return ["env", "HOME=/", "rpm", "--root=/buildroot"] mkosi-25.3/mkosi/installer/zypper.py000066400000000000000000000132771474711424400176310ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import hashlib import textwrap from collections.abc import Sequence from pathlib import Path from mkosi.config import Config, yes_no from mkosi.context import Context from mkosi.installer import PackageManager from mkosi.installer.rpm import RpmRepository, rpm_cmd from mkosi.run import CompletedProcess, run, workdir from mkosi.util import _FILE, PathString class Zypper(PackageManager): @classmethod def executable(cls, config: Config) -> str: return "zypper" @classmethod def subdir(cls, config: Config) -> Path: return Path("zypp") @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: return [cache / "packages"] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: install: list[PathString] = [ "zypper", "install", "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", ] # fmt: skip return { "zypper": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(), "mkosi-install": install, "mkosi-upgrade": ["zypper", "update"], "mkosi-remove": ["zypper", "remove", "--clean-deps"], "mkosi-reinstall": install + ["--force"], } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None: config = context.sandbox_tree / "etc/zypp/zypp.conf" config.parent.mkdir(exist_ok=True, parents=True) # rpm.install.excludedocs can only be configured in zypp.conf so we append to any user provided # config file. Let's also bump the refresh delay to the same default as dnf which is 48 hours. with config.open("a") as f: f.write( textwrap.dedent( f""" [main] rpm.install.excludedocs = {yes_no(not context.config.with_docs)} repo.refresh.delay = {48 * 60} """ ) ) repofile = context.sandbox_tree / "etc/zypp/repos.d/mkosi.repo" if not repofile.exists(): repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: # zypper uses the repo ID as its cache key which is unsafe so add a hash of the url used # to it to make sure a unique cache is used for each repository. We use roughly the same # algorithm here that dnf uses as well. 
key = hashlib.sha256(repo.url.encode()).hexdigest()[:16] f.write( textwrap.dedent( f"""\ [{repo.id}-{key}] name={repo.id} {repo.url} gpgcheck=1 enabled={int(repo.enabled)} autorefresh=0 keeppackages=1 """ ) ) if repo.priority: f.write(f"priority={repo.priority}\n") for i, url in enumerate(repo.gpgurls): f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ") f.write(f"{url}\n") f.write("\n") @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: return super().finalize_environment(context) | { "ZYPP_CONF": "/etc/zypp/zypp.conf", "RPM_FORCE_DEBIAN": "1", } @classmethod def cmd(cls, context: Context) -> list[PathString]: return [ "zypper", "--installroot=/buildroot", "--cache-dir=/var/cache/zypp", "--non-interactive", "--no-refresh", f"--releasever={context.config.release}", *(["--gpg-auto-import-keys"] if context.config.repository_key_fetch else []), *(["--no-gpg-checks"] if not context.config.repository_key_check else []), *([f"--plus-content={repo}" for repo in context.config.repositories]), ] @classmethod def invoke( cls, context: Context, operation: str, arguments: Sequence[str] = (), *, apivfs: bool = False, stdout: _FILE = None, ) -> CompletedProcess: return run( cls.cmd(context) + [operation, *arguments], sandbox=cls.sandbox(context, apivfs=apivfs), env=cls.finalize_environment(context), stdout=stdout, ) @classmethod def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> None: cls.invoke(context, "refresh", [*(["--force"] if force else []), *arguments]) @classmethod def createrepo(cls, context: Context) -> None: run( ["createrepo_c", workdir(context.repository)], sandbox=context.sandbox(options=["--bind", context.repository, workdir(context.repository)]), ) (context.sandbox_tree / "etc/zypp/repos.d/mkosi-local.repo").write_text( textwrap.dedent( """\ [mkosi] name=mkosi baseurl=file:///repository gpgcheck=0 autorefresh=0 keeppackages=0 priority=10 """ ) ) cls.sync(context, force=True, arguments=["mkosi"]) mkosi-25.3/mkosi/kmod.py000066400000000000000000000223411474711424400152250ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import itertools import logging import os import re import subprocess from collections.abc import Iterable, Iterator from pathlib import Path from mkosi.context import Context from mkosi.log import complete_step, log_step from mkosi.run import chroot_cmd, run from mkosi.sandbox import chase from mkosi.util import chdir, parents_below def loaded_modules() -> list[str]: # Loaded modules are listed with underscores but the filenames might use dashes instead. 
return [ rf"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines() ] def filter_kernel_modules( root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str], ) -> list[Path]: modulesd = Path("usr/lib/modules") / kver with chdir(root): modules = set(modulesd.rglob("*.ko*")) keep = set() if include: regex = re.compile("|".join(include)) for m in modules: rel = os.fspath(Path(*m.parts[5:])) if regex.search(rel): keep.add(rel) if exclude: remove = set() regex = re.compile("|".join(exclude)) for m in modules: rel = os.fspath(Path(*m.parts[5:])) if rel not in keep and regex.search(rel): remove.add(m) modules -= remove return sorted(modules) def normalize_module_name(name: str) -> str: return name.replace("_", "-") def module_path_to_name(path: Path) -> str: return normalize_module_name(path.name.partition(".")[0]) def modinfo(context: Context, kver: str, modules: Iterable[str]) -> str: cmdline = ["modinfo", "--set-version", kver, "--null"] if context.config.output_format.is_extension_image() and not context.config.overlay: cmdline += ["--basedir", "/buildroot"] sandbox = context.sandbox(options=["--ro-bind", context.root, "/buildroot"]) else: sandbox = chroot_cmd(root=context.root) return run( ["modinfo", "--set-version", kver, "--null", *modules], stdout=subprocess.PIPE, sandbox=sandbox, ).stdout.strip() def resolve_module_dependencies( context: Context, kver: str, modules: Iterable[str], ) -> tuple[set[Path], set[Path]]: """ Returns a tuple of lists containing the paths to the module and firmware dependencies of the given list of module names (including the given module paths themselves). The paths are returned relative to the root directory. """ modulesd = Path("usr/lib/modules") / kver if (p := context.root / modulesd / "modules.builtin").exists(): builtin = set(module_path_to_name(Path(m)) for m in p.read_text().splitlines()) else: builtin = set() with chdir(context.root): allmodules = set(modulesd.rglob("*.ko*")) nametofile = {module_path_to_name(m): m for m in allmodules} log_step("Running modinfo to fetch kernel module dependencies") # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to # modinfo and it'll process them all in a single go. We get the modinfo for all modules to build two maps # that map the path of the module to its module dependencies and its firmware dependencies # respectively. Because there's more kernel modules than the max number of accepted CLI arguments, we # split the modules list up into chunks. info = "" for i in range(0, len(nametofile.keys()), 8500): chunk = list(nametofile.keys())[i : i + 8500] info += modinfo(context, kver, chunk) log_step("Calculating required kernel modules and firmware") moddep = {} firmwaredep = {} depends: set[str] = set() firmware: set[Path] = set() with chdir(context.root): for line in info.split("\0"): key, sep, value = line.partition(":") if not sep: key, sep, value = line.partition("=") value = value.strip() if key == "depends": depends.update(normalize_module_name(d) for d in value.split(",") if d) elif key == "softdep": # softdep is delimited by spaces and can contain strings like pre: and post: so discard # anything that ends with a colon. 
depends.update(normalize_module_name(d) for d in value.split() if not d.endswith(":")) elif key == "firmware": glob = "" if value.endswith("*") else "*" fw = [f for f in Path("usr/lib/firmware").glob(f"{value}{glob}")] if not fw: logging.debug(f"Not including missing firmware /usr/lib/firmware/{value} in the initrd") firmware.update(fw) elif key == "name": # The file names use dashes, but the module names use underscores. We track the names in # terms of the file names, since the depends use dashes and therefore filenames as well. name = normalize_module_name(value) moddep[name] = depends firmwaredep[name] = firmware depends = set() firmware = set() todo = [*builtin, *modules] mods = set() firmware = set() while todo: m = todo.pop() if m in mods: continue depends = moddep.get(m, set()) for d in depends: if d not in nametofile and d not in builtin: logging.warning(f"{d} is a dependency of {m} but is not installed, ignoring ") mods.add(m) todo += depends firmware.update(firmwaredep.get(m, set())) return set(nametofile[m] for m in mods if m in nametofile), set(firmware) def gen_required_kernel_modules( context: Context, kver: str, *, include: Iterable[str], exclude: Iterable[str], ) -> Iterator[Path]: modulesd = Path("usr/lib/modules") / kver # There is firmware in /usr/lib/firmware that is not depended on by any modules so if any firmware was # installed we have to take the slow path to make sure we don't copy firmware into the initrd that is not # depended on by any kernel modules. if exclude or (context.root / "usr/lib/firmware").glob("*"): modules = filter_kernel_modules(context.root, kver, include=include, exclude=exclude) names = [module_path_to_name(m) for m in modules] mods, firmware = resolve_module_dependencies(context, kver, names) else: logging.debug( "No modules excluded and no firmware installed, using kernel modules generation fast path" ) with chdir(context.root): mods = set(modulesd.rglob("*.ko*")) firmware = set() # Some firmware dependencies are symbolic links, so the targets for those must be included in the list # of required firmware files too. Intermediate symlinks are not included, and so links pointing to links # results in dangling symlinks in the final image. 
for fw in firmware.copy(): if (context.root / fw).is_symlink(): target = Path(chase(os.fspath(context.root), os.fspath(fw))) if target.exists(): firmware.add(target.relative_to(context.root)) yield from sorted( itertools.chain( { p.relative_to(context.root) for f in mods | firmware for p in parents_below(context.root / f, context.root / "usr/lib") }, mods, firmware, (p.relative_to(context.root) for p in (context.root / modulesd).glob("modules*")), ) ) if (modulesd / "vdso").exists(): if not mods: yield from ( p.relative_to(context.root) for p in parents_below(context.root / modulesd / "vdso", context.root / "usr/lib") ) yield modulesd / "vdso" yield from sorted(p.relative_to(context.root) for p in (context.root / modulesd / "vdso").iterdir()) def process_kernel_modules( context: Context, kver: str, *, include: Iterable[str], exclude: Iterable[str], ) -> None: if not exclude: return modulesd = Path("usr/lib/modules") / kver firmwared = Path("usr/lib/firmware") with complete_step("Applying kernel module filters"): required = set(gen_required_kernel_modules(context, kver, include=include, exclude=exclude)) with chdir(context.root): modules = sorted(modulesd.rglob("*.ko*"), reverse=True) firmware = sorted(firmwared.rglob("*"), reverse=True) for m in modules: if m in required: continue p = context.root / m if p.is_file() or p.is_symlink(): p.unlink() elif p.exists(): p.rmdir() for fw in firmware: if fw in required: continue if any(fw.is_relative_to(firmwared / d) for d in ("amd-ucode", "intel-ucode")): continue p = context.root / fw if p.is_file() or p.is_symlink(): p.unlink() if p.parent != context.root / firmwared and not any(p.parent.iterdir()): p.parent.rmdir() elif p.exists(): p.rmdir() mkosi-25.3/mkosi/log.py000066400000000000000000000052071474711424400150560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import contextvars import logging import os import sys from collections.abc import Iterator from typing import Any, NoReturn, Optional from mkosi.sandbox import Style # This global should be initialized after parsing arguments ARG_DEBUG = contextvars.ContextVar("debug", default=False) ARG_DEBUG_SHELL = contextvars.ContextVar("debug-shell", default=False) ARG_DEBUG_SANDBOX = contextvars.ContextVar("debug-sandbox", default=False) LEVEL = 0 def die(message: str, *, hint: Optional[str] = None) -> NoReturn: logging.error(f"{message}") if hint: logging.info(f"({hint})") sys.exit(1) def log_step(text: str) -> None: prefix = " " * LEVEL if sys.exc_info()[0]: # We are falling through exception handling blocks. # De-emphasize this step here, so the user can tell more # easily which step generated the exception. The exception # or error will only be printed after we finish cleanup. 
logging.info(f"{prefix}({text})") else: logging.info(f"{prefix}{Style.bold}{text}{Style.reset}") def log_notice(text: str) -> None: logging.info(f"{Style.bold}{text}{Style.reset}") @contextlib.contextmanager def complete_step(text: str, text2: Optional[str] = None) -> Iterator[list[Any]]: global LEVEL log_step(text) LEVEL += 1 try: args: list[Any] = [] yield args finally: LEVEL -= 1 assert LEVEL >= 0 if text2 is not None: log_step(text2.format(*args)) class Formatter(logging.Formatter): def __init__(self, fmt: Optional[str] = None, *args: Any, **kwargs: Any) -> None: fmt = fmt or "%(message)s" self.formatters = { logging.DEBUG: logging.Formatter(f"‣ {Style.gray}{fmt}{Style.reset}"), logging.INFO: logging.Formatter(f"‣ {fmt}"), logging.WARNING: logging.Formatter(f"‣ {Style.yellow}{fmt}{Style.reset}"), logging.ERROR: logging.Formatter(f"‣ {Style.red}{fmt}{Style.reset}"), logging.CRITICAL: logging.Formatter(f"‣ {Style.red}{Style.bold}{fmt}{Style.reset}"), } # fmt: skip super().__init__(fmt, *args, **kwargs) def format(self, record: logging.LogRecord) -> str: return self.formatters[record.levelno].format(record) def log_setup(default_log_level: str = "info") -> None: handler = logging.StreamHandler(stream=sys.stderr) handler.setFormatter(Formatter()) logging.getLogger().addHandler(handler) logging.getLogger().setLevel( logging.getLevelName(os.getenv("SYSTEMD_LOG_LEVEL", default_log_level).upper()) ) mkosi-25.3/mkosi/manifest.py000066400000000000000000000246271474711424400161120ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import dataclasses import datetime import json import subprocess import textwrap from pathlib import Path from typing import IO, Any, Optional from mkosi.config import ManifestFormat from mkosi.context import Context from mkosi.distributions import PackageType from mkosi.installer.apt import Apt from mkosi.log import complete_step from mkosi.run import run @dataclasses.dataclass class PackageManifest: """A description of a package The fields used here must match https://systemd.io/COREDUMP_PACKAGE_METADATA/#well-known-keys. 
""" type: str name: str version: str architecture: str size: int def as_dict(self) -> dict[str, str]: return { "type": self.type, "name": self.name, "version": self.version, "architecture": self.architecture, } @dataclasses.dataclass class SourcePackageManifest: name: str changelog: Optional[str] packages: list[PackageManifest] = dataclasses.field(default_factory=list) def add(self, package: PackageManifest) -> None: self.packages.append(package) def report(self) -> str: size = sum(p.size for p in self.packages) t = textwrap.dedent( f"""\ SourcePackage: {self.name} Packages: {" ".join(p.name for p in self.packages)} Size: {size} """ ) if self.changelog: t += f"""\nChangelog:\n{self.changelog}\n""" return t def parse_pkg_desc(f: Path) -> tuple[str, str, str, str]: name = version = base = arch = "" with f.open() as desc: for line in desc: line = line.strip() if line == "%NAME%": name = next(desc).strip() elif line == "%VERSION%": version = next(desc).strip() elif line == "%BASE%": base = next(desc).strip() elif line == "%ARCH%": arch = next(desc).strip() break return name, version, base, arch @dataclasses.dataclass class Manifest: context: Context packages: list[PackageManifest] = dataclasses.field(default_factory=list) source_packages: dict[str, SourcePackageManifest] = dataclasses.field(default_factory=dict) _init_timestamp: datetime.datetime = dataclasses.field(init=False, default_factory=datetime.datetime.now) def need_source_info(self) -> bool: return ManifestFormat.changelog in self.context.config.manifest_format def record_packages(self) -> None: with complete_step("Recording packages in manifest…"): if self.context.config.distribution.package_type() == PackageType.rpm: self.record_rpm_packages() if self.context.config.distribution.package_type() == PackageType.deb: self.record_deb_packages() if self.context.config.distribution.package_type() == PackageType.pkg: self.record_pkg_packages() def record_rpm_packages(self) -> None: c = run( [ "rpm", "--root=/buildroot", "--query", "--all", "--queryformat", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n", ], stdout=subprocess.PIPE, sandbox=( self.context.sandbox(options=["--ro-bind", self.context.root, "/buildroot"]) ), ) # fmt: skip packages = sorted(c.stdout.splitlines()) for package in packages: nevra, srpm, name, arch, size, installtime = package.split("\t") assert nevra.startswith(f"{name}-") evra = nevra.removeprefix(f"{name}-") # Some packages have architecture '(none)', and it's not part of NEVRA, e.g.: # gpg-pubkey-45719a39-5f2c0192 gpg-pubkey (none) 0 1635985199 if arch != "(none)": assert nevra.endswith(f".{arch}") evr = evra.removesuffix(f".{arch}") else: evr = evra arch = "" # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. 
if ( self.context.config.base_trees and datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp ): continue manifest = PackageManifest("rpm", name, evr, arch, int(size)) self.packages.append(manifest) if not self.need_source_info(): continue source = self.source_packages.get(srpm) if source is None: c = run( [ "rpm", "--root=/buildroot", "--query", "--changelog", nevra, ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=self.context.sandbox(options=["--ro-bind", self.context.root, "/buildroot"]), ) changelog = c.stdout.strip() source = SourcePackageManifest(srpm, changelog) self.source_packages[srpm] = source source.add(manifest) def record_deb_packages(self) -> None: c = run( [ "dpkg-query", "--admindir=/buildroot/var/lib/dpkg", "--show", "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n", # noqa: E501 ], stdout=subprocess.PIPE, sandbox=self.context.sandbox(options=["--ro-bind", self.context.root, "/buildroot"]), ) # fmt: skip packages = sorted(c.stdout.splitlines()) for package in packages: name, source, version, arch, size, installtime = package.split("\t") # dpkg records the size in KBs, the field is optional db-fsys:Last-Modified is not available in # very old dpkg, so just skip creating the manifest for sysext when building on very old # distributions by setting the timestamp to epoch. This only affects Ubuntu Bionic which is # nearing EOL. If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by # packages that were installed in this execution of mkosi. We assume that the upper layer is put # together in one go, which currently is always true. install_timestamp = datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) if self.context.config.base_trees and install_timestamp < self._init_timestamp: continue manifest = PackageManifest("deb", name, version, arch, int(size or 0) * 1024) self.packages.append(manifest) if not self.need_source_info(): continue source_package = self.source_packages.get(source) if source_package is None: # Yes, --quiet is specified twice, to avoid output about download stats. Note that the # argument of the 'changelog' verb is the binary package name, not the source package # name. We also have to set "Dir" explicitly because apt has no separate option to configure # the changelog directory. Apt.invoke() sets all options that are interpreted relative to Dir # to absolute paths by default so this is safe. 
result = Apt.invoke( self.context, "changelog", ["--quiet", "--quiet", "-o", "Dir=/buildroot", name], stdout=subprocess.PIPE, ) source_package = SourcePackageManifest(source, result.stdout.strip()) self.source_packages[source] = source_package source_package.add(manifest) def record_pkg_packages(self) -> None: packages = sorted((self.context.root / "var/lib/pacman/local").glob("*/desc")) for desc in packages: name, version, source, arch = parse_pkg_desc(desc) package = PackageManifest("pkg", name, version, arch, 0) self.packages.append(package) source_package = self.source_packages.get(source) if source_package is None: source_package = SourcePackageManifest(source, None) self.source_packages[source] = source_package source_package.add(package) def has_data(self) -> bool: # We might add more data in the future return len(self.packages) > 0 def as_dict(self) -> dict[str, Any]: config = { "name": self.context.config.image_id or "image", "distribution": str(self.context.config.distribution), "architecture": str(self.context.config.architecture), } if self.context.config.image_version is not None: config["version"] = self.context.config.image_version if self.context.config.release is not None: config["release"] = self.context.config.release return { # Bump this when incompatible changes are made to the manifest format. "manifest_version": 1, # Describe the image itself. "config": config, # Describe the image content in terms of packages. "packages": [package.as_dict() for package in self.packages], } def write_json(self, out: IO[str]) -> None: json.dump(self.as_dict(), out, indent=2) def write_package_report(self, out: IO[str]) -> None: """Create a human-readable report about packages This is modelled after "Fedora compose reports" that are sent to fedora-devel. The format describes added and removed packages, and includes the changelogs. A diff between two such reports shows what changed *in* the packages quite nicely. """ out.write(f"Packages: {len(self.packages)}\n") out.write(f"Size: {sum(p.size for p in self.packages)}") for package in self.source_packages.values(): out.write(f"\n{80 * '-'}\n") out.write(package.report()) mkosi-25.3/mkosi/mounts.py000066400000000000000000000100621474711424400156150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import stat import tempfile from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional, Union from mkosi.config import BuildSourcesEphemeral, Config from mkosi.log import die from mkosi.sandbox import OverlayOperation from mkosi.util import PathString, flatten def stat_is_whiteout(st: os.stat_result) -> bool: return stat.S_ISCHR(st.st_mode) and st.st_rdev == 0 def delete_whiteout_files(path: Path) -> None: """Delete any char(0,0) device nodes underneath @path Overlayfs uses such files to mark "whiteouts" (files present in the lower layers, but removed in the upper one). """ for entry in path.rglob("*"): # TODO: Use Path.stat() once we depend on Python 3.10+. 
if stat_is_whiteout(os.stat(entry, follow_symlinks=False)): entry.unlink() @contextlib.contextmanager def mount_overlay( lowerdirs: Sequence[Path], dst: Path, *, upperdir: Optional[Path] = None, ) -> Iterator[Path]: with contextlib.ExitStack() as stack: if upperdir is None: upperdir = Path(stack.enter_context(tempfile.TemporaryDirectory(prefix="volatile-overlay"))) st = lowerdirs[-1].stat() os.chmod(upperdir, st.st_mode) workdir = Path( stack.enter_context( tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir") ) ) try: with OverlayOperation(tuple(str(p) for p in lowerdirs), str(upperdir), str(workdir), str(dst)): yield dst finally: delete_whiteout_files(upperdir) @contextlib.contextmanager def finalize_source_mounts( config: Config, *, ephemeral: Union[BuildSourcesEphemeral, bool], ) -> Iterator[list[PathString]]: with contextlib.ExitStack() as stack: options: list[PathString] = [] for t in config.build_sources: src, dst = t.with_prefix("/work/src") if ephemeral: if ephemeral == BuildSourcesEphemeral.buildcache: if config.build_dir is None: die( "BuildSourcesEphemeral=buildcache was configured, but no build directory exists.", # noqa: E501 hint="Configure BuildDirectory= or create mkosi.builddir.", ) upperdir = config.build_dir / f"mkosi.buildovl.{src.name}" upperdir.mkdir(mode=src.stat().st_mode, exist_ok=True) else: upperdir = Path( stack.enter_context(tempfile.TemporaryDirectory(prefix="volatile-overlay.")) ) os.chmod(upperdir, src.stat().st_mode) workdir = Path( stack.enter_context( tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir.") ) ) options += [ "--overlay-lowerdir", src, "--overlay-upperdir", upperdir, "--overlay-workdir", workdir, "--overlay", dst, ] # fmt: skip else: options += ["--bind", src, dst] yield options def finalize_certificate_mounts(config: Config, relaxed: bool = False) -> list[PathString]: mounts = [] root = config.tools() if config.tools_tree_certificates else Path("/") if not relaxed or root != Path("/"): mounts += [ (root / subdir, Path("/") / subdir) for subdir in ( Path("etc/pki"), Path("etc/ssl"), Path("etc/ca-certificates"), Path("var/lib/ca-certificates"), ) if (root / subdir).exists() and any(p for p in (root / subdir).rglob("*") if not p.is_dir()) ] return flatten(("--ro-bind", src, target) for src, target in sorted(set(mounts), key=lambda s: s[1])) mkosi-25.3/mkosi/pager.py000066400000000000000000000010201474711424400153600ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import pydoc from typing import Optional def page(text: str, enabled: Optional[bool]) -> None: if enabled: # Initialize less options from $MKOSI_LESS or provide a suitable fallback. 
# F: don't page if one screen # X: do not clear screen # M: verbose prompt # K: quit on ^C # R: allow rich formatting os.environ["LESS"] = os.getenv("MKOSI_LESS", "FXMKR") pydoc.pager(text) else: print(text) mkosi-25.3/mkosi/partition.py000066400000000000000000000047711474711424400163130ustar00rootroot00000000000000import dataclasses import json import subprocess from collections.abc import Mapping, Sequence from pathlib import Path from typing import Any, Final, Optional from mkosi.log import die from mkosi.run import SandboxProtocol, nosandbox, run, workdir @dataclasses.dataclass(frozen=True) class Partition: type: str uuid: str partno: Optional[int] split_path: Optional[Path] roothash: Optional[str] @classmethod def from_dict(cls, dict: Mapping[str, Any]) -> "Partition": return cls( type=dict["type"], uuid=dict["uuid"], partno=int(partno) if (partno := dict.get("partno")) else None, # We have to translate the sandbox path to the path on the host by removing the /work prefix. split_path=( Path(p.removeprefix("/work")) if ((p := dict.get("split_path")) and p != "-") else None ), roothash=dict.get("roothash"), ) GRUB_BOOT_PARTITION_UUID: Final[str] = "21686148-6449-6e6f-744e-656564454649" def find_partitions(image: Path, *, sandbox: SandboxProtocol = nosandbox) -> list[Partition]: output = json.loads( run( ["systemd-repart", "--json=short", workdir(image, sandbox)], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, sandbox=sandbox(options=["--ro-bind", image, workdir(image, sandbox)]), ).stdout ) return [Partition.from_dict(d) for d in output] def finalize_roothash(partitions: Sequence[Partition]) -> Optional[str]: roothash: Optional[str] = None usrhash: Optional[str] = None for p in partitions: if (h := p.roothash) is None: continue if not (p.type.startswith("usr") or p.type.startswith("root")): die(f"Found roothash property on unexpected partition type {p.type}") # When there's multiple verity enabled root or usr partitions, the first one wins. 
if p.type.startswith("usr"): usrhash = usrhash or h else: roothash = roothash or h return f"roothash={roothash}" if roothash else f"usrhash={usrhash}" if usrhash else None def finalize_root(partitions: Sequence[Partition]) -> Optional[str]: root = finalize_roothash(partitions) if not root: root = next((f"root=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("root")), None) if not root: root = next((f"mount.usr=PARTUUID={p.uuid}" for p in partitions if p.type.startswith("usr")), None) return root mkosi-25.3/mkosi/qemu.py000066400000000000000000001663601474711424400152540ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import base64 import contextlib import dataclasses import enum import errno import fcntl import hashlib import io import json import logging import os import random import resource import shutil import socket import struct import subprocess import sys import tempfile import textwrap import uuid from collections.abc import Iterator, Sequence from pathlib import Path from typing import Optional from mkosi.config import ( Args, Config, ConfigFeature, ConsoleMode, Drive, Firmware, Network, OutputFormat, VsockCID, finalize_term, format_bytes, systemd_tool_version, want_selinux_relabel, yes_no, ) from mkosi.log import ARG_DEBUG, die from mkosi.partition import finalize_root, find_partitions from mkosi.run import SD_LISTEN_FDS_START, AsyncioThread, find_binary, fork_and_wait, run, spawn, workdir from mkosi.tree import copy_tree, rmtree from mkosi.user import INVOKING_USER, become_root_in_subuid_range, become_root_in_subuid_range_cmd from mkosi.util import PathString, StrEnum, current_home_dir, flock, flock_or_die, groupby, round_up, try_or from mkosi.versioncomp import GenericVersion QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0") VHOST_VSOCK_SET_GUEST_CID = 0x4008AF60 class QemuDeviceNode(StrEnum): kvm = enum.auto() vhost_vsock = enum.auto() def device(self) -> Path: return Path("/dev") / str(self) def description(self) -> str: return { QemuDeviceNode.kvm: "KVM acceleration", QemuDeviceNode.vhost_vsock: "a VSock device", }[self] def feature(self, config: Config) -> ConfigFeature: return { QemuDeviceNode.kvm: config.kvm, QemuDeviceNode.vhost_vsock: config.vsock, }[self] def open(self) -> int: return os.open(self.device(), os.O_RDWR | os.O_CLOEXEC | os.O_NONBLOCK) def available(self, log: bool = False) -> bool: try: os.close(self.open()) except OSError as e: if e.errno not in (errno.ENOENT, errno.ENODEV, errno.EPERM, errno.EACCES): raise e if log and e.errno in (errno.ENOENT, errno.ENODEV): logging.warning( f"{self.device()} not found. Not adding {self.description()} to the virtual machine." ) if log and e.errno in (errno.EPERM, errno.EACCES): logging.warning( f"Permission denied to access {self.device()}. " f"Not adding {self.description()} to the virtual machine. " "(Maybe a kernel module could not be loaded?)" ) return False return True def hash_output(config: Config) -> "hashlib._Hash": p = os.fspath(config.output_dir_or_cwd() / config.output) return hashlib.sha256(p.encode()) def hash_to_vsock_cid(hash: "hashlib._Hash") -> int: cid = int.from_bytes(hash.digest()[:4], byteorder="little") # Make sure we don't return any of the well-known CIDs. 
return max(3, min(cid, 0xFFFFFFFF - 1)) def vsock_cid_in_use(vfd: int, cid: int) -> bool: try: fcntl.ioctl(vfd, VHOST_VSOCK_SET_GUEST_CID, struct.pack("=Q", cid)) except OSError as e: if e.errno != errno.EADDRINUSE: raise return True return False def find_unused_vsock_cid(config: Config, vfd: int) -> int: hash = hash_output(config) for i in range(64): cid = hash_to_vsock_cid(hash) if not vsock_cid_in_use(vfd, cid): return cid hash.update(i.to_bytes(length=4, byteorder="little")) for i in range(64): cid = random.randint(0, 0xFFFFFFFF - 1) if not vsock_cid_in_use(vfd, cid): return cid die("Failed to find an unused VSock connection ID") class KernelType(StrEnum): pe = enum.auto() uki = enum.auto() unknown = enum.auto() @classmethod def identify(cls, config: Config, path: Path) -> "KernelType": if not config.find_binary("bootctl"): logging.warning("bootctl is not installed, assuming 'unknown' kernel type") return KernelType.unknown if (v := systemd_tool_version("bootctl", sandbox=config.sandbox)) < 253: logging.warning(f"bootctl {v} doesn't know kernel-identify verb, assuming 'unknown' kernel type") return KernelType.unknown type = run( ["bootctl", "kernel-identify", workdir(path)], stdout=subprocess.PIPE, sandbox=config.sandbox(options=["--ro-bind", path, workdir(path)]), ).stdout.strip() try: return cls(type) except ValueError: logging.warning(f"Unknown kernel type '{type}', assuming 'unknown'") return KernelType.unknown def find_qemu_binary(config: Config) -> Path: options = [f"qemu-system-{config.architecture.to_qemu()}"] if config.architecture.is_native(): options += ["/usr/libexec/qemu-kvm"] for o in options: if qemu := config.find_binary(o): return qemu die( "qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed?", ) @dataclasses.dataclass(frozen=True) class OvmfConfig: description: Path firmware: Path format: str vars: Path vars_format: str def find_ovmf_firmware(config: Config, firmware: Firmware) -> Optional[OvmfConfig]: if not firmware.is_uefi(): return None desc = list((config.tools() / "usr/share/qemu/firmware").glob("*")) if config.tools() == Path("/"): desc += list((config.tools() / "etc/qemu/firmware").glob("*")) arch = config.architecture.to_qemu() machine = config.architecture.default_qemu_machine() for p in sorted(desc): if p.is_dir(): continue j = json.loads(p.read_text()) if "uefi" not in j["interface-types"]: logging.debug(f"{p.name} firmware description does not target UEFI, skipping") continue for target in j["targets"]: if target["architecture"] != arch: continue # We cannot use fnmatch as for example our default machine for x86-64 is q35 and the firmware # description lists "pc-q35-*" so we use a substring check instead. 
if any(machine in glob for glob in target["machines"]): break else: logging.debug( f"{p.name} firmware description does not target architecture {arch} or " f"machine {machine}, skipping" ) continue if "nvram-template" not in j["mapping"]: logging.debug(f"{p.name} firmware description is missing nvram-template, skipping") continue if firmware == Firmware.uefi_secure_boot and "secure-boot" not in j["features"]: logging.debug(f"{p.name} firmware description does not include secure boot, skipping") continue if firmware != Firmware.uefi_secure_boot and "secure-boot" in j["features"]: logging.debug(f"{p.name} firmware description includes secure boot, skipping") continue if ( config.firmware_variables in (Path("microsoft"), Path("microsoft-mok")) and "enrolled-keys" not in j["features"] ): logging.debug(f"{p.name} firmware description does not have enrolled Microsoft keys, skipping") continue if ( config.firmware_variables not in (Path("microsoft"), Path("microsoft-mok")) and "enrolled-keys" in j["features"] ): logging.debug(f"{p.name} firmware description has enrolled Microsoft keys, skipping") continue logging.debug(f"Using {p.name} firmware description") return OvmfConfig( description=Path("/") / p.relative_to(config.tools()), firmware=Path(j["mapping"]["executable"]["filename"]), format=j["mapping"]["executable"]["format"], vars=Path(j["mapping"]["nvram-template"]["filename"]), vars_format=j["mapping"]["nvram-template"]["format"], ) die("Couldn't find matching OVMF UEFI firmware description") @contextlib.contextmanager def start_swtpm(config: Config) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-swtpm-") as state: # swtpm_setup is noisy and doesn't have a --quiet option so we pipe it's stdout to /dev/null. run( [ "swtpm_setup", "--tpm-state", workdir(Path(state)), "--tpm2", "--pcr-banks", "sha256", "--config", "/dev/null", ], sandbox=config.sandbox(options=["--bind", state, workdir(Path(state))]), stdout=None if ARG_DEBUG.get() else subprocess.DEVNULL, ) # fmt: skip cmdline = ["swtpm", "socket", "--tpm2", "--tpmstate", f"dir={workdir(Path(state))}"] # We create the socket ourselves and pass the fd to swtpm to avoid race conditions where we start # qemu before swtpm has had the chance to create the socket (or where we try to chown it first). 
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: path = Path(state) / Path("sock") sock.bind(os.fspath(path)) sock.listen() cmdline += ["--ctrl", f"type=unixio,fd={SD_LISTEN_FDS_START}"] with spawn( cmdline, pass_fds=(sock.fileno(),), sandbox=config.sandbox( options=["--bind", state, workdir(Path(state))], setup=scope_cmd( name=f"mkosi-swtpm-{config.machine_or_name()}", description=f"swtpm for {config.machine_or_name()}", ), ), ) as proc: yield path proc.terminate() def find_virtiofsd(*, root: Path = Path("/"), extra: Sequence[Path] = ()) -> Optional[Path]: if p := find_binary("virtiofsd", root=root, extra=extra): return p if (p := root / "usr/libexec/virtiofsd").exists(): return Path("/") / p.relative_to(root) if (p := root / "usr/lib/virtiofsd").exists(): return Path("/") / p.relative_to(root) return None def unshare_version() -> str: return run(["unshare", "--version"], stdout=subprocess.PIPE).stdout.strip().split()[-1] def systemd_escape(config: Config, s: PathString, path: bool = False) -> str: cmdline = ["systemd-escape", s] if path: cmdline += ["--path"] return run(cmdline, stdout=subprocess.PIPE, sandbox=config.sandbox()).stdout.strip() @contextlib.contextmanager def start_virtiofsd( config: Config, directory: Path, *, uidmap: bool = True, name: Optional[str] = None, selinux: bool = False, ) -> Iterator[Path]: virtiofsd = find_virtiofsd(root=config.tools(), extra=config.extra_search_paths) if virtiofsd is None: die("virtiofsd must be installed to boot directory images or use RuntimeTrees= with mkosi vm") cmdline: list[PathString] = [ virtiofsd, "--shared-dir", workdir(directory), "--xattr", # qemu's client doesn't seem to support announcing submounts so disable the feature to avoid the # warning. "--no-announce-submounts", "--sandbox=chroot", f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}", ] # fmt: skip if selinux: cmdline += ["--security-label"] st = None if uidmap: st = Path(directory).stat() # If we're already running as the same user that we'll be running virtiofsd as, don't bother doing # any explicit user switching or chown()'ing as it's not needed in this case. if st.st_uid == os.getuid() and st.st_gid == os.getgid(): st = None # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start # qemu before virtiofsd has had the chance to create the socket (or where we try to chown it first). with ( tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd-") as context, socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock, ): if st: # Make sure virtiofsd can access the socket in this directory. os.chown(context, st.st_uid, st.st_gid) # Make sure we can use the socket name as a unique identifier for the fs as well but make sure it's # not too long as virtiofs tag names are limited to 36 bytes. path = Path(context) / f"sock-{uuid.uuid4().hex}"[:35] sock.bind(os.fspath(path)) sock.listen() if st: # Make sure virtiofsd can connect to the socket. os.chown(path, st.st_uid, st.st_gid) cmdline += ["--fd", str(SD_LISTEN_FDS_START)] # We want RuntimeBuildSources= and RuntimeTrees= to do the right thing even when running mkosi vm # as root without the source directories necessarily being owned by root. We achieve this by running # virtiofsd as the owner of the source directory and then mapping that uid to root. 
if not name: name = f"{config.machine_or_name()}-{systemd_escape(config, directory, path=True)}" else: name = systemd_escape(config, name) name = f"mkosi-virtiofsd-{name}" description = f"virtiofsd for machine {config.machine_or_name()} for {directory}" scope = [] if st: scope = scope_cmd(name=name, description=description, user=st.st_uid, group=st.st_gid) elif not uidmap and (os.getuid() == 0 or unshare_version() >= "2.38"): scope = scope_cmd(name=name, description=description) with spawn( cmdline, pass_fds=(sock.fileno(),), user=st.st_uid if st and not scope else None, group=st.st_gid if st and not scope else None, # If we're booting from virtiofs and unshare is too old, we don't set up a scope so we can use # our own function to become root in the subuid range. # TODO: Drop this as soon as we drop CentOS Stream 9 support and can rely on newer unshare # features. preexec_fn=become_root_in_subuid_range if not scope and not uidmap else None, sandbox=config.sandbox( options=[ "--bind", directory, workdir(directory), *(["--become-root"] if uidmap else []), ], setup=( scope + (become_root_in_subuid_range_cmd() if scope and not uidmap else []) ), ), ) as proc: # fmt: skip yield path proc.terminate() @contextlib.contextmanager def vsock_notify_handler() -> Iterator[tuple[str, dict[str, str]]]: """ This yields a vsock address and a dict that will be filled in with the notifications from the VM. The dict should only be accessed after the context manager has been finalized. """ with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as vsock: vsock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) vsock.listen() vsock.setblocking(False) num_messages = 0 num_bytes = 0 messages = {} async def notify() -> None: nonlocal num_messages nonlocal num_bytes import asyncio loop = asyncio.get_running_loop() while True: s, _ = await loop.sock_accept(vsock) num_messages += 1 with s: data = [] try: while buf := await loop.sock_recv(s, 4096): data.append(buf) except ConnectionResetError: logging.debug("vsock notify listener connection reset by peer") for msg in b"".join(data).decode().split("\n"): if not msg: continue num_bytes += len(msg) k, _, v = msg.partition("=") messages[k] = v with AsyncioThread(notify()): try: yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{vsock.getsockname()[1]}", messages finally: logging.debug( f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes" ) for k, v in messages.items(): logging.debug(f"- {k}={v}") def make_nocow(config: Config, path: Path) -> None: run( ["chattr", "+C", workdir(path)], check=False, stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None, sandbox=config.sandbox(options=["--bind", path, workdir(path)]), ) @contextlib.contextmanager def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]: assert config.forward_journal bin = config.find_binary("systemd-journal-remote", "/usr/lib/systemd/systemd-journal-remote") if not bin: die("systemd-journal-remote must be installed to forward logs from the virtual machine") if config.forward_journal.suffix == ".journal": d = config.forward_journal.parent else: d = config.forward_journal if not d.exists(): # Pass exist_ok=True because multiple mkosi processes might be trying to create the parent directory # at the same time. d.mkdir(exist_ok=True, parents=True) # Make sure COW is disabled so systemd-journal-remote doesn't complain on btrfs filesystems. 
make_nocow(config, d) INVOKING_USER.chown(d) with tempfile.NamedTemporaryFile(mode="w", prefix="mkosi-journal-remote-config-") as f: os.chmod(f.name, 0o644) # Make sure we capture all the logs by bumping the limits. We set MaxFileSize=4G because with the # compact mode enabled the files cannot grow any larger anyway. f.write( textwrap.dedent( f"""\ [Remote] MaxUse=1T KeepFree=1G MaxFileSize=4G MaxFiles={1 if config.forward_journal.suffix == ".journal" else 100} """ ) ) f.flush() user = d.stat().st_uid if os.getuid() == 0 else None group = d.stat().st_gid if os.getuid() == 0 else None scope = scope_cmd( name=f"mkosi-journal-remote-{config.machine_or_name()}", description=f"mkosi systemd-journal-remote for {config.machine_or_name()}", user=user, group=group, ) with spawn( [ bin, "--output", workdir(config.forward_journal), "--split-mode", "none" if config.forward_journal.suffix == ".journal" else "host", ], pass_fds=(sockfd,), sandbox=config.sandbox( options=[ "--bind", config.forward_journal.parent, workdir(config.forward_journal.parent), "--ro-bind", f.name, "/etc/systemd/journal-remote.conf", ], setup=scope, ), user=user if not scope else None, group=group if not scope else None, foreground=False, ) as proc: # fmt: skip yield proc.terminate() @contextlib.contextmanager def start_journal_remote_vsock(config: Config) -> Iterator[str]: with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock: sock.bind((socket.VMADDR_CID_ANY, socket.VMADDR_PORT_ANY)) sock.listen() with start_journal_remote(config, sock.fileno()): yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{sock.getsockname()[1]}" @contextlib.contextmanager def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: if config.output_format in (OutputFormat.cpio, OutputFormat.uki): yield src return # If we're booting a directory image that was not built as root, we have to make an ephemeral copy. If # we're running as root, we have to make an ephemeral copy so that all the files in the directory tree # are also owned by root. If we're not running as root, we'll be making use of a subuid/subgid user # namespace and we don't want any leftover files from the subuid/subgid user namespace to remain after we # shut down the container or virtual machine. if not config.ephemeral and (config.output_format != OutputFormat.directory or src.stat().st_uid == 0): with flock_or_die(src): yield src return src = src.resolve() # tempfile doesn't provide an API to get a random filename in an arbitrary directory so we do this # instead. Limit the size to 16 characters as the output name might be used in a unix socket path by # vmspawn and needs to fit in 108 characters. 
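    # For example (illustrative): "image.raw" might become "image.raw-1f3a9c2d4b5e6f70".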
tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}" try: def copy() -> None: if config.output_format in (OutputFormat.disk, OutputFormat.esp): attr = run( ["lsattr", "-l", workdir(src)], sandbox=config.sandbox(options=["--ro-bind", src, workdir(src)]), stdout=subprocess.PIPE, ).stdout if "No_COW" in attr: tmp.touch() make_nocow(config, tmp) copy_tree( src, tmp, preserve=( config.output_format == OutputFormat.directory and (os.getuid() != 0 or src.stat().st_uid == 0) ), use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, ) with flock(src): fork_and_wait(copy) yield tmp finally: def rm() -> None: if config.output_format == OutputFormat.directory: become_root_in_subuid_range() rmtree(tmp, sandbox=config.sandbox) fork_and_wait(rm) def join_initrds(config: Config, initrds: Sequence[Path], output: Path) -> Path: assert initrds if len(initrds) == 1: copy_tree(initrds[0], output, sandbox=config.sandbox) return output seq = io.BytesIO() for p in initrds: initrd = p.read_bytes() n = len(initrd) padding = b"\0" * (round_up(n, 4) - n) # pad to 32 bit alignment seq.write(initrd) seq.write(padding) output.write_bytes(seq.getbuffer()) return output def qemu_version(config: Config, binary: Path) -> GenericVersion: return GenericVersion( run( [binary, "--version"], stdout=subprocess.PIPE, sandbox=config.sandbox(), ).stdout.split()[3] ) def want_scratch(config: Config) -> bool: return config.runtime_scratch == ConfigFeature.enabled or ( config.runtime_scratch == ConfigFeature.auto and config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None ) @contextlib.contextmanager def generate_scratch_fs(config: Config) -> Iterator[Path]: with tempfile.NamedTemporaryFile(dir="/var/tmp", prefix="mkosi-scratch-") as scratch: scratch.truncate(1024**4) fs = config.distribution.filesystem() extra = config.environment.get(f"SYSTEMD_REPART_MKFS_OPTIONS_{fs.upper()}", "") run( [f"mkfs.{fs}", "-L", "scratch", *extra.split(), workdir(Path(scratch.name))], stdout=subprocess.DEVNULL, sandbox=config.sandbox(options=["--bind", scratch.name, workdir(Path(scratch.name))]), ) yield Path(scratch.name) def finalize_firmware(config: Config, kernel: Optional[Path]) -> Firmware: if config.firmware != Firmware.auto: return config.firmware if kernel: if KernelType.identify(config, kernel) != KernelType.unknown: return Firmware.uefi_secure_boot return Firmware.linux if ( config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None ): return Firmware.linux # At the moment there are no qemu firmware descriptions for non-x86 architectures that advertise # secure-boot support so let's default to no secure boot for non-x86 architectures. 
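    # (At the time of writing this notably affects the aarch64 edk2 firmware descriptions, which
    # do not set the secure-boot feature flag.)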
if config.architecture.is_x86_variant(): return Firmware.uefi_secure_boot return Firmware.uefi def finalize_firmware_variables( config: Config, ovmf: OvmfConfig, stack: contextlib.ExitStack, ) -> tuple[Path, str]: ovmf_vars = Path(stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-ovmf-vars-")).name) if config.firmware_variables in (None, Path("custom"), Path("microsoft")): ovmf_vars_format = ovmf.vars_format else: ovmf_vars_format = "raw" if config.firmware_variables == Path("custom"): assert config.secure_boot_certificate run( [ "virt-fw-vars", "--input", workdir(ovmf.vars), "--output", workdir(ovmf_vars), "--enroll-cert", workdir(config.secure_boot_certificate), "--add-db", "OvmfEnrollDefaultKeys", workdir(config.secure_boot_certificate), "--no-microsoft", "--secure-boot", "--loglevel", "WARNING", ], sandbox=config.sandbox( options=[ "--bind", ovmf_vars, workdir(ovmf_vars), "--ro-bind", ovmf.vars, workdir(ovmf.vars), "--ro-bind", config.secure_boot_certificate, workdir(config.secure_boot_certificate), ], ), ) # fmt: skip elif config.firmware_variables == Path("microsoft-mok"): assert config.secure_boot_certificate run( [ "virt-fw-vars", "--input", workdir(ovmf.vars), "--output", workdir(ovmf_vars), "--add-mok", "605dab50-e046-4300-abb6-3dd810dd8b23", workdir(config.secure_boot_certificate), "--loglevel", "WARNING", ], sandbox=config.sandbox( options=[ "--bind", ovmf_vars, workdir(ovmf_vars), "--ro-bind", ovmf.vars, workdir(ovmf.vars), "--ro-bind", config.secure_boot_certificate, workdir(config.secure_boot_certificate), ], ), ) # fmt: skip else: vars = ( config.tools() / ovmf.vars.relative_to("/") if config.firmware_variables == Path("microsoft") or not config.firmware_variables else config.firmware_variables ) shutil.copy(vars, ovmf_vars) return ovmf_vars, ovmf_vars_format def apply_runtime_size(config: Config, image: Path) -> None: if config.output_format != OutputFormat.disk or not config.runtime_size: return run( [ "systemd-repart", "--definitions=/", "--no-pager", # To use qemu's cache.direct option, the drive size has to be a multiple of the page size. 
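            # For example (illustrative): RuntimeSize=10000000 is rounded up to 10002432 bytes
            # (2442 * 4096) on a system with 4096-byte pages.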
f"--size={round_up(config.runtime_size, resource.getpagesize())}", "--pretty=no", "--offline=yes", workdir(image), ], sandbox=config.sandbox(options=["--bind", image, workdir(image)]), ) # fmt: skip @contextlib.contextmanager def finalize_drive(config: Config, drive: Drive) -> Iterator[Path]: with tempfile.NamedTemporaryFile( dir=drive.directory or "/var/tmp", prefix=f"mkosi-drive-{drive.id}", ) as file: make_nocow(config, Path(file.name)) file.truncate(round_up(drive.size, resource.getpagesize())) yield Path(file.name) @contextlib.contextmanager def finalize_state(config: Config, cid: int) -> Iterator[None]: (INVOKING_USER.runtime_dir() / "machine").mkdir(parents=True, exist_ok=True) with flock(INVOKING_USER.runtime_dir() / "machine"): if (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists(): die( f"Another virtual machine named {config.machine_or_name()} is already running", hint="Use --machine to specify a different virtual machine name", ) p.write_text( json.dumps( { "Machine": config.machine_or_name(), "ProxyCommand": f"socat - VSOCK-CONNECT:{cid}:%p", "SshKey": os.fspath(config.ssh_key) if config.ssh_key else None, }, sort_keys=True, indent=4, ) ) try: yield finally: with flock(INVOKING_USER.runtime_dir() / "machine"): p.unlink(missing_ok=True) def finalize_kernel_command_line_extra(config: Config) -> list[str]: columns, lines = shutil.get_terminal_size() term = finalize_term() cmdline = [ "rw", # Make sure we set up networking in the VM/container. "systemd.wants=network.target", # Make sure we don't load vmw_vmci which messes with virtio vsock. "module_blacklist=vmw_vmci", f"systemd.tty.term.hvc0={term}", f"systemd.tty.columns.hvc0={columns}", f"systemd.tty.rows.hvc0={lines}", ] if not any(s.startswith("ip=") for s in config.kernel_command_line_extra): cmdline += ["ip=enc0:any", "ip=enp0s1:any", "ip=enp0s2:any", "ip=host0:any", "ip=none"] if not any(s.startswith("loglevel=") for s in config.kernel_command_line_extra): cmdline += ["loglevel=4"] if not any(s.startswith("SYSTEMD_SULOGIN_FORCE=") for s in config.kernel_command_line_extra): cmdline += ["SYSTEMD_SULOGIN_FORCE=1"] if ( not any(s.startswith("systemd.hostname=") for s in config.kernel_command_line_extra) and config.machine ): cmdline += [f"systemd.hostname={config.machine}"] if config.cdrom: # CD-ROMs are read-only so tell systemd to boot in volatile mode. cmdline += ["systemd.volatile=yes"] if config.console != ConsoleMode.gui: cmdline += [ f"systemd.tty.term.console={term}", f"systemd.tty.columns.console={columns}", f"systemd.tty.rows.console={lines}", "console=hvc0", f"TERM={term}", ] elif config.architecture.is_arm_variant(): cmdline += ["console=tty0"] for s in config.kernel_command_line_extra: key, sep, value = s.partition("=") if " " in value: value = f'"{value}"' cmdline += [key if not sep else f"{key}={value}"] return cmdline def finalize_credentials(config: Config) -> dict[str, str]: creds = { "firstboot.locale": "C.UTF-8", **config.credentials, } if "firstboot.timezone" not in creds: if config.find_binary("timedatectl"): tz = run( ["timedatectl", "show", "-p", "Timezone", "--value"], stdout=subprocess.PIPE, check=False, # timedatectl needs to be able to talk via dbus to timedated. 
sandbox=config.sandbox(options=["--ro-bind", "/run", "/run"]), ).stdout.strip() else: tz = "UTC" creds["firstboot.timezone"] = tz if "ssh.authorized_keys.root" not in creds: if config.ssh_certificate: pubkey = run( ["openssl", "x509", "-in", workdir(config.ssh_certificate), "-pubkey", "-noout"], stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null"), sandbox=config.sandbox( options=["--ro-bind", config.ssh_certificate, workdir(config.ssh_certificate)], ), ).stdout.strip() sshpubkey = run( ["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=subprocess.PIPE, # ssh-keygen insists on being able to resolve the current user which doesn't always work # (think sssd or similar) so let's switch to root which is always resolvable. sandbox=config.sandbox(options=["--become-root", "--ro-bind", "/etc/passwd", "/etc/passwd"]), ).stdout.strip() creds["ssh.authorized_keys.root"] = sshpubkey elif config.ssh: die( "Ssh= is enabled but no SSH certificate was found", hint="Run 'mkosi genkey' to automatically create one", ) return creds def scope_cmd( name: str, description: str, user: Optional[int] = None, group: Optional[int] = None, properties: Sequence[str] = (), environment: bool = True, ) -> list[str]: if not find_binary("systemd-run"): return [] if os.getuid() != 0 and "DBUS_SESSION_BUS_ADDRESS" in os.environ and "XDG_RUNTIME_DIR" in os.environ: env = { "DBUS_SESSION_BUS_ADDRESS": os.environ["DBUS_SESSION_BUS_ADDRESS"], "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"], } elif os.getuid() == 0: if "DBUS_SYSTEM_ADDRESS" in os.environ: env = {"DBUS_SYSTEM_ADDRESS": os.environ["DBUS_SYSTEM_ADDRESS"]} elif Path("/run/dbus/system_bus_socket").exists(): env = {"DBUS_SYSTEM_ADDRESS": "/run/dbus/system_bus_socket"} else: return [] else: return [] return [ "env", *(f"{k}={v}" for k, v in env.items() if environment), "systemd-run", "--system" if os.getuid() == 0 else "--user", *(["--quiet"] if not ARG_DEBUG.get() else []), "--unit", name, "--description", description, "--scope", "--collect", *(["--expand-environment=no"] if systemd_tool_version("systemd-run") >= 254 else []), *(["--uid", str(user)] if user is not None else []), *(["--gid", str(group)] if group is not None else []), *([f"--property={p}" for p in properties]), ] # fmt: skip def machine1_is_available(config: Config) -> bool: if "DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").is_socket(): return False services = json.loads( run( ["busctl", "list", "--json=pretty"], foreground=False, env=os.environ | config.environment, sandbox=config.sandbox(relaxed=True), stdout=subprocess.PIPE, stderr=sys.stderr, ).stdout.strip() ) return any(service["name"] == "org.freedesktop.machine1" for service in services) def finalize_register(config: Config) -> bool: if config.register == ConfigFeature.disabled: return False if os.getuid() == 0 and ( Path("/run/systemd/machine/io.systemd.Machine").is_socket() or machine1_is_available(config) ): return True if config.register == ConfigFeature.enabled: if os.getuid() != 0: die("Container registration requires root privileges") else: die( "Container registration was requested but systemd-machined is not available", hint="Is the systemd-container package installed?", ) return False def register_machine(config: Config, pid: int, fname: Path, cid: Optional[int]) -> None: if not finalize_register(config): return if (p := Path("/run/systemd/machine/io.systemd.Machine")).is_socket(): run( [ "varlinkctl", "call", p, "io.systemd.Machine.Register", json.dumps( { "name": 
config.machine_or_name().replace("_", "-"),
                        "service": "mkosi",
                        "class": "vm",
                        "leader": pid,
                        **({"rootDirectory": os.fspath(fname)} if fname.is_dir() else {}),
                        **({"vSockCid": cid} if cid is not None else {}),
                        **({"sshAddress": f"vsock/{cid}"} if cid is not None else {}),
                        **({"sshPrivateKeyPath": f"{config.ssh_key}"} if config.ssh_key else {}),
                    }
                ),
            ],
            foreground=False,
            env=os.environ | config.environment,
            sandbox=config.sandbox(relaxed=True),
            # Make sure varlinkctl doesn't write to stdout which messes up the terminal.
            stdout=subprocess.DEVNULL,
            stderr=sys.stderr,
        )
    else:
        run(
            [
                "busctl",
                "call",
                "--quiet",
                "org.freedesktop.machine1",
                "/org/freedesktop/machine1",
                "org.freedesktop.machine1.Manager",
                "RegisterMachine",
                "sayssus",
                config.machine_or_name().replace("_", "-"),
                "0",
                "mkosi",
                "vm",
                str(pid),
                fname if fname.is_dir() else "",
            ],  # fmt: skip
            foreground=False,
            env=os.environ | config.environment,
            sandbox=config.sandbox(relaxed=True),
            # systemd-machined might not be installed so let's ignore any failures unless running in
            # debug mode.
            check=ARG_DEBUG.get(),
            stderr=None if ARG_DEBUG.get() else subprocess.DEVNULL,
        )


def run_qemu(args: Args, config: Config) -> None:
    if config.output_format not in (
        OutputFormat.disk,
        OutputFormat.cpio,
        OutputFormat.uki,
        OutputFormat.esp,
        OutputFormat.directory,
    ):
        die(f"{config.output_format} images cannot be booted in qemu")

    if (
        config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp)
        and config.firmware not in (Firmware.auto, Firmware.linux)
        and not config.firmware.is_uefi()
    ):
        die(f"{config.output_format} images cannot be booted with the '{config.firmware}' firmware")

    if config.runtime_trees and config.firmware == Firmware.bios:
        die("RuntimeTrees= cannot be used when booting in BIOS firmware")

    if config.kvm == ConfigFeature.enabled and not config.architecture.is_native():
        die(
            f"KVM acceleration requested but {config.architecture} does not match "
            "the native host architecture"
        )

    if (
        config.firmware_variables in (Path("custom"), Path("microsoft-mok"))
        and not config.secure_boot_certificate
    ):
        die("SecureBootCertificate= must be configured to use FirmwareVariables=custom|microsoft-mok")

    # After we unshare the user namespace to sandbox qemu, we might not have access to /dev/kvm or
    # related device nodes anymore as access to these might be gated behind the kvm group and we won't
    # be part of the kvm group anymore after unsharing the user namespace. To get around this, open all
    # those device nodes early so we can pass them as file descriptors to qemu later. Note that we
    # can't pass the kvm file descriptor to qemu until version 9.0.
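    # (The file descriptors in pass_fds= are renumbered consecutively from fd 3, i.e.
    # SD_LISTEN_FDS_START, which is what the SD_LISTEN_FDS_START + index computations below rely on.)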
qemu_device_fds = { d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True) } qemu = find_qemu_binary(config) have_kvm = (qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or ( qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds ) if config.kvm == ConfigFeature.enabled and not have_kvm: die("KVM acceleration requested but cannot access /dev/kvm") if config.vsock == ConfigFeature.enabled and QemuDeviceNode.vhost_vsock not in qemu_device_fds: die("VSock requested but cannot access /dev/vhost-vsock") if config.console not in (ConsoleMode.native, ConsoleMode.gui) and not config.find_binary( "systemd-pty-forward" ): die(f"Console mode {config.console} requested but systemd-pty-forward not found") if config.linux: kernel = config.linux elif "-kernel" in args.cmdline: kernel = Path(args.cmdline[args.cmdline.index("-kernel") + 1]) else: kernel = None if config.output_format in (OutputFormat.uki, OutputFormat.esp) and kernel: logging.warning( f"Booting UKI output, kernel {kernel} configured with Linux= or " "passed with -kernel will not be used" ) kernel = None if kernel and not kernel.exists(): die(f"Kernel not found at {kernel}") firmware = finalize_firmware(config, kernel) if not kernel and ( firmware == Firmware.linux or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki) ): if firmware.is_uefi(): name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki kernel = config.output_dir_or_cwd() / name else: kernel = config.output_dir_or_cwd() / config.output_split_kernel if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}, please install a kernel in the image " "or provide a -kernel argument to mkosi vm" ) ovmf = find_ovmf_firmware(config, firmware) # A shared memory backend might increase ram usage so only add one if actually necessary for virtiofsd. 
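    # (virtio-fs is a vhost-user device, so the guest RAM has to be shareable with the virtiofsd
    # process; that's what the memfd-backed memory object with share=on provides.)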
shm = [] if ( config.runtime_trees or config.runtime_build_sources or config.runtime_home or config.output_format == OutputFormat.directory ): shm = ["-object", f"memory-backend-memfd,id=mem,size={config.ram // 1024**2}M,share=on"] machine = f"type={config.architecture.default_qemu_machine()}" if firmware.is_uefi() and config.architecture.supports_smm(): machine += f",smm={'on' if firmware == Firmware.uefi_secure_boot else 'off'}" if shm: machine += ",memory-backend=mem" cmdline: list[PathString] = [] if config.console in (ConsoleMode.interactive, ConsoleMode.read_only): cmdline += [ "systemd-pty-forward", "--background=48;2;12;51;19", # green "--title", f"Virtual Machine {config.machine_or_name()}", ] # fmt: skip if config.console == ConsoleMode.read_only: cmdline += ["--read-only"] cmdline += [ qemu, "-machine", machine, "-smp", str(config.cpus or os.cpu_count()), "-m", f"{config.ram // 1024**2}M", "-object", "rng-random,filename=/dev/urandom,id=rng0", "-device", "virtio-rng-pci,rng=rng0,id=rng-device0", "-device", "virtio-balloon,free-page-reporting=on", "-no-user-config", *shm, ] # fmt: skip if config.runtime_network == Network.user: cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"] elif config.runtime_network == Network.interface: if os.getuid() != 0: die("RuntimeNetwork=interface requires root privileges") cmdline += ["-nic", "tap,script=no,model=virtio-net-pci"] elif config.runtime_network == Network.none: cmdline += ["-nic", "none"] if config.kvm != ConfigFeature.disabled and have_kvm and config.architecture.can_kvm(): accel = "kvm" if qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION: index = list(qemu_device_fds.keys()).index(QemuDeviceNode.kvm) cmdline += ["--add-fd", f"fd={SD_LISTEN_FDS_START + index},set=1,opaque=/dev/kvm"] accel += ",device=/dev/fdset/1" cmdline += ["-cpu", "host"] else: accel = "tcg" cmdline += ["-cpu", "max"] cmdline += ["-accel", accel] cid: Optional[int] = None if QemuDeviceNode.vhost_vsock in qemu_device_fds: if config.vsock_cid == VsockCID.auto: cid = find_unused_vsock_cid(config, qemu_device_fds[QemuDeviceNode.vhost_vsock]) elif config.vsock_cid == VsockCID.hash: cid = hash_to_vsock_cid(hash_output(config)) else: cid = config.vsock_cid if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid): die( f"VSock connection ID {cid} is already in use by another virtual machine", hint="Use VsockConnectionId=auto to have mkosi automatically " "find a free vsock connection ID", ) index = list(qemu_device_fds.keys()).index(QemuDeviceNode.vhost_vsock) cmdline += ["-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}"] if config.console == ConsoleMode.gui: if config.architecture.is_arm_variant(): cmdline += ["-device", "virtio-gpu-pci"] else: cmdline += ["-device", "virtio-vga"] cmdline += [ "-nodefaults", "-display", "sdl,gl=on", "-audio", "driver=pipewire,model=virtio", ] # fmt: skip else: # -nodefaults removes the default CDROM device which avoids an error message during boot # -serial mon:stdio adds back the serial device removed by -nodefaults. cmdline += [ "-nographic", "-nodefaults", "-chardev", "stdio,mux=on,id=console,signal=off", "-device", "virtio-serial-pci,id=mkosi-virtio-serial-pci", "-device", "virtconsole,chardev=console", "-mon", "console", ] # fmt: skip # QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that. 
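    # (UEFI firmware, by contrast, is attached explicitly as pflash drives below: a read-only one
    # for the firmware code and a writable one for the variable store.)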
if firmware.is_uefi(): assert ovmf cmdline += ["-drive", f"if=pflash,format={ovmf.format},readonly=on,file={ovmf.firmware}"] notifications: dict[str, str] = {} with contextlib.ExitStack() as stack: if firmware.is_uefi(): assert ovmf ovmf_vars, ovmf_vars_format = finalize_firmware_variables(config, ovmf, stack) cmdline += ["-drive", f"file={ovmf_vars},if=pflash,format={ovmf_vars_format}"] if firmware == Firmware.uefi_secure_boot: cmdline += [ "-global", "ICH9-LPC.disable_s3=1", "-global", "driver=cfi.pflash01,property=secure,value=on", ] # fmt: skip if config.cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp): # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size # 2048. src = (config.output_dir_or_cwd() / config.output_with_compression).resolve() fname = src.parent / f"{src.name}-{uuid.uuid4().hex}" run( [ "systemd-repart", "--definitions=/", "--no-pager", "--pretty=no", "--offline=yes", "--empty=create", "--size=auto", "--sector-size=2048", "--copy-from", workdir(src), workdir(fname), ], # fmt: skip sandbox=config.sandbox( options=[ "--bind", fname.parent, workdir(fname.parent), "--ro-bind", src, workdir(src), ], ), ) # fmt: skip stack.callback(lambda: fname.unlink()) else: fname = stack.enter_context( copy_ephemeral(config, config.output_dir_or_cwd() / config.output_with_compression) ) apply_runtime_size(config, fname) kcl = [] if kernel: cmdline += ["-kernel", kernel] if any(s.startswith("root=") for s in finalize_kernel_command_line_extra(config)): pass elif config.output_format == OutputFormat.disk: # We can't rely on gpt-auto-generator when direct kernel booting so synthesize a root= # kernel argument instead. root = finalize_root(find_partitions(fname, sandbox=config.sandbox)) if not root: die("Cannot perform a direct kernel boot without a root or usr partition") kcl += [root] elif config.output_format == OutputFormat.directory: sock = stack.enter_context( start_virtiofsd( config, fname, name=config.machine_or_name(), uidmap=False, selinux=bool(want_selinux_relabel(config, fname, fatal=False)), ), ) cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root", ] # fmt: skip kcl += ["root=root", "rootfstype=virtiofs"] credentials = finalize_credentials(config) def add_virtiofs_mount( sock: Path, dst: PathString, cmdline: list[PathString], credentials: dict[str, str], *, tag: str ) -> None: cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}", ] # fmt: skip if "fstab.extra" not in credentials: credentials["fstab.extra"] = "" if credentials["fstab.extra"] and not credentials["fstab.extra"][-1] == "\n": credentials["fstab.extra"] += "\n" credentials["fstab.extra"] += f"{tag} {dst} virtiofs x-initrd.mount\n" if config.runtime_build_sources: for t in config.build_sources: src, dst = t.with_prefix("/work/src") sock = stack.enter_context(start_virtiofsd(config, src)) add_virtiofs_mount(sock, dst, cmdline, credentials, tag=src.name) if config.build_dir: sock = stack.enter_context(start_virtiofsd(config, config.build_dir)) add_virtiofs_mount(sock, "/work/build", cmdline, credentials, tag="build") for tree in config.runtime_trees: sock = stack.enter_context(start_virtiofsd(config, tree.source)) add_virtiofs_mount( sock, Path("/root/src") / (tree.target or ""), cmdline, credentials, tag=tree.target.name if tree.target else tree.source.name, ) if config.runtime_home and 
(p := current_home_dir()):
            sock = stack.enter_context(start_virtiofsd(config, p))
            add_virtiofs_mount(
                sock,
                Path("/root"),
                cmdline,
                credentials,
                tag="user-home",
            )

        if want_scratch(config) or config.output_format in (OutputFormat.disk, OutputFormat.esp):
            cmdline += ["-device", "virtio-scsi-pci,id=mkosi"]

        if want_scratch(config):
            scratch = stack.enter_context(generate_scratch_fs(config))
            cache = "cache.writeback=on,cache.direct=on,cache.no-flush=yes,aio=io_uring"
            cmdline += [
                "-drive", f"if=none,id=scratch,file={scratch},format=raw,discard=on,{cache}",
                "-device", "virtio-blk-pci,drive=scratch",
            ]  # fmt: skip
            kcl += [f"systemd.mount-extra=LABEL=scratch:/var/tmp:{config.distribution.filesystem()}"]

        if config.output_format == OutputFormat.cpio:
            cmdline += ["-initrd", fname]
        elif (
            kernel
            and KernelType.identify(config, kernel) != KernelType.uki
            and "-initrd" not in args.cmdline
        ):
            if (config.output_dir_or_cwd() / config.output_split_initrd).exists():
                cmdline += ["-initrd", config.output_dir_or_cwd() / config.output_split_initrd]
            elif config.initrds:
                initrd = config.output_dir_or_cwd() / f"initrd-{uuid.uuid4().hex}"
                join_initrds(config, config.initrds, initrd)
                stack.callback(lambda: initrd.unlink())
                cmdline += ["-initrd", initrd]

        if config.output_format in (OutputFormat.disk, OutputFormat.esp):
            direct = fname.stat().st_size % resource.getpagesize() == 0
            ephemeral = config.ephemeral
            cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring"  # noqa: E501
            device_type = "virtio-blk-pci"
            if config.cdrom:
                device_type = "scsi-cd"
            elif config.removable:
                device_type = "scsi-hd,removable=on"

            cmdline += [
                "-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}",
                "-device", f"{device_type},drive=mkosi,bootindex=1",
            ]  # fmt: skip

        if config.tpm == ConfigFeature.enabled or (
            config.tpm == ConfigFeature.auto
            and firmware.is_uefi()
            and config.find_binary("swtpm") is not None
        ):
            sock = stack.enter_context(start_swtpm(config))
            cmdline += [
                "-chardev", f"socket,id=chrtpm,path={sock}",
                "-tpmdev", "emulator,id=tpm0,chardev=chrtpm",
            ]  # fmt: skip

            if config.architecture.is_x86_variant():
                cmdline += ["-device", "tpm-tis,tpmdev=tpm0"]
            elif config.architecture.is_arm_variant():
                cmdline += ["-device", "tpm-tis-device,tpmdev=tpm0"]

        if QemuDeviceNode.vhost_vsock in qemu_device_fds:
            addr, notifications = stack.enter_context(vsock_notify_handler())
            credentials["vmm.notify_socket"] = addr

        if config.forward_journal:
            credentials["journal.forward_to_socket"] = stack.enter_context(
                start_journal_remote_vsock(config)
            )

        for k, v in credentials.items():
            payload = base64.b64encode(v.encode()).decode()
            if config.architecture.supports_smbios(firmware):
                cmdline += ["-smbios", f"type=11,value=io.systemd.credential.binary:{k}={payload}"]
            # qemu's fw_cfg device only supports keys up to 55 characters long.
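            # (Keys whose full fw_cfg path "opt/io.systemd.credentials/<key>" would not fit fall
            # through to the kernel command line branch below.)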
elif config.architecture.supports_fw_cfg() and len(k) <= 55 - len("opt/io.systemd.credentials/"): f = stack.enter_context(tempfile.NamedTemporaryFile(prefix="mkosi-fw-cfg-", mode="w")) f.write(v) f.flush() cmdline += ["-fw_cfg", f"name=opt/io.systemd.credentials/{k},file={f.name}"] elif kernel: kcl += [f"systemd.set_credential_binary={k}:{payload}"] kcl += finalize_kernel_command_line_extra(config) if kernel and ( KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ): cmdline += ["-append", " ".join(config.kernel_command_line + kcl)] elif config.architecture.supports_smbios(firmware): cmdline += [ "-smbios", f"type=11,value=io.systemd.stub.kernel-cmdline-extra={' '.join(kcl).replace(',', ',,')}", "-smbios", f"type=11,value=io.systemd.boot.kernel-cmdline-extra={' '.join(kcl).replace(',', ',,')}", ] for _, drives in groupby(config.drives, key=lambda d: d.file_id): file = stack.enter_context(finalize_drive(config, drives[0])) for drive in drives: arg = f"if=none,id={drive.id},file={file},format=raw,file.locking=off,cache.writeback=on,cache.direct=on,cache.no-flush=yes,aio=io_uring" # noqa: E501 if drive.options: arg += f",{drive.options}" cmdline += ["-drive", arg] cmdline += config.qemu_args cmdline += args.cmdline if cid is not None: stack.enter_context(finalize_state(config, cid)) # Reopen stdin, stdout and stderr to give qemu a private copy of them. This is a mitigation for the # case when running mkosi under meson and one or two of the three are redirected and their pipe might # block, but qemu opens all of them non-blocking because at least one of them is opened this way. stdin = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdin.fileno()}", os.O_RDONLY), OSError, sys.stdin.fileno(), ) stdout = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdout.fileno()}", os.O_WRONLY), OSError, sys.stdout.fileno(), ) stderr = try_or( lambda: os.open(f"/proc/self/fd/{sys.stderr.fileno()}", os.O_WRONLY), OSError, sys.stderr.fileno(), ) name = f"mkosi-{config.machine_or_name().replace('_', '-')}" with spawn( cmdline, stdin=stdin, stdout=stdout, stderr=stderr, pass_fds=qemu_device_fds.values(), env=os.environ | config.environment, foreground=True, sandbox=config.sandbox( network=True, devices=True, relaxed=True, options=["--same-dir"], setup=scope_cmd( name=name, description=f"mkosi Virtual Machine {name}", properties=config.unit_properties, environment=False, ), ), ) as proc: # We have to close these before we wait for qemu otherwise we'll deadlock as qemu will never # exit. for fd in qemu_device_fds.values(): os.close(fd) register_machine(config, proc.pid, fname, cid) if status := int(notifications.get("EXIT_STATUS", 0)): raise subprocess.CalledProcessError(status, cmdline) def run_ssh(args: Args, config: Config) -> None: with flock(INVOKING_USER.runtime_dir() / "machine"): if not (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists(): die( f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}", hint="Is the machine running and was it built with Ssh=yes and Vsock=yes?", ) state = json.loads(p.read_text()) if not state["SshKey"]: die( "An SSH key must be configured when booting the image to use 'mkosi ssh'", hint="Use 'mkosi genkey' to generate a new SSH key and certificate", ) cmd: list[PathString] = [ "ssh", "-i", state["SshKey"], "-F", "none", # Silence known hosts file errors/warnings. 
"-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", "-o", f"ProxyCommand={state['ProxyCommand']}", "root@mkosi", ] # fmt: skip cmd += args.cmdline run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox( network=True, devices=True, relaxed=True, # ssh insists on being able to resolve the current user which doesn't always work (think sssd or # similar) so let's switch to root which is always resolvable. options=["--same-dir", "--become-root"], ), ) mkosi-25.3/mkosi/resources/000077500000000000000000000000001474711424400157315ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/__init__.py000066400000000000000000000000001474711424400200300ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/completion.bash000066400000000000000000000034101474711424400207370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # shellcheck shell=bash _mkosi_compgen_files() { compgen -f -- "$1" } _mkosi_compgen_dirs() { compgen -d -- "$1" } _mkosi_completion() { local -a _mkosi_options _mkosi_verbs local -A _mkosi_nargs _mkosi_choices _mkosi_compgen ##VARIABLEDEFINITIONS## # completing_program="$1" local completing_word="$2" local completing_word_preceding="$3" if [[ "$completing_word" =~ ^- ]] # completing an option then readarray -t COMPREPLY < <(compgen -W "${_mkosi_options[*]}" -- "${completing_word}") elif [[ "$completing_word_preceding" =~ ^- ]] # the previous word was an option then current_option="${completing_word_preceding}" current_option_nargs="${_mkosi_nargs[${current_option}]}" current_option_choices="${_mkosi_choices[${current_option}]}" current_option_compgen="${_mkosi_compgen[${current_option}]}" if [[ -n "${current_option_compgen}" ]] then readarray -t COMPREPLY < <("${current_option_compgen}" "${completing_word}") fi readarray -t COMPREPLY -O "${#COMPREPLY[@]}" \ < <(compgen -W "${current_option_choices}" -- "${completing_word}") if [[ "${current_option_nargs}" == "?" 
]]
        then
            readarray -t COMPREPLY -O "${#COMPREPLY[@]}" \
                < <(compgen -W "${_mkosi_verbs[*]}" -- "${completing_word}")
        fi
    else
        # the preceding word wasn't an option, so we are doing positional
        # arguments now and all of them are verbs
        readarray -t COMPREPLY < <(compgen -W "${_mkosi_verbs[*]}" -- "${completing_word}")
    fi
}

complete -o filenames -F _mkosi_completion mkosi
complete -o filenames -F _mkosi_completion python -m mkosi
mkosi-25.3/mkosi/resources/completion.zsh000066400000000000000000000010561474711424400206320ustar00rootroot00000000000000#compdef mkosi
# SPDX-License-Identifier: LGPL-2.1-or-later
# shellcheck shell=zsh

_mkosi_verb(){
    if (( CURRENT == 1 )); then
        _describe -t commands 'mkosi verb' _mkosi_verbs
    else
        local curcontext="$curcontext" cmd="${${_mkosi_verbs[(r)$words[1]:*]%%:*}}"
        if (( $#cmd )); then
            if (( $+functions[_mkosi_$cmd] )); then
                _mkosi_$cmd
            else
                _message "no more options"
            fi
        else
            _message "unknown mkosi verb: $words[1]"
        fi
    fi
}
mkosi-25.3/mkosi/resources/man/000077500000000000000000000000001474711424400165045ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/man/mkosi-addon.1.md000066400000000000000000000023561474711424400214000ustar00rootroot00000000000000% mkosi-addon(1)
%
%

# NAME

mkosi-addon — Build addons for unified kernel images for the current system using mkosi

# SYNOPSIS

`mkosi-addon [options…]`

# DESCRIPTION

`mkosi-addon` is a wrapper on top of `mkosi` to simplify the generation of PE addons containing
customizations for unified kernel images specific to the running or local system. It will include
entries in `/etc/crypttab` marked with `x-initrd.attach`, and `/etc/kernel/cmdline`. Kernel modules
and firmware for the running hardware can be included if a local configuration with the option
`KernelModulesIncludeHost=` is provided.

# OPTIONS

`--kernel-version=`
:   Kernel version where to look for the kernel modules to include. Defaults to the kernel version
    of the running system (`uname -r`).

`--output=`, `-o`
:   Name to use for the generated output addon. Defaults to `mkosi-local.addon.efi`.

`--output-dir=`, `-O`
:   Path to a directory where to place all generated artifacts. Defaults to the current working
    directory.

`--debug=`
:   Enable additional debugging output.

`--debug-shell=`
:   Spawn debug shell in sandbox if a sandboxed command fails.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

# SEE ALSO

`mkosi(1)`
mkosi-25.3/mkosi/resources/man/mkosi-initrd.1.md000066400000000000000000000023061474711424400215770ustar00rootroot00000000000000% mkosi-initrd(1)
%
%

# NAME

mkosi-initrd — Build initrds or unified kernel images for the current system using mkosi

# SYNOPSIS

`mkosi-initrd [options…]`

# DESCRIPTION

`mkosi-initrd` is a wrapper on top of `mkosi` to simplify the generation of initrds and Unified
Kernel Images for the current running system.

# OPTIONS

`--kernel-version=`
:   Kernel version where to look for the kernel modules to include. Defaults to the kernel version
    of the running system (`uname -r`).

`--format=`, `-t`
:   Output format. One of `cpio` (CPIO archive), `uki` (a unified kernel image with the image in
    the `.initrd` PE section) or `directory` (for generating an image directly in a local
    directory). Defaults to `cpio`.

`--output=`, `-o`
:   Name to use for the generated output image file or directory. Defaults to `initrd`.

`--output-dir=`, `-O`
:   Path to a directory where to place all generated artifacts. Defaults to the current working
    directory.

`--debug=`
:   Enable additional debugging output.
`--debug-shell=`
:   Spawn debug shell in sandbox if a sandboxed command fails.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

# SEE ALSO

`mkosi(1)`
mkosi-25.3/mkosi/resources/man/mkosi-sandbox.1.md000066400000000000000000000120571474711424400217500ustar00rootroot00000000000000% mkosi-sandbox(1)
%
%

# NAME

mkosi-sandbox — Run commands in a custom sandbox

# SYNOPSIS

`mkosi-sandbox [options…] command [arguments]`

# DESCRIPTION

`mkosi-sandbox` runs the given command in a custom sandbox. The sandbox is configured by specifying
command line options that configure individual parts of the sandbox. If no command is specified,
`mkosi-sandbox` will start `bash` in the sandbox.

Note that this sandbox is not designed to be a security boundary. Its intended purpose is to allow
running commands in an isolated environment so they are not affected by the host system.

# OPTIONS

`--tmpfs DST`
:   Mounts a new tmpfs at `DST` in the sandbox.

`--dev DST`
:   Sets up a private `/dev` at `DST` in the sandbox. This private `/dev` will only contain the
    basic device nodes required for a functioning sandbox (e.g. `/dev/null`) and no actual devices.

`--proc DST`
:   Mounts `/proc` from the host at `DST` in the sandbox.

`--dir DST`
:   Creates a directory and all missing parent directories at `DST` in the sandbox. All directories
    are created with mode 755 unless the path ends with `/tmp` or `/var/tmp` in which case it is
    created with mode 1777.

`--bind SRC DST`
:   The source path `SRC` is recursively bind mounted to `DST` in the sandbox. The mountpoint is
    created in the sandbox if it does not yet exist. Any missing parent directories in the sandbox
    are created as well.

`--bind-try SRC DST`
:   Like `--bind`, but doesn't fail if the source path doesn't exist.

`--ro-bind SRC DST`
:   Like `--bind`, but does a recursive readonly bind mount.

`--ro-bind-try SRC DST`
:   Like `--bind-try`, but does a recursive readonly bind mount.

`--symlink SRC DST`
:   Creates a symlink at `DST` in the sandbox pointing to `SRC`. If `DST` already exists and is a
    file or symlink, a temporary symlink is created and mounted on top of `DST`.

`--write DATA DST`
:   Writes the string from `DATA` to `DST` in the sandbox.

`--overlay-lowerdir DIR`
:   Adds `DIR` from the host as a new lower directory for the next overlayfs mount.

`--overlay-upperdir DIR`
:   Sets the upper directory for the next overlayfs mount to `DIR` from the host. If set to
    `tmpfs`, the upperdir and workdir will be subdirectories of a fresh tmpfs mount.

`--overlay-workdir DIR`
:   Sets the working directory for the next overlayfs mount to `DIR` from the host.

`--overlay DST`
:   Mounts a new overlay filesystem at `DST` in the sandbox. The lower directories, upper directory
    and working directory are specified using the `--overlay-lowerdir`, `--overlay-upperdir` and
    `--overlay-workdir` options respectively. After each `--overlay` option is parsed, the other
    overlay options are reset.

`--unsetenv NAME`
:   Unsets the `NAME` environment variable in the sandbox.

`--setenv NAME VALUE`
:   Sets the `NAME` environment variable to `VALUE` in the sandbox.

`--chdir DIR`
:   Changes the working directory to `DIR` in the sandbox.

`--same-dir`
:   Changes the working directory in the sandbox to the current working directory that
    `mkosi-sandbox` is invoked in on the host.

`--become-root`
:   Maps the current user to the root user in the sandbox. If this option is not specified, the
    current user is mapped to itself in the sandbox.
    Regardless of whether this option is specified or not, the current user will have a full set of
    ambient capabilities in the sandbox. This includes `CAP_SYS_ADMIN` which means that the invoked
    process in the sandbox will be able to do bind mounts and other operations.

    If `mkosi-sandbox` is invoked as the root user, this option won't do anything.

`--suppress-chown`
:   Specifying this option causes all calls to `chown()` or similar system calls to become a noop
    in the sandbox. This is primarily useful when invoking package managers in the sandbox which
    might try to `chown()` files to different users or groups which would fail unless
    `mkosi-sandbox` is invoked by a privileged user.

`--unshare-net`
:   Specifying this option makes `mkosi-sandbox` unshare a network namespace if possible.

`--unshare-ipc`
:   Specifying this option makes `mkosi-sandbox` unshare an IPC namespace if possible.

`--exec-fd FD`
:   The specified `FD` will be closed when `mkosi-sandbox` calls `execvp()`. This is useful to wait
    until all setup logic has completed before continuing execution in the parent process invoking
    `mkosi-sandbox`.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

# EXAMPLES

Start `bash` in the current working directory in its own network namespace as the current user.

```sh
mkosi-sandbox --bind / / --same-dir --unshare-net
```

Run `id` as the root user in a sandbox with only `/usr` from the host plus the necessary symlinks
to be able to run commands.

```sh
mkosi-sandbox \
    --ro-bind /usr /usr \
    --symlink usr/bin /bin \
    --symlink usr/lib /lib \
    --symlink usr/lib64 /lib64 \
    --symlink usr/sbin /sbin \
    --dev /dev \
    --proc /proc \
    --tmpfs /tmp \
    --become-root \
    id
```

# SEE ALSO

`mkosi(1)`
mkosi-25.3/mkosi/resources/man/mkosi.1.md000066400000000000000000004455631474711424400203250ustar00rootroot00000000000000% mkosi(1)
%
%

# NAME

mkosi — Build Bespoke OS Images

# SYNOPSIS

`mkosi [options…] summary`

`mkosi [options…] cat-config`

`mkosi [options…] build [command line…]`

`mkosi [options…] shell [command line…]`

`mkosi [options…] boot [nspawn settings…]`

`mkosi [options…] vm [vmm parameters…]`

`mkosi [options…] ssh [command line…]`

`mkosi [options…] journalctl [command line…]`

`mkosi [options…] coredumpctl [command line…]`

`mkosi [options…] sysupdate [command line…]`

`mkosi [options…] sandbox [command line …]`

`mkosi [options…] clean`

`mkosi [options…] serve`

`mkosi [options…] burn <device>`

`mkosi [options…] bump`

`mkosi [options…] genkey`

`mkosi [options…] documentation [manual]`

`mkosi [options…] completion [shell]`

`mkosi [options…] dependencies`

`mkosi [options…] help`

# DESCRIPTION

**mkosi** is a tool for easily building customized OS images. It's a fancy wrapper around **dnf**,
**apt**, **pacman** and **zypper** that may generate disk images with a number of bells and
whistles.

## Command Line Verbs

The following command line verbs are known:

`summary`
:   Show a human-readable summary of all options used for building the images. This will parse the
    command line and configuration files, but only print what it is configured for and not actually
    build or run anything.

`cat-config`
:   Output the names and contents of all loaded configuration files. **mkosi** loads a bunch of
    files from different locations and this command makes it easier to figure out what is
    configured where.

`build`
:   Build the image based on the settings passed on the command line and in the configuration
    files. This command is the default if no verb is specified.
Any command line arguments specified after the verb will be passed directly to the build script, if one is defined. `shell` : This builds the image if it is not built yet, and then invokes **systemd-nspawn** to run an interactive shell in the image. This doesn't require booting the system, it's like a better chroot. An optional command line may be specified after the `shell` verb, to be invoked in place of the shell in the container. Use `-f` in order to rebuild the image unconditionally before acquiring the shell, see below. This command must be executed as `root`. `boot` : Similar to `shell`, but instead of spawning a shell, it boots systemd in the image using **systemd-nspawn**. An optional command line may be specified after the `boot` verb, which can contain extra nspawn options as well as arguments which are passed as the *kernel command line* to the init system in the image. `vm` : Similar to `boot`, but uses the configured virtual machine monitor (by default `qemu`) to boot up the image, i.e. instead of container virtualization, virtual machine virtualization is used. How extra command line arguments are interpreted depends on the configured virtual machine monitor. See `VirtualMachineMonitor=` for more information. `ssh` : When the image is built with the `Ssh=yes` option, this command connects to a booted virtual machine via SSH. Make sure to run `mkosi ssh` with the same config as `mkosi build` so that it has the necessary information available to connect to the running virtual machine via SSH. Specifically, the SSH private key from the `SshKey=` setting is used to connect to the virtual machine. Use `mkosi genkey` to automatically generate a key and certificate that will be picked up by **mkosi**. Any arguments passed after the `ssh` verb are passed as arguments to the **ssh** invocation. To connect to a container, use `machinectl login` or `machinectl shell`. The `Machine=` option can be used to give the machine a custom hostname when booting it which can later be used to **ssh** into the image (e.g. `mkosi --machine=mymachine vm` followed by `mkosi --machine=mymachine ssh`). `journalctl` : Uses **journalctl** to inspect the journal inside the image. Any arguments specified after the **journalctl** verb are appended to the **journalctl** invocation. `coredumpctl` : Uses **coredumpctl** to look for coredumps inside the image. Any arguments specified after the **coredumpctl** verb are appended to the **coredumpctl** invocation. `sysupdate` : Invokes **systemd-sysupdate** with the `--transfer-source=` option set to the output directory and the `--definitions=` option set to the directory configured with `SysupdateDirectory=`. Any arguments specified after the `sysupdate` verb are passed directly to **systemd-sysupdate** invocation. `sandbox` : Run arbitrary commands inside of the same sandbox used to execute other verbs such as `boot`, `shell`, `vm` and more. This means `/usr` will be replaced by `/usr` from the tools tree if one is used while everything else will remain in place. If no command is provided, `$SHELL` will be executed or **bash** if `$SHELL` is not set. `clean` : Remove build artifacts generated on a previous build. If combined with `-f`, also removes incremental build cache images. If `-f` is specified twice, also removes any package cache. `serve` : This builds the image if it is not built yet, and then serves the output directory (i.e. usually `mkosi.output/`, see below) via a small embedded HTTP server, listening on port 8081. 
    Combine with `-f` in order to rebuild the image unconditionally before serving it.

    This command is useful for testing network-based acquisition of OS images, for example via
    `machinectl pull-raw …` and `machinectl pull-tar …`.

`burn <device>`
:   This builds the image if it is not built yet, and then writes it to the specified block device.
    The partition contents are written as-is, but the GPT partition table is corrected to match
    sector and disk size of the specified medium.

`bump`
:   Bumps the image version from `mkosi.version` and writes the resulting version string to
    `mkosi.version`. This is useful for implementing a simple versioning scheme: each time this
    verb is called the version is bumped in preparation for the subsequent build. Note that
    `--auto-bump`/`-B` may be used to automatically bump the version after each successful build.

`genkey`
:   Generate a pair of SecureBoot keys for usage with the `SecureBootKey=`/`--secure-boot-key=` and
    `SecureBootCertificate=`/`--secure-boot-certificate=` options.

`documentation`
:   Show **mkosi**'s documentation. If no argument is given, the **mkosi** man page is shown, but
    the arguments `mkosi`, `mkosi-initrd`, `initrd`, `mkosi-sandbox`, `sandbox`, `mkosi.news` and
    `news` are supported and respectively show the man pages for **mkosi**, **mkosi-initrd**,
    **mkosi-sandbox** and **mkosi**'s NEWS file. By default this verb will try several ways to
    output the documentation, but a specific option can be chosen with the `--doc-format` option.
    Distro packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources` directory of
    the Python package, if it is missing, as well as to install it in the appropriate search path
    for man pages. The man page can be generated from the markdown file
    `mkosi/resources/man/mkosi.1.md` e.g. via `pandoc -t man -s -o mkosi.1 mkosi.1.md`.

`completion`
:   Generate shell completion for the shell given as argument and print it to stdout. The arguments
    `bash`, `fish`, and `zsh` are understood.

`dependencies`
:   Output the list of packages required by **mkosi** to build and boot images. This list can be
    piped directly to a package manager to install the packages. For example, if the host system
    uses the **dnf** package manager, the packages could be installed as follows:

    ```sh
    mkosi dependencies | xargs -d '\n' dnf install
    ```

`help`
:   This verb is equivalent to the `--help` switch documented below: it shows a brief usage
    explanation.

## Commandline-only Options

Those settings cannot be configured in the configuration files.

`--force`, `-f`
:   Replace the output file if it already exists, when building an image. By default when building
    an image and an output artifact already exists **mkosi** will refuse operation. Specify this
    option once to delete all build artifacts from a previous run before re-building the image. If
    incremental builds are enabled, specifying this option twice will ensure the intermediary cache
    files are removed, too, before the re-build is initiated. If a package cache is used (also see
    the **Files** section below), specifying this option thrice will ensure the package cache is
    removed too, before the re-build is initiated. For the `clean` operation this option has a
    slightly different effect: by default the verb will only remove build artifacts from a previous
    run, when specified once the incremental cache files are deleted too, and when specified twice
    the package cache is also removed.

`--directory=`, `-C`
:   Takes a path to a directory. **mkosi** switches to this directory before doing anything.
    Note that the various configuration files are searched for in this directory, hence using this
    option is an effective way to build a project located in a specific directory.

`--debug`
:   Enable additional debugging output.

`--debug-shell`
:   When executing a command in the image fails, **mkosi** will start an interactive shell in the
    image allowing further debugging.

`--debug-workspace`
:   When specified, the workspace directory will not be deleted and its location will be logged
    when **mkosi** exits.

`--debug-sandbox`
:   Run **mkosi-sandbox** with **strace**.

`--version`
:   Show package version.

`--help`, `-h`
:   Show brief usage information.

`--genkey-common-name=`
:   Common name to be used when generating keys via **mkosi**'s `genkey` command. Defaults to
    `mkosi of %u`, where `%u` expands to the username of the user invoking **mkosi**.

`--genkey-valid-days=`
:   Number of days that the keys should remain valid when generating keys via **mkosi**'s `genkey`
    command. Defaults to two years (730 days).

`--auto-bump=`, `-B`
:   If specified, after each successful build the version is bumped in a fashion equivalent to the
    `bump` verb, in preparation for the next build. This is useful for simple, linear version
    management: each build in a series will have a version number one higher than the previous one.

`--doc-format`
:   The format to show the documentation in. Supports the values `markdown`, `man`, `pandoc`,
    `system` and `auto`. In the case of `markdown` the documentation is shown in the original
    Markdown format. `man` shows the documentation in man page format, if it is available.
    **pandoc** will generate the man page format on the fly, if **pandoc** is available. `system`
    will show the system-wide man page for **mkosi**, which may or may not correspond to the
    version you are using, depending on how you installed **mkosi**. `auto`, which is the default,
    will try all methods in the order `man`, `pandoc`, `markdown`, `system`.

`--json`
:   Show the summary output as JSON-SEQ.

`--wipe-build-dir`, `-w`
:   Wipe the build directory if one is configured before building the image.

## Supported output formats

The following output formats are supported:

* Raw *GPT* disk image, created using **systemd-repart** (*disk*)
* Plain directory, containing the OS tree (*directory*)
* Tar archive (*tar*)
* CPIO archive (*cpio*)

The output format may also be set to *none* to have **mkosi** produce no image at all. This can be
useful if you only want to use the image to produce another output in the build scripts (e.g. build
an RPM).

When a *GPT* disk image is created, repart partition definition files may be placed in
`mkosi.repart/` to configure the generated disk image.

It is highly recommended to run **mkosi** on a file system that supports reflinks such as XFS and
btrfs and to keep all related directories on the same file system. This allows **mkosi** to create
images very quickly by using reflinks to perform copying via copy-on-write operations.

## Configuration Settings

The following settings can be set through configuration files (the syntax with
`SomeSetting=value`) and on the command line (the syntax with `--some-setting=value`). For some
command line parameters, a single-letter shortcut is also allowed. In the configuration files, the
setting must be in the appropriate section, so the settings are grouped by section below.

Configuration is parsed in the following order:

* The command line arguments are parsed.
* `mkosi.local.conf` or `mkosi.local` is parsed if it exists.
This file or directory should be in `.gitignore` (or equivalent) and is intended for local configuration. * Any default paths (depending on the option) are configured if the corresponding path exists. * `mkosi.conf` is parsed if it exists in the directory configured with `--directory=` or the current working directory if `--directory=` is not used. * `mkosi.conf.d/` is parsed in the same directory if it exists. Each directory and each file with the `.conf` extension in `mkosi.conf.d/` is parsed. Any directory in `mkosi.conf.d` is parsed as if it were a regular top level directory. * If any profiles are configured, their configuration is parsed from the `mkosi.profiles/` directory. * Subimages are parsed from the `mkosi.images` directory if it exists. Note that settings configured via the command line always override settings configured via configuration files. If the same setting is configured more than once via configuration files, later assignments override earlier assignments except for settings that take a collection of values. Also, settings read from `mkosi.local` or `mkosi.local.conf` will override settings from configuration files that are parsed later, but not settings specified on the CLI. For settings that take a single value, the empty assignment (`SomeSetting=` or `--some-setting=`) can be used to override a previous setting and reset to the default. Settings that take a collection of values are merged by appending the new values to the previously configured values. Assigning the empty string to such a setting removes all previously assigned values, and overrides any configured default values as well. The values specified on the CLI are appended after all the values from configuration files. To conditionally include configuration files, the `[Match]` section can be used. A `[Match]` section consists of individual conditions. Conditions can use a pipe symbol (`|`) after the equals sign (`…=|…`), which causes the condition to become a triggering condition. The config file will be included if the logical AND of all non-triggering conditions and the logical OR of all triggering conditions is satisfied. To negate the result of a condition, prefix the argument with an exclamation mark. If an argument is prefixed with the pipe symbol and an exclamation mark, the pipe symbol must be passed first, and the exclamation second. Note that `[Match]` conditions compare against the current values of specific settings, and do not take into account changes made to the setting in configuration files that have not been parsed yet (settings specified on the CLI are taken into account). Also note that matching against a setting and then changing its value afterwards in a different config file may lead to unexpected results. The `[Match]` section of a `mkosi.conf` file in a directory applies to the entire directory. If the conditions are not satisfied, the entire directory is skipped. The `[Match]` sections of files in `mkosi.conf.d/` and `mkosi.local.conf` only apply to the file itself. If there are multiple `[Match]` sections in the same configuration file, each of them has to be satisfied in order for the configuration file to be included. Specifically, triggering conditions only apply to the current `[Match]` section and are reset between multiple `[Match]` sections. 
As an example, the following will only match if the output format is one of `disk` or `directory` and the architecture is one of `x86-64` or `arm64`:

```ini
[Match]
Format=|disk
Format=|directory

[Match]
Architecture=|x86-64
Architecture=|arm64
```

The `[TriggerMatch]` section can be used to indicate triggering match sections. These are identical to triggering conditions, except they apply to the entire match section instead of just a single condition. As an example, the following will match if the distribution is `debian` and the release is `bookworm`, or if the distribution is `ubuntu` and the release is `noble`.

```ini
[TriggerMatch]
Distribution=debian
Release=bookworm

[TriggerMatch]
Distribution=ubuntu
Release=noble
```

The semantics of conditions in `[TriggerMatch]` sections are the same as in `[Match]`, i.e. all normal conditions are joined by a logical AND and all triggering conditions are joined by a logical OR. When mixing `[Match]` and `[TriggerMatch]` sections, a match is achieved when all `[Match]` sections match and at least one `[TriggerMatch]` section matches. If no match sections are present at all, the result is true. Logically this means:

```
(⋀ᵢ Matchᵢ) ∧ (⋁ᵢ TriggerMatchᵢ)
```

Command line options that take no argument are shown without `=` in their long version. In the config files, they should be specified with a boolean argument: either `1`, `yes`, or `true` to enable, or `0`, `no`, or `false` to disable.

### [Distribution] Section

`Distribution=`, `--distribution=`, `-d`
:   The distribution to install in the image. Takes one of the following arguments: `fedora`, `debian`, `kali`, `ubuntu`, `arch`, `opensuse`, `mageia`, `centos`, `rhel`, `rhel-ubi`, `openmandriva`, `rocky`, `alma`, `azure` or `custom`. If not specified, defaults to the distribution of the host or `custom` if the distribution of the host is not a supported distribution.

`Release=`, `--release=`, `-r`
:   The release of the distribution to install in the image. The precise syntax of the argument this takes depends on the distribution used, and is either a numeric string (in case of Fedora Linux, CentOS, …, e.g. `29`), or a distribution version name (in case of Debian, Kali, Ubuntu, …, e.g. `artful`). Defaults to a recent version of the chosen distribution, or the version of the distribution running on the host if it matches the configured distribution.

`Architecture=`, `--architecture=`
:   The architecture to build the image for. The architectures that are actually supported depend on the distribution used and whether a bootable image is requested or not. When building for a foreign architecture, you'll also need to install and register a user mode emulator for that architecture.

One of the following architectures can be specified per image built: `alpha`, `arc`, `arm`, `arm64`, `ia64`, `loongarch64`, `mips64-le`, `mips-le`, `parisc`, `ppc`, `ppc64`, `ppc64-le`, `riscv32`, `riscv64`, `s390`, `s390x`, `tilegx`, `x86`, `x86-64`.

`Mirror=`, `--mirror=`, `-m`
:   The mirror to use for downloading the distribution packages. Expects a mirror URL as argument. If not provided, the default mirror for the distribution is used.
The default mirrors for each distribution are as follows (unless specified, the same mirror is used for all architectures):

|                | x86-64                            | aarch64                        |
|----------------|-----------------------------------|--------------------------------|
| `debian`       | http://deb.debian.org/debian      |                                |
| `arch`         | https://geo.mirror.pkgbuild.com   | http://mirror.archlinuxarm.org |
| `opensuse`     | http://download.opensuse.org      |                                |
| `kali`         | http://http.kali.org/kali         |                                |
| `ubuntu`       | http://archive.ubuntu.com         | http://ports.ubuntu.com        |
| `centos`       | https://mirrors.centos.org        |                                |
| `rocky`        | https://mirrors.rockylinux.org    |                                |
| `alma`         | https://mirrors.almalinux.org     |                                |
| `fedora`       | https://mirrors.fedoraproject.org |                                |
| `rhel-ubi`     | https://cdn-ubi.redhat.com        |                                |
| `mageia`       | https://www.mageia.org            |                                |
| `openmandriva` | http://mirrors.openmandriva.org   |                                |
| `azure`        | https://packages.microsoft.com/   |                                |

`LocalMirror=`, `--local-mirror=`
:   The mirror will be used as a local, plain and direct mirror instead of using it as a prefix for the full set of repositories normally supported by distributions. Useful for fully offline builds with a single repository. Supported on **deb**-, **rpm**-, and **pacman**-based distributions. Overrides `--mirror=`, but only for the local **mkosi** build; it will not be configured inside the final image. Instead, `--mirror=` (or the default repository) will be configured inside the final image.

`RepositoryKeyCheck=`, `--repository-key-check=`
:   Controls signature/key checks when using repositories. Enabled by default. Useful to disable checks when combined with `--local-mirror=` and using only a repository from a local filesystem.

`RepositoryKeyFetch=`, `--repository-key-fetch=`
:   Controls whether **mkosi** will fetch distribution GPG keys remotely. Enabled by default on Ubuntu when not using a tools tree or when using Ubuntu tools trees to build Arch Linux or RPM-based distributions. Disabled by default on all other distributions. When disabled, the distribution GPG keys for the target distribution have to be installed locally on the host system alongside the package manager for that distribution.

This setting is only implemented for distributions using **dnf**, **pacman** or **zypper** as their package manager. For other distributions the distribution GPG keys are always looked up locally regardless of the value of this setting. To make the distribution GPG keys for distributions available without enabling this setting, the corresponding package has to be installed on the host. This is usually one of `archlinux-keyring`, `debian-keyring`, `kali-archive-keyring`, `ubuntu-keyring` or `distribution-gpg-keys` (for RPM-based distributions).

`Repositories=`, `--repositories=`
:   Enable package repositories that are disabled by default. This can be used to enable the EPEL repos for CentOS or different components of the Debian/Kali/Ubuntu repositories.

### [Output] Section

`Format=`, `--format=`, `-t`
:   The image format type to generate.
One of `directory` (for generating an OS image directly in a local directory), `tar` (similar, but a tarball of the OS image is generated), `cpio` (similar, but a cpio archive is generated), `disk` (a block device OS image with a GPT partition table), `uki` (a unified kernel image with the OS image in the `.initrd` PE section), `esp` (`uki` but wrapped in a disk image with only an ESP partition), `oci` (a directory compatible with the OCI image specification), `sysext`, `confext`, `portable`, `addon` or `none` (the OS image is solely intended as a build image to produce another artifact).

If the `disk` output format is used, the disk image is generated using **systemd-repart**. The repart partition definition files to use can be configured using the `RepartDirectories=` setting or via `mkosi.repart/`. When verity partitions are configured using **systemd-repart**'s `Verity=` setting, **mkosi** will automatically parse the verity hash partition's roothash from **systemd-repart**'s JSON output and include it in the kernel command line of every unified kernel image built by **mkosi**.

If the `none` output format is used, the outputs from a previous build are not removed, but clean scripts (see `CleanScripts=`) are still executed. This allows rerunning a build script (see `BuildScripts=`) without removing the results of a previous build.

`ManifestFormat=`, `--manifest-format=`
:   The manifest format type or types to generate. A comma-delimited list consisting of `json` (the standard JSON output format that describes the packages installed), `changelog` (a human-readable text format designed for diffing). By default no manifest is generated.

`Output=`, `--output=`, `-o`
:   Name to use for the generated output image file or directory. Defaults to `image`. If `ImageId=` is specified, it is used as the default output name, optionally suffixed with the version set with `ImageVersion=`. If a specific image is built from `mkosi.images`, the name of the image is preferred over `ImageId=`. Note that this option does not allow configuring the output directory; use `OutputDirectory=` for that.

Note that this option only specifies the output prefix; depending on the specific output format, compression and image version used, the full output name might be `image_7.8.raw.xz`.

`CompressOutput=`, `--compress-output=`
:   Configure compression for the resulting image or archive. The argument can be either a boolean or a compression algorithm (**xz**, **zstd**). **zstd** compression is used by default, except CentOS and derivatives up to version 8, which default to **xz**, and OCI images, which default to **gzip**. Note that when applied to block device image types, compression means the image cannot be started directly but needs to be decompressed first. This also means that the `shell`, `boot` and `vm` verbs are not available when this option is used. Implied for `tar`, `cpio`, `uki`, `esp`, `oci` and `addon`.

`CompressLevel=`, `--compress-level=`
:   Configure the compression level to use. Takes an integer. The possible values depend on the compression being used.

`OutputDirectory=`, `--output-directory=`, `-O`
:   Path to a directory where to place all generated artifacts. If this is not specified and the directory `mkosi.output/` exists in the local directory, it is automatically used for this purpose.

`OutputMode=`, `--output-mode=`
:   File system access mode used when creating the output image file. Takes an access mode in octal notation. If not set, uses the current system defaults.
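To illustrate, a minimal `[Output]` section combining several of the settings described above might look like the following sketch (the output name `appliance` is just a placeholder):

```ini
[Output]
# Build a GPT disk image and compress the result with zstd.
Format=disk
Output=appliance
CompressOutput=zstd
OutputDirectory=mkosi.output
```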
`ImageVersion=`, `--image-version=`
:   Configure the image version. This accepts any string, but it is recommended to specify a series of dot separated components. The version may also be configured by reading a `mkosi.version` file, in which case it may be conveniently managed via the `bump` verb or the `--auto-bump` option. If that file is executable, it is run as a script and its stdout is read instead (see the **Scripts** section below). When specified, the image version is included in the default output file name, i.e. instead of `image.raw` the default will be `image_0.1.raw` for version `0.1` of the image, and similar. The version is also passed via the `$IMAGE_VERSION` environment variable to any build scripts invoked (which may be useful to patch it into `/usr/lib/os-release` or similar, in particular the `IMAGE_VERSION=` field of it).

`ImageId=`, `--image-id=`
:   Configure the image identifier. This accepts a freeform string that shall be used to identify the image. If set, the default output file will be named after it (possibly suffixed with the version). The identifier is also passed via the `$IMAGE_ID` environment variable to any build scripts invoked. The image ID is automatically added to `/usr/lib/os-release`.

`SplitArtifacts=`, `--split-artifacts=`
:   The artifact types to split out of the final image. A comma-delimited list consisting of `uki`, `kernel`, `initrd` and `partitions`. When building a bootable image `kernel` and `initrd` correspond to their artifact found in the image (or in the UKI), while `uki` copies out the entire UKI.

When building a disk image and `partitions` is specified, pass `--split=yes` to **systemd-repart** to have it write out split partition files for each configured partition. Read the [man](https://www.freedesktop.org/software/systemd/man/systemd-repart.html#--split=BOOL) page for more information. This is useful in A/B update scenarios where an existing disk image shall be augmented with a new version of a root or `/usr` partition along with its Verity partition and unified kernel.

By default `uki`, `kernel` and `initrd` are split out.

`RepartDirectories=`, `--repart-directory=`
:   Paths to directories containing **systemd-repart** partition definition files that are used when **mkosi** invokes **systemd-repart** when building a disk image. If `mkosi.repart/` exists in the local directory, it will be used for this purpose as well. Note that **mkosi** invokes repart with `--root=` set to the root of the image, so any `CopyFiles=` source paths in partition definition files will be relative to the image root directory.

`SectorSize=`, `--sector-size=`
:   Override the default sector size that **systemd-repart** uses when building a disk image.

`Overlay=`, `--overlay`
:   When used together with `BaseTrees=`, the output will consist only of changes to the specified base trees. Each base tree is attached as a lower layer in an overlayfs structure, and the output becomes the upper layer, initially empty. Thus files that are not modified compared to the base trees will not be present in the final output.

This option may be used to create [systemd *system extensions* or *portable services*](https://uapi-group.org/specifications/specs/extension_image).

`Seed=`, `--seed=`
:   Takes a UUID as argument or the special value `random`. Overrides the seed that **systemd-repart** uses when building a disk image. This is useful to achieve reproducible builds, where deterministic UUIDs and other partition metadata should be derived on each build.
If not specified explicitly and the file `mkosi.seed` exists in the local directory, the UUID to use is read from it. Otherwise, a random UUID is used.

`CleanScripts=`, `--clean-script=`
:   Takes a comma-separated list of paths to executables that are used as the clean scripts for this image. See the **Scripts** section for more information.

### [Content] Section

`Packages=`, `--package=`, `-p`
:   Install the specified distribution packages (i.e. RPM, deb, …) in the image. Takes a comma-separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.

Use `BuildPackages=` to specify packages that shall only be installed in an overlay that is mounted when the prepare scripts are executed with the `build` argument and when the build scripts are executed.

The types and syntax of *package specifications* that are allowed depend on the package installer (e.g. **dnf** for RPM-based distros or **apt** for deb-based distros), but may include package names, package names with version and/or architecture, package name globs, package groups, and virtual provides, including file paths.

See `PackageDirectories=` for information on how to make local packages available for installation with `Packages=`.

**Example**: when using a distro that uses **dnf**, the following configuration would install the **meson** package (in the latest version), the 32-bit version of the `libfdisk-devel` package, all available packages that start with the `git-` prefix, a **systemd** RPM from the local file system, one of the packages that provides `/usr/bin/ld`, the packages in the *Development Tools* group, and the package that contains the `mypy` python module.

```ini
Packages=meson
         libfdisk-devel.i686
         git-*
         /usr/bin/ld
         @development-tools
         python3dist(mypy)
```

`BuildPackages=`, `--build-package=`
:   Similar to `Packages=`, but configures packages to install only in an overlay that is made available on top of the image to the prepare scripts when executed with the `build` argument and the build scripts. This option should be used to list packages containing header files, compilers, build systems, linkers and other build tools the `mkosi.build` scripts require to operate. Note that packages listed here will be absent from the final image.

`VolatilePackages=`, `--volatile-package=`
:   Similar to `Packages=`, but packages configured with this setting are not cached when `Incremental=` is enabled and are installed after executing any build scripts.

Specifically, this setting can be used to install packages that change often or which are built by a build script.

`PackageDirectories=`, `--package-directory=`
:   Specify directories containing extra packages to be made available during the build. **mkosi** will create a local repository containing all packages in these directories and make it available when installing packages or running scripts. If the `mkosi.packages/` directory is found in the local directory it is also used for this purpose.

`VolatilePackageDirectories=`, `--volatile-package-directory=`
:   Like `PackageDirectories=`, but any changes to the packages in these directories will not invalidate the cached images if `Incremental=` is enabled.

Additionally, build scripts can add more packages to the local repository by placing the built packages in `$PACKAGEDIR`. The packages placed in `$PACKAGEDIR` are shared between all image builds and thus available for installation in all images using `VolatilePackages=`.
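As a sketch of how these settings interact (the package names below are hypothetical), the following configuration installs a package produced by the build scripts via the local repository fed from `$PACKAGEDIR`:

```ini
[Content]
# Installed into the image and cached when Incremental= is enabled.
Packages=systemd
# Hypothetical package built by a build script and placed in $PACKAGEDIR;
# installed after the build scripts have run and never cached.
VolatilePackages=my-app
# Available only in the overlay used by the prepare and build scripts.
BuildPackages=gcc
```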
`WithRecommends=`, `--with-recommends=`
:   Configures whether to install recommended or weak dependencies (depending on how they are named by the package manager used). By default, recommended packages are not installed. This is only used for package managers that support the concept, which are currently **apt**, **dnf** and **zypper**.

`WithDocs=`, `--with-docs`
:   Include documentation in the image. Enabled by default. When disabled, documentation is not included in the image, provided the underlying distribution package manager supports omitting it. The `$WITH_DOCS` environment variable passed to the `mkosi.build` scripts is set to `0` or `1` depending on whether this option is enabled or disabled.

`BaseTrees=`, `--base-tree=`
:   Takes a comma-separated list of paths to use as base trees. When used, these base trees are each copied into the OS tree and form the base distribution instead of installing the distribution from scratch. Only extra packages are installed on top of the ones already installed in the base trees. Note that for this to work properly, the base image still needs to contain the package manager metadata, which can be retained by setting `CleanPackageMetadata=no` (see `CleanPackageMetadata=`).

Instead of a directory, a tar file or a disk image may be provided. In this case it is unpacked into the OS tree. This mode of operation allows setting permissions and file ownership explicitly, in particular for projects stored in a version control system such as **git** which retain full file ownership and access mode metadata for committed files.

`SkeletonTrees=`, `--skeleton-tree=`
:   Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to copy into the OS tree before invoking the package manager. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to insert files and directories into the OS tree before the package manager installs any packages. If the `mkosi.skeleton/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **Files** section below).

Note that skeleton trees are cached and any changes to skeleton trees after a cached image has been built (when using `Incremental=`) are only applied when the cached image is rebuilt (by using `-ff` or running `mkosi -f clean`).

As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.skeleton.tar` will be automatically used if found in the local directory.

To add extra package manager configuration files such as extra repositories, use `SandboxTrees=` as **mkosi** invokes the package managers from outside the image and not inside, so any package manager configuration files provided via `SkeletonTrees=` won't take effect when **mkosi** invokes a package manager to install packages.

`ExtraTrees=`, `--extra-tree=`
:   Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to copy from the host into the image. The second path of each pair refers to the target directory inside the image. If the second path is not provided, the directory is copied on top of the root directory of the image. The second path is always interpreted as an absolute path. Use this to override any default configuration files shipped with the distribution.
If the `mkosi.extra/` directory is found in the local directory it is also used for this purpose with the root directory as target (also see the **Files** section below).

As with the base tree logic above, instead of a directory, a tar file may be provided too. `mkosi.extra.tar` will be automatically used if found in the local directory.

`RemovePackages=`, `--remove-package=`
:   Takes a comma-separated list of package specifications for removal, in the same format as `Packages=`. The removal will be performed as one of the last steps. This step is skipped if `CleanPackageMetadata=no` is used.

`RemoveFiles=`, `--remove-files=`
:   Takes a comma-separated list of globs. Files in the image matching the globs will be purged at the end.

`CleanPackageMetadata=`, `--clean-package-metadata=`
:   Enable/disable removal of package manager databases and repository metadata at the end of installation. Can be specified as `true`, `false`, or `auto` (the default). With `auto`, package manager databases and repository metadata will be removed if the respective package manager executable is *not* present at the end of the installation.

`SourceDateEpoch=`, `--source-date-epoch=`
:   Takes a timestamp in seconds since the UNIX epoch as argument. File modification times of all files will be clamped to this value. The variable is also propagated to **systemd-repart** and scripts executed by **mkosi**. If not set explicitly, `SOURCE_DATE_EPOCH` from `--environment` and from the host environment are tried in that order. This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

`SyncScripts=`, `--sync-script=`
:   Takes a comma-separated list of paths to executables that are used as the sync scripts for this image. See the **Scripts** section for more information.

`PrepareScripts=`, `--prepare-script=`
:   Takes a comma-separated list of paths to executables that are used as the prepare scripts for this image. See the **Scripts** section for more information.

`BuildScripts=`, `--build-script=`
:   Takes a comma-separated list of paths to executables that are used as the build scripts for this image. See the **Scripts** section for more information.

`PostInstallationScripts=`, `--postinst-script=`
:   Takes a comma-separated list of paths to executables that are used as the post-installation scripts for this image. See the **Scripts** section for more information.

`FinalizeScripts=`, `--finalize-script=`
:   Takes a comma-separated list of paths to executables that are used as the finalize scripts for this image. See the **Scripts** section for more information.

`PostOutputScripts=`, `--postoutput-script=`
:   Takes a comma-separated list of paths to executables that are used as the post output scripts for this image. See the **Scripts** section for more information.

`Bootable=`, `--bootable=`
:   Takes a boolean or `auto`. Enables or disables generation of a bootable image. If enabled, **mkosi** will install an EFI bootloader, and add an ESP partition when the disk image output is used. If the selected EFI bootloader (see `Bootloader=`) is not installed or no kernel images can be found, the build will fail. `auto` behaves as if the option was enabled, but the build won't fail if either no kernel images or the selected EFI bootloader can't be found.
If disabled, no bootloader will be installed even if found inside the image, no unified kernel images will be generated and no ESP partition will be added to the image if the disk output format is used.

`Bootloader=`, `--bootloader=`
:   Takes one of `none`, `systemd-boot`, `uki`, `grub`, `systemd-boot-signed`, `uki-signed` or `grub-signed`. Defaults to `systemd-boot`.

If set to `none`, no EFI bootloader will be installed into the image. If set to `systemd-boot`, **systemd-boot** will be installed and for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. If set to `uki`, a single UKI will be generated for the latest installed kernel (the one with the highest version), which is installed to `EFI/BOOT/BOOTX64.EFI` in the ESP. If set to `grub`, for each installed kernel, a UKI will be generated and stored in `EFI/Linux` in the ESP. For each generated UKI, a menu entry is appended to the grub configuration in `grub/grub.cfg` in the ESP which chainloads into the UKI. A shim grub.cfg is also written to `EFI//grub.cfg` in the ESP which loads `grub/grub.cfg` in the ESP, for compatibility with signed versions of grub which load the grub configuration from this location.

The `signed` variants will only install pre-signed EFI binaries shipped by the distribution.

Kernels need to be placed into the root filesystem (for example using `ExtraTrees=`) under `/usr/lib/modules/$version`, named `vmlinux` or `vmlinuz`. The `$version` is as produced by Kbuild's `kernelversion` make target.

`BiosBootloader=`, `--bios-bootloader=`
:   Takes one of `none` or `grub`. Defaults to `none`. If set to `none`, no BIOS bootloader will be installed. If set to `grub`, grub is installed as the BIOS bootloader if a bootable image is requested with the `Bootable=` option. If no repart partition definition files are configured, **mkosi** will add a grub BIOS boot partition and an EFI system partition to the default partition definition files.

Note that this option is not mutually exclusive with `Bootloader=`. It is possible to have an image that is both bootable on UEFI and BIOS by configuring both `Bootloader=` and `BiosBootloader=`.

The grub BIOS boot partition should have UUID `21686148-6449-6e6f-744e-656564454649` and should be at least 1MB.

Even if no EFI bootloader is installed, we still need an ESP for BIOS boot as that's where we store the kernel, initrd and grub modules.

`ShimBootloader=`, `--shim-bootloader=`
:   Takes one of `none`, `unsigned`, or `signed`. Defaults to `none`. If set to `none`, shim and MokManager will not be installed to the ESP. If set to `unsigned`, **mkosi** will search for unsigned shim and MokManager EFI binaries and install them. If `SecureBoot=` is enabled, **mkosi** will sign the unsigned EFI binaries before installing them. If set to `signed`, **mkosi** will search for signed EFI binaries and install those. Even if `SecureBoot=` is enabled, **mkosi** won't sign these binaries again.

Note that this option only takes effect when an image that is bootable on UEFI firmware is requested using other options (`Bootable=`, `Bootloader=`).

Note that when this option is enabled, **mkosi** will only install already signed bootloader binaries, kernel image files and unified kernel images, as self-signed binaries would not be accepted by the signed version of shim.

`UnifiedKernelImages=`, `--unified-kernel-images=`
:   Specifies whether to use unified kernel images or not when `Bootloader=` is set to `systemd-boot` or `grub`. Takes a boolean value or `auto`. Defaults to `auto`.
If enabled, unified kernel images are always used and the build will fail if any components required to build unified kernel images are missing. If set to `auto`, unified kernel images will be used if all necessary components are available. Otherwise, Type 1 entries as defined by the Boot Loader Specification will be used instead. If disabled, Type 1 entries will always be used.

`UnifiedKernelImageFormat=`, `--unified-kernel-image-format=`
:   Takes a filename without any path components to specify the format that unified kernel images should be installed as. This may include both the regular specifiers (see **Specifiers**) and special delayed specifiers that are expanded during the installation of the files, which are described below. The default format for this parameter is `&e-&k`, with `-&h` being appended if `roothash=` or `usrhash=` is found on the kernel command line and `+&c` if `/etc/kernel/tries` is found in the image.

The following specifiers may be used:

| Specifier | Value                                              |
|-----------|----------------------------------------------------|
| `&&`      | `&` character                                      |
| `&e`      | Entry Token                                        |
| `&k`      | Kernel version                                     |
| `&h`      | `roothash=` or `usrhash=` value of kernel argument |
| `&c`      | Number of tries used for boot attempt counting     |

`UnifiedKernelImageProfiles=`, `--uki-profile=`
:   Build additional UKI profiles. Takes a comma-separated list of paths to UKI profile config files. This option may be used multiple times in which case each config gets built into a corresponding UKI profile. Config files in the `mkosi.uki-profiles/` directory are automatically picked up. All configured UKI profiles are added as additional UKI profiles to each UKI built by **mkosi**.

See the documentation for the `UKIProfile` section for information on which settings can be configured in UKI profile config files.

`Initrds=`, `--initrd=`
:   Use user-provided initrd(s). Takes a comma-separated list of paths to initrd files. This option may be used multiple times in which case the initrd lists are combined. If no initrds are specified and a bootable image is requested, **mkosi** will look for initrds in a subdirectory `io.mkosi.initrd` of the artifact directory (see `$ARTIFACTDIR` in the section **ENVIRONMENT VARIABLES**); if none are found there, **mkosi** will automatically build a default initrd.

`InitrdPackages=`, `--initrd-package=`
:   Extra packages to install into the default initrd. Takes a comma-separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.

`InitrdVolatilePackages=`, `--initrd-volatile-package=`
:   Similar to `VolatilePackages=`, except it applies to the default initrd.

`Devicetree=`, `--devicetree=`
:   When set, specifies a Devicetree blob to be used by the booting system, instead of the one provided by firmware. **mkosi** will search for the specified file relative to common paths where Linux distributions install Devicetree files. It should typically have the format `/.dtb`.

`MicrocodeHost=`, `--microcode-host=`
:   When set to true, only microcode for the host's CPU is included in the image.

`KernelCommandLine=`, `--kernel-command-line=`
:   Use the specified kernel command line when building images. If the root or usr partitions are created with verity enabled, `roothash=` or `usrhash=` respectively are automatically added to the kernel command line and `root=` or `mount.usr=` should not be added.
Otherwise, if the value of this setting contains the literals `root=PARTUUID` or `mount.usr=PARTUUID`, these are replaced with the partition UUID of the root or usr partition respectively. For example, `root=PARTUUID` would be replaced with `root=PARTUUID=58c7d0b2-d224-4834-a16f-e036322e88f7` where `58c7d0b2-d224-4834-a16f-e036322e88f7` is the partition UUID of the root partition.

`KernelModulesInclude=`, `--kernel-modules-include=`
:   Takes a list of regex patterns that specify kernel modules to include in the image. Patterns should be relative to `/usr/lib/modules//` paths. **mkosi** checks for a match anywhere in the module path (e.g. `i915` will match against `drivers/gpu/drm/i915.ko`). All modules that match any of the specified patterns are included in the image. All module and firmware dependencies of the matched modules are included in the image as well.

If the special value `default` is used, the default kernel modules defined in the **mkosi-initrd** configuration are included as well.

If the special value `host` is used, the currently loaded modules on the host system are included as well.

This setting takes priority over `KernelModulesExclude=` and only makes sense when used in combination with it, because all kernel modules are included in the image by default.

`KernelModulesExclude=`, `--kernel-modules-exclude=`
:   Takes a list of regex patterns that specify modules to exclude from the image. Behaves the same as `KernelModulesInclude=` except that all modules that match any of the specified patterns are excluded from the image.

`KernelModulesInitrd=`, `--kernel-modules-initrd=`
:   Boolean value, enabled (true) by default. If enabled, when building a bootable image, **mkosi** will generate an extra initrd for each unified kernel image it assembles. This initrd contains only modules for the specific kernel version, and will be appended to the prebuilt initrd. This allows generating kernel independent initrds which are augmented with the necessary modules when the UKI is assembled.

`KernelModulesInitrdInclude=`, `--kernel-modules-initrd-include=`
:   Like `KernelModulesInclude=`, but applies to the kernel modules included in the kernel modules initrd.

`KernelModulesInitrdExclude=`, `--kernel-modules-initrd-exclude=`
:   Like `KernelModulesExclude=`, but applies to the kernel modules included in the kernel modules initrd.

`Locale=`, `--locale=`, `LocaleMessages=`, `--locale-messages=`, `Keymap=`, `--keymap=`, `Timezone=`, `--timezone=`, `Hostname=`, `--hostname=`, `RootShell=`, `--root-shell=`
:   These settings correspond to the identically named **systemd-firstboot** options. See **systemd-firstboot**(1) for more information. Additionally, where applicable, the corresponding systemd credentials for these settings are written to `/usr/lib/credstore`, so that they apply even if only `/usr` is shipped in the image.

`RootPassword=`, `--root-password=`
:   Set the system root password. If this option is not used, but a `mkosi.rootpw` file is found in the local directory, the password is automatically read from it, or, if the file is executable, it is run as a script and its stdout is read instead (see the **Scripts** section below). If the password starts with `hashed:`, it is treated as an already hashed root password.
The root password is also stored in `/usr/lib/credstore` under the appropriate systemd credential so that it applies even if only `/usr` is shipped in the image.

To create an unlocked account without any password use `hashed:` without a hash.

`Autologin=`, `--autologin`
:   Enable autologin for the `root` user on `/dev/pts/0` (nspawn), `/dev/tty1` and `/dev/hvc0`.

`MakeInitrd=`, `--make-initrd`
:   Add `/etc/initrd-release` and `/init` to the image so that it can be used as an initramfs.

`Ssh=`, `--ssh`
:   If specified, an **sshd** socket unit and matching service are installed in the final image that expose SSH over VSock. When building with this option and running the image using `mkosi vm`, the `mkosi ssh` command can be used to connect to the container/VM via SSH. Note that you still have to make sure **openssh** is installed in the image to make this option behave correctly. Run `mkosi genkey` to automatically generate an X.509 certificate and private key to be used by **mkosi** to enable SSH access to any virtual machines via `mkosi ssh`. To access images booted using `mkosi boot`, use **machinectl**.

`SELinuxRelabel=`, `--selinux-relabel=`
:   Specifies whether to relabel files to match the image's SELinux policy. Takes a boolean value or `auto`. Defaults to `auto`. If disabled, files will not be relabeled. If enabled, an SELinux policy has to be installed in the image and **setfiles** has to be available to relabel files. If any errors occur during **setfiles**, the build will fail. If set to `auto`, files will be relabeled if an SELinux policy is installed in the image and if **setfiles** is available. Any errors that occur during **setfiles** will be ignored.

Note that when running unprivileged, **setfiles** will fail to set any labels that are not in the host's SELinux policy. To ensure **setfiles** succeeds without errors, make sure to run **mkosi** as root or build from a host system with the same SELinux policy as the image you're building.

`MachineId=`, `--machine-id=`
:   Takes a UUID or the special value `random`. Sets the machine ID of the image to the specified UUID. If set to `random`, a random UUID will be written to `/etc/machine-id`. If not specified explicitly and the file `mkosi.machine-id` exists in the local directory, the UUID to use is read from it. Otherwise, `uninitialized` will be written to `/etc/machine-id`.

### [Validation] Section

`SecureBoot=`, `--secure-boot`
:   Sign **systemd-boot** (if it is not signed yet) and any generated unified kernel images for UEFI SecureBoot.

`SecureBootAutoEnroll=`, `--secure-boot-auto-enroll=`
:   Set up automatic enrollment of the secure boot keys in virtual machines as documented in **systemd-boot**(7) if `SecureBoot=` is used. Note that **systemd-boot** will only do automatic secure boot key enrollment in virtual machines starting from systemd v253. To do auto enrollment on systemd v252 or on bare metal machines, write a **systemd-boot** configuration file to `/efi/loader/loader.conf` using an extra tree with `secure-boot-enroll force` or `secure-boot-enroll manual` in it. Auto enrollment is not supported on systemd versions older than v252. Defaults to `yes`.

`SecureBootKey=`, `--secure-boot-key=`
:   Path to the PEM file containing the secret key for signing the UEFI kernel image if `SecureBoot=` is used, and PCR signatures when `SignExpectedPcr=` is also used. When `SecureBootKeySource=` is specified, the input type depends on the source.
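Tying these settings together, a minimal `[Validation]` section for a locally signed image might look like the following sketch, assuming a key and certificate were generated beforehand (for example with the `genkey` verb; the file names are placeholders):

```ini
[Validation]
SecureBoot=yes
SecureBootAutoEnroll=yes
# Placeholder paths to the signing key and certificate.
SecureBootKey=mkosi.key
SecureBootCertificate=mkosi.crt
```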
`SecureBootCertificate=`, `--secure-boot-certificate=`
:   Path to the X.509 file containing the certificate for the signed UEFI kernel image, if `SecureBoot=` is used.

`SecureBootSignTool=`, `--secure-boot-sign-tool=`
:   Tool to use to sign secure boot PE binaries. Takes one of `systemd-sbsign`, `sbsign` or `auto`. Defaults to `auto`. If set to `auto`, either **systemd-sbsign** or **sbsign** is used if available, with **systemd-sbsign** being preferred.

`Verity=`, `--verity=`
:   Whether to enforce or disable signed verity for extension images. Takes a boolean value or `auto`. If enabled, a verity key and certificate must be present and the build will fail if we don't detect any verity partitions in the disk image produced by **systemd-repart**. If disabled, verity partitions will be excluded from the extension images produced by **systemd-repart**. If set to `auto` and a verity key and certificate are present, **mkosi** will pass them to **systemd-repart** and expects the generated disk image to contain verity partitions, but the build won't fail if no verity partitions are found in the disk image produced by **systemd-repart**.

Note that explicitly disabling signed verity is not yet implemented for the `disk` output and only works for extension images at the moment.

`VerityKey=`, `--verity-key=`
:   Path to the PEM file containing the secret key for signing the verity signature, if a verity signature partition is added with **systemd-repart**. When `VerityKeySource=` is specified, the input type depends on the source.

`VerityCertificate=`, `--verity-certificate=`
:   Path to the X.509 file containing the certificate for signing the verity signature, if a verity signature partition is added with **systemd-repart**.

`SignExpectedPcr=`, `--sign-expected-pcr`
:   Measure the components of the unified kernel image (UKI) using **systemd-measure** and embed the PCR signature into the unified kernel image. This option takes a boolean value or the special value `auto`, which is the default, and which is equal to a true value if the **systemd-measure** binary is in `PATH`. Depends on `SecureBoot=` being enabled and the key from `SecureBootKey=`.

`SignExpectedPcrKey=`, `--sign-expected-pcr-key=`
:   Path to the PEM file containing the secret key for signing the expected PCR signatures. When `SignExpectedPcrKeySource=` is specified, the input type depends on the source.

`SignExpectedPcrCertificate=`, `--sign-expected-pcr-certificate=`
:   Path to the X.509 file containing the certificate for signing the expected PCR signatures.

`SecureBootKeySource=`, `--secure-boot-key-source=`, `VerityKeySource=`, `--verity-key-source=`, `SignExpectedPcrKeySource=`, `--sign-expected-key-source=`
:   The source of the corresponding private key, to support OpenSSL engines and providers, e.g. `--secure-boot-key-source=engine:pkcs11` or `--secure-boot-key-source=provider:pkcs11`.

`SecureBootCertificateSource=`, `--secure-boot-certificate-source=`, `VerityCertificateSource=`, `--verity-certificate-source=`, `SignExpectedPcrCertificateSource=`, `--sign-expected-certificate-source=`
:   The source of the corresponding certificate, to support OpenSSL providers, e.g. `--secure-boot-certificate-source=provider:pkcs11`. Note that engines are not supported.

`Passphrase=`, `--passphrase`
:   Specify the path to a file containing the passphrase to use for LUKS encryption. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as **cryptsetup** and `/etc/crypttab` expect the passphrase files).
The file must have an access mode of 0600 or less.

`Checksum=`, `--checksum`
:   Generate a `SHA256SUMS` file of all generated artifacts after the build is complete.

`Sign=`, `--sign`
:   Sign the generated `SHA256SUMS` using **gpg** after completion.

`OpenPGPTool=`, `--openpgp-tool`
:   OpenPGP implementation to use for signing. `gpg` is the default. Selecting a value different than the default will use the given Stateless OpenPGP (SOP) tool for signing the `SHA256SUMS` file. Example choices are `sqop` and `rsop`, but any implementation from https://www.openpgp.org/about/sop/ that can be installed locally will work.

`Key=`, `--key=`
:   Select the **gpg** key to use for signing `SHA256SUMS`. This key must be already present in the **gpg** keyring.

### [Build] Section

`ToolsTree=`, `--tools-tree=`
:   If specified, programs executed by **mkosi** to build and boot an image are looked up inside the given tree instead of in the host system. Use this option to make image builds more reproducible by always using the same versions of programs to build the final image instead of whatever version is installed on the host system. If this option is not used, but the `mkosi.tools/` directory is found in the local directory it is automatically used for this purpose with the root directory as target.

Note that binaries found in any of the paths configured with `ExtraSearchPaths=` will be executed with `/usr/` from the tools tree instead of from the host. If the host distribution or release does not match the tools tree distribution or release respectively, this might result in failures when trying to execute binaries from any of the extra search paths.

If set to `default`, **mkosi** will automatically add an extra tools tree image and use it as the tools tree.

The following table shows for which distributions default tools tree packages are defined and which packages are included in those default tools trees:

|                         | Fedora | CentOS | Debian | Kali | Ubuntu | Arch | openSUSE |
|-------------------------|:------:|:------:|:------:|:----:|:------:|:----:|:--------:|
| `acl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `apt` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | |
| `archlinux-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | |
| `attr` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `bash` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `btrfs-progs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `ca-certificates` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `coreutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `cpio` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `createrepo_c` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `curl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `debian-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | |
| `diffutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `distribution-gpg-keys` | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ |
| `dnf` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `dosfstools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `e2fsprogs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `edk2-ovmf` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `erofs-utils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `findutils` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `git` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `grep` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `grub-tools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | |
| `jq` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `kali-archive-keyring` | | | | ✓ | | | |
| `kmod` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `less` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `mtools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `nano` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `opensc` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `openssh` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `openssl` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `pkcs11-provider` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `perf` | ✓ | ✓ | ✓ | ✓ | | ✓ | ✓ |
| `sed` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `pacman` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | |
| `policycoreutils` | ✓ | ✓ | ✓ | ✓ | ✓ | | ✓ |
| `qemu` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `sbsigntools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `socat` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `squashfs-tools` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `strace` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `swtpm` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `systemd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `ukify` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `tar` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `ubuntu-keyring` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | |
| `util-linux` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `virtiofsd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `virt-firmware` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `xfsprogs` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `xz` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `zstd` | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ |
| `zypper` | ✓ | | ✓ | ✓ | ✓ | | ✓ |

`ToolsTreeDistribution=`, `--tools-tree-distribution=`
:   Set the distribution to use for the default tools tree. Defaults to the host distribution except for Ubuntu, which defaults to Debian, and RHEL, CentOS, Alma and Rocky, which default to Fedora, or `custom` if the distribution of the host is not a supported distribution.

`ToolsTreeRelease=`, `--tools-tree-release=`
:   Set the distribution release to use for the default tools tree. By default, the hardcoded default release in **mkosi** for the distribution is used.

`ToolsTreeMirror=`, `--tools-tree-mirror=`
:   Set the mirror to use for the default tools tree. By default, the default mirror for the tools tree distribution is used.

`ToolsTreeRepositories=`, `--tools-tree-repository=`
:   Same as `Repositories=` but for the default tools tree.

`ToolsTreeSandboxTrees=`, `--tools-tree-sandbox-tree=`
:   Same as `SandboxTrees=` but for the default tools tree.

`ToolsTreePackages=`, `--tools-tree-package=`
:   Extra packages to install into the default tools tree. Takes a comma-separated list of package specifications. This option may be used multiple times in which case the specified package lists are combined.

`ToolsTreePackageDirectories=`, `--tools-tree-package-directory=`
:   Same as `PackageDirectories=`, but for the default tools tree.

`ToolsTreeCertificates=`, `--tools-tree-certificates=`
:   Specify whether to use certificates and keys from the tools tree. Enabled by default. If enabled, `/etc/pki`, `/etc/ssl`, `/etc/ca-certificates`, and `/var/lib/ca-certificates` from the tools tree are used. Otherwise, these directories are picked up from the host.

`ExtraSearchPaths=`, `--extra-search-path=`
:   List of colon-separated paths to look for tools in, before using the regular `$PATH` search path.

`Incremental=`, `--incremental=`, `-i`
:   Takes either `strict` or a boolean value as its argument. Enables incremental build mode. In this mode, a copy of the OS image is created immediately after all OS packages are installed and the prepare scripts have executed, but before the `mkosi.build` scripts are invoked (or anything that happens after it). On subsequent invocations of **mkosi** with the `-i` switch this cached image may be used to skip the OS package installation, thus drastically speeding up repetitive build times. Note that while there is some rudimentary cache invalidation, it is definitely not perfect. In order to force a rebuild of the cached image, combine `-i` with `-ff` to ensure the cached image is first removed and then re-created.

If set to `strict`, the build fails if a previously built cached image does not exist.

`CacheOnly=`, `--cache-only=`
:   Takes one of `auto`, `metadata`, `always` or `never`. Defaults to `auto`.
If `always`, the package manager is instructed not to contact the network. This provides a minimal level of reproducibility, as long as the package cache is already fully populated. If set to `metadata`, the package manager can still download packages, but we won't sync the repository metadata. If set to `auto`, the repository metadata is synced unless we have a cached image (see `Incremental=`) and packages can be downloaded during the build. If set to `never`, repository metadata is always synced and packages can be downloaded during the build.

`SandboxTrees=`, `--sandbox-tree=`
:   Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to copy into the mkosi sandbox before executing a tool. If the `mkosi.sandbox/` directory is found in the local directory it is used for this purpose with the root directory as target (also see the **Files** section below).

**mkosi** will look for the package manager configuration and related files in the configured sandbox trees. Unless specified otherwise, it will use the configuration files from their canonical locations in `/usr` or `/etc` in the sandbox trees. For example, it will look for `/etc/dnf/dnf.conf` in the sandbox trees if **dnf** is used to install packages.

`WorkspaceDirectory=`, `--workspace-directory=`
:   Path to a directory where to store data required temporarily while building the image. This directory should have enough space to store the full OS image, though in most modes the actually used disk space is smaller. If not specified, a subdirectory of `$XDG_CACHE_HOME` (if set), `$HOME/.cache` (if set) or `/var/tmp` is used.

The data in this directory is removed automatically after each build. It's safe to manually remove the contents of this directory should an **mkosi** invocation be aborted abnormally (for example, due to reboot/power failure).

`CacheDirectory=`, `--cache-directory=`
:   Takes a path to a directory to use as the incremental cache directory for the incremental images produced when the `Incremental=` option is enabled. If this option is not used, but a `mkosi.cache/` directory is found in the local directory it is automatically used for this purpose.

`PackageCacheDirectory=`, `--package-cache-dir=`
:   Takes a path to a directory to use as the package cache directory for the distribution package manager used. If unset, but a `mkosi.pkgcache/` directory is found in the local directory it is automatically used for this purpose, otherwise a suitable directory in the user's home directory or system is used.

`BuildDirectory=`, `--build-directory=`
:   Takes a path to a directory to use as the build directory for build systems that support out-of-tree builds (such as Meson). The directory used this way is shared between repeated builds, and allows the build system to reuse artifacts (such as object files, executables, …) generated on previous invocations. The build scripts can find the path to this directory in the `$BUILDDIR` environment variable. This directory is mounted into the image's root directory when **mkosi-chroot** is invoked during execution of the build scripts. If this option is not specified, but a directory `mkosi.builddir/` exists in the local directory it is automatically used for this purpose (also see the **Files** section below).

`UseSubvolumes=`, `--use-subvolumes=`
:   Takes a boolean or `auto`. Enables or disables use of btrfs subvolumes for directory tree outputs.
If enabled, **mkosi** will create the root directory as a btrfs subvolume and use btrfs subvolume snapshots where possible to copy base or cached trees, which is much faster than doing a recursive copy. If explicitly enabled and `btrfs` is not installed or subvolumes cannot be created, an error is raised. If `auto`, missing **btrfs** or failures to create subvolumes are ignored.

`RepartOffline=`, `--repart-offline=`
:   Specifies whether to build disk images without using loopback devices. Enabled by default. When enabled, **systemd-repart** will not use loopback devices to build disk images. When disabled, **systemd-repart** will always use loopback devices to build disk images.

Note that when using `RepartOffline=no`, **mkosi** cannot run unprivileged and the image build has to be done as the root user outside of any containers and with loopback devices available on the host system.

There are currently two known scenarios where `RepartOffline=no` has to be used. The first is when using `Subvolumes=` in a repart partition definition file, as subvolumes cannot be created without using loopback devices. The second is when creating a system with SELinux and an XFS root partition. Because **mkfs.xfs** does not support populating an XFS filesystem with extended attributes, loopback devices have to be used to ensure the SELinux extended attributes end up in the generated XFS filesystem.

`History=`, `--history=`
:   Takes a boolean. If enabled, **mkosi** will write information about the latest build to the `.mkosi-private` subdirectory in the directory from which it was invoked. This information is then used to restore the config of the latest build when running any verb that needs a build without specifying `--force`.

To give an example of why this is useful, if you run `mkosi -O my-custom-output-dir -f` followed by `mkosi vm`, **mkosi** will fail saying the image hasn't been built yet. If you run `mkosi -O my-custom-output-dir --history=yes -f` followed by `mkosi vm`, it will boot the image built in the previous step as expected.

`BuildSources=`, `--build-sources=`
:   Takes a comma-separated list of colon-separated path pairs. The first path of each pair refers to a directory to mount from the host. The second path of each pair refers to the directory where the source directory should be mounted when running scripts. Every target path is prefixed with `/work/src` and all build sources are sorted lexicographically by their target before mounting, so that top level paths are mounted first. If not configured explicitly, the current working directory is mounted to `/work/src`.

`BuildSourcesEphemeral=`, `--build-sources-ephemeral=`
:   Takes a boolean or the special value `buildcache`. Disabled by default. Configures whether changes to source directories (the working directory and any directories configured via `BuildSources=`) are persisted. If enabled, all source directories will be reset to their original state every time after running all scripts of a specific type (except sync scripts).

💥💣💥 If set to `buildcache`, the overlay is not discarded when running build scripts, but saved to the build directory, configured via `BuildDirectory=`, and will be reused on subsequent runs. The overlay is still discarded for all other scripts. This option can be used to implement more advanced caching of builds, but can lead to unexpected states of the source directory. When using this option, a build directory must be configured.
💥💣💥

`Environment=`, `--environment=`
:   Adds variables to the environment that package managers and the prepare/build/postinstall/finalize scripts are executed with. Takes a space-separated list of variable assignments or just variable names. In the latter case, the values of those variables will be passed through from the environment in which **mkosi** was invoked. This option may be specified more than once, in which case all listed variables will be set. If the same variable is set twice, the later setting overrides the earlier one.

`EnvironmentFiles=`, `--env-file=`
:   Takes a comma-separated list of paths to files that contain environment variable definitions to be added to the scripting environment. Uses `mkosi.env` if it is found in the local directory. The variables are first read from `mkosi.env` if it exists, then from the given list of files and then from the `Environment=` settings.

`WithTests=`, `--without-tests`, `-T`
:   If set to false (or when the command-line option is used), the `$WITH_TESTS` environment variable is set to `0` when the `mkosi.build` scripts are invoked. This is supposed to be used by the build scripts to bypass any unit or integration tests that are normally run during the source build process. Note that this option has no effect unless the `mkosi.build` build scripts honor it.

`WithNetwork=`, `--with-network=`
:   When true, enables network connectivity while the build scripts `mkosi.build` are invoked. By default, the build scripts run with networking turned off. The `$WITH_NETWORK` environment variable is passed to the `mkosi.build` build scripts indicating whether the build is done with or without network.

`ProxyUrl=`, `--proxy-url=`
:   Configure a proxy to be used for all outgoing network connections. The tools that **mkosi** invokes and that support proxy configuration are configured to use this proxy. **mkosi** also sets various well-known environment variables to specify the proxy to use for any programs it invokes that may need internet access.

`ProxyExclude=`, `--proxy-exclude=`
:   Configure hostnames for which requests should not go through the proxy. Takes a comma-separated list of hostnames.

`ProxyPeerCertificate=`, `--proxy-peer-certificate=`
:   Configure a file containing certificates used to verify the proxy. Defaults to the system-wide certificate store. Currently, setting a proxy peer certificate is only supported when **dnf** or **dnf5** is used to build the image.

`ProxyClientCertificate=`, `--proxy-client-certificate=`
:   Configure a file containing the certificate used to authenticate the client with the proxy. Currently, setting a proxy client certificate is only supported when **dnf** or **dnf5** is used to build the image.

`ProxyClientKey=`, `--proxy-client-key=`
:   Configure a file containing the private key used to authenticate the client with the proxy. Defaults to the proxy client certificate if one is provided. Currently, setting a proxy client key is only supported when **dnf** or **dnf5** is used to build the image.

### [Runtime] Section (previously known as the [Host] section)

`NSpawnSettings=`, `--settings=`
:   Specifies a `.nspawn` settings file for **systemd-nspawn** to use in the `boot` and `shell` verbs, and to place next to the generated image file. This is useful to configure the **systemd-nspawn** environment when the image is run. If this setting is not used but an `mkosi.nspawn` file is found in the local directory, it is automatically used for this purpose.
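For instance, a minimal `mkosi.nspawn` settings file that boots the image and gives the container its own virtual Ethernet link could look like this sketch (see **systemd.nspawn**(5) for the full syntax):

```ini
[Exec]
Boot=yes

[Network]
VirtualEthernet=yes
```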
`VirtualMachineMonitor=`, `--vmm=`
:   Configures the virtual machine monitor to use. Takes one of `qemu` or `vmspawn`. Defaults to `qemu`.

    When set to `qemu`, the image is booted with **qemu**. Most output formats can be booted in **qemu**. Any arguments specified after the verb are appended to the **qemu** invocation and are interpreted as extra **qemu** command line arguments.

    When set to `vmspawn`, **systemd-vmspawn** is used to boot up the image; `vmspawn` only supports disk and directory type images. Any arguments specified after the verb are appended to the **systemd-vmspawn** invocation and are interpreted as extra vmspawn options and extra kernel command line arguments.

`Console=`, `--console=`
:   Configures how to set up the console of the VM. Takes one of `interactive`, `read-only`, `native`, or `gui`. Defaults to `interactive`. `interactive` provides an interactive terminal interface to the VM. `read-only` is similar, but is strictly read-only, i.e. it does not accept any input from the user. `native` also provides a TTY-based interface, but uses **qemu**'s native implementation (which means the **qemu** monitor is available). `gui` shows the **qemu** graphical UI.

`CPUs=`, `--cpus=`
:   Configures the number of CPU cores to assign to the guest when booting a virtual machine. Defaults to `2`. When set to `0`, the number of CPUs available to the **mkosi** process will be used.

`RAM=`, `--ram=`
:   Configures the amount of RAM assigned to the guest when booting a virtual machine. Defaults to `2G`.

`KVM=`, `--kvm=`
:   Configures whether KVM acceleration should be used when booting a virtual machine. Takes a boolean value or `auto`. Defaults to `auto`.

`Vsock=`, `--vsock=`
:   Configures whether to provision a vsock when booting a virtual machine. Takes a boolean value or `auto`. Defaults to `auto`.

`VsockConnectionId=`, `--vsock-cid=`
:   Configures the vsock connection ID to use when booting a virtual machine. Takes a number in the interval `[3, 0xFFFFFFFF)` or `hash` or `auto`. Defaults to `auto`. When set to `hash`, the connection ID will be derived from the full path to the image. When set to `auto`, **mkosi** will try to find a free connection ID automatically. Otherwise, the provided number will be used as is.

`TPM=`, `--tpm=`
:   Configures whether to use a virtual TPM when booting a virtual machine. Takes a boolean value or `auto`. Defaults to `auto`.

`CDROM=`, `--cdrom=`
:   Configures whether to attach the image as a CD-ROM device when booting a virtual machine. Takes a boolean. Defaults to `no`.

`Removable=`, `--removable=`
:   Configures whether to attach the image as a removable device when booting a virtual machine. Takes a boolean. Defaults to `no`.

`Firmware=`, `--firmware=`
:   Configures the virtual machine firmware to use. Takes one of `uefi`, `uefi-secure-boot`, `bios`, `linux`, or `auto`. Defaults to `auto`. When set to `uefi`, the OVMF firmware without secure boot support is used. When set to `uefi-secure-boot`, the OVMF firmware with secure boot support is used. When set to `bios`, the default SeaBIOS firmware is used. When set to `linux`, direct kernel boot is used. See the `Linux=` option for more details on which kernel image is used with direct kernel boot. When set to `auto`, `uefi-secure-boot` is used if possible and `linux` otherwise.

`FirmwareVariables=`, `--firmware-variables=`
:   Configures the path to the virtual machine firmware variables file to use. Currently, this option is only taken into account when the `uefi` or `uefi-secure-boot` firmware is used.
    If not specified, **mkosi** will search for the default variables file and use that instead.

    When set to `microsoft`, a firmware variables file with the Microsoft secure boot certificates already enrolled will be used.

    When set to `microsoft-mok`, a firmware variables file with the Microsoft secure boot certificates already enrolled will be extended with a `MokList` variable containing the secure boot certificate from `SecureBootCertificate=`. This is intended to be used together with shim binaries signed by the distribution and locally signed EFI binaries.

    When set to `custom`, the secure boot certificate from `SecureBootCertificate=` will be enrolled into the default firmware variables file.

    `virt-fw-vars` from the [virt-firmware](https://gitlab.com/kraxel/virt-firmware) project can be used to customize OVMF variable files.

`Linux=`, `--linux=`
:   Set the kernel image to use for **qemu** direct kernel boot. If not specified, **mkosi** will use the kernel provided via the command line (`-kernel` option) or the latest kernel that was installed into the image (or fail if no kernel was installed into the image).

    Note that when the `cpio` output format is used, direct kernel boot is used regardless of the configured firmware. Depending on the configured firmware, **qemu** might boot the kernel itself or use the configured firmware to boot it.

`Drives=`, `--drive=`
:   Add a drive. Takes a colon-delimited string of format `<id>:<size>[:<directory>[:<options>[:<file-id>]]]`. `id` specifies the ID assigned to the drive. This can be used as the `drive=` property in various **qemu** devices. `size` specifies the size of the drive. This takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively. `directory` optionally specifies the directory in which to create the file backing the drive. `options` optionally specifies extra comma-delimited properties which are passed verbatim to **qemu**'s `-drive` option. `file-id` specifies the ID of the file backing the drive. Drives with the same file ID will share the backing file. The directory and size of the file will be determined from the first drive with a given file ID.

    **Example usage:**

    ```ini
    [Runtime]
    Drives=btrfs:10G
           ext4:20G
    QemuArgs=-device nvme,serial=btrfs,drive=btrfs
             -device nvme,serial=ext4,drive=ext4
    ```

`QemuArgs=`
:   Space-delimited list of additional arguments to pass when invoking **qemu**.

`Ephemeral=`, `--ephemeral`
:   When used with the `shell`, `boot`, or `vm` verbs, this option runs the specified verb on a temporary snapshot of the output image that is removed immediately when the container terminates. Taking the temporary snapshot is more efficient on file systems that support reflinks natively (**btrfs** or **xfs**) than on more traditional file systems that do not (ext4).

`Credentials=`, `--credential=`
:   Set credentials to be passed to **systemd-nspawn** or the virtual machine respectively when `mkosi shell/boot` or `mkosi vm` are used. This option takes a space-separated list of values which can be either key=value pairs or paths. If a path is provided and it is a file, the credential name will be the name of the file. If the file is executable, the credential value will be the output of executing the file. Otherwise, the credential value will be the contents of the file. If the path is a directory, the same logic applies to each file in the directory.

    Note that values will only be treated as paths if they do not contain the delimiter (`=`).
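For example, a host SSH public key can be passed as the `ssh.authorized_keys.root` credential, which recent systemd versions provision for the root user (a sketch; the key path is an assumption):

```console
$ mkosi --credential=ssh.authorized_keys.root="$(cat ~/.ssh/id_ed25519.pub)" vm
```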
`KernelCommandLineExtra=`, `--kernel-command-line-extra=`
:   Set extra kernel command line entries that are appended to the kernel command line at runtime when booting the image. When booting in a container, these are passed as extra arguments to systemd. When booting in a VM, these are appended to the kernel command line via the SMBIOS `io.systemd.stub.kernel-cmdline-extra` OEM string. This will only be picked up by **systemd-boot** and **systemd-stub** versions newer than or equal to v254.

`RuntimeTrees=`, `--runtime-tree=`
:   Takes a colon-separated pair of paths. The first path refers to a directory to mount into any machine (container or VM) started by **mkosi**. The second path refers to the target directory inside the machine. If the second path is not provided, the directory is mounted at `/root/src` in the machine. If the second path is relative, it is interpreted relative to `/root/src` in the machine.

    For each mounted directory, the uid and gid of the user running **mkosi** are mapped to the root user in the machine. This means that all the files and directories will appear as if they're owned by root in the machine, and all new files and directories created by root in the machine in these directories will be owned by the user running **mkosi** on the host.

    Note that when using `mkosi vm` with this feature, systemd v254 or newer has to be installed in the image.

`RuntimeSize=`, `--runtime-size=`
:   If specified, disk images are grown to the specified size when they're booted with `mkosi boot` or `mkosi vm`. Takes a size in bytes. Additionally, the suffixes `K`, `M` and `G` can be used to specify a size in kilobytes, megabytes and gigabytes respectively.

`RuntimeScratch=`, `--runtime-scratch=`
:   Takes a boolean value or `auto`. Specifies whether to mount extra scratch space to `/var/tmp`. If enabled, practically unlimited scratch space is made available under `/var/tmp` when booting the image with `mkosi vm`, `mkosi boot` or `mkosi shell`.

    Note that using this feature with `mkosi vm` requires systemd v254 or newer in the guest.

`RuntimeNetwork=`, `--runtime-network=`
:   Takes one of `user`, `interface` or `none`. Defaults to `user`. Specifies the networking to set up when booting the image. `user` sets up usermode networking. `interface` sets up a virtual network connection between the host and the image. This translates to a veth interface for `mkosi shell` and `mkosi boot` and a tap interface for `mkosi vm` and `mkosi vmspawn`.

    Note that when using `interface`, **mkosi** does not automatically configure the host interface. It is expected that a recent version of **systemd-networkd** is running on the host which will automatically configure the host interface of the link.

`RuntimeBuildSources=`, `--runtime-build-sources=`
:   Mount the build sources configured with `BuildSources=` and the build directory (if one is configured) to the same locations in `/work` that they were mounted to when running the build script when using `mkosi boot` or `mkosi vm`.

`RuntimeHome=`, `--runtime-home=`
:   Mount the current home directory from which **mkosi** is running to `/root` when using `mkosi boot` or `mkosi vm`.

`UnitProperties=`, `--unit-property=`
:   Configure systemd unit properties to add to the systemd scopes allocated when using `mkosi boot` or `mkosi vm`. These are passed directly to the `--property` options of **systemd-nspawn** and **systemd-run** respectively.
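For instance, the scope's resource usage can be capped with a standard systemd property (a sketch):

```ini
[Runtime]
# Limit the memory available to the booted container or VM scope.
UnitProperties=MemoryMax=4G
```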
`SshKey=`, `--ssh-key=`
:   Path to the X.509 private key in PEM format to use to connect to a virtual machine started with `mkosi vm` and built with the `Ssh=` option enabled via the `mkosi ssh` command. If not configured and `mkosi.key` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a key in `mkosi.key`.

`SshCertificate=`, `--ssh-certificate=`
:   Path to the X.509 certificate in PEM format to provision as the SSH public key in virtual machines started with `mkosi vm`. If not configured and `mkosi.crt` exists in the working directory, it will automatically be used for this purpose. Run `mkosi genkey` to automatically generate a certificate in `mkosi.crt`.

`Machine=`, `--machine=`
:   Specify the machine name to use when booting the image. Can also be used to refer to a specific image when SSH-ing into an image (e.g. `mkosi --image=myimage ssh`).

    Note that `Ephemeral=` has to be enabled to start multiple instances of the same image.

`Register=`, `--register=`
:   Takes a boolean value or `auto`. Specifies whether to register the vm/container with **systemd-machined**. If enabled, **mkosi** will fail if it can't register the vm/container with **systemd-machined**. If disabled, **mkosi** will not register the vm/container with **systemd-machined**. If `auto`, **mkosi** will register the vm/container with **systemd-machined** if it is available. Defaults to `auto`.

`ForwardJournal=`, `--forward-journal=`
:   Specify the path to which journal logs from containers and virtual machines should be forwarded. If the path has the `.journal` extension, it is interpreted as a file to which the journal should be written. Otherwise, the path is interpreted as a directory to which the journal should be written.

    Note that systemd v256 or newer is required in the virtual machine for log forwarding to work.

    Note that if a path with the `.journal` extension is given, the journal size is limited to `4G`. Configure an output directory instead of a file if your workload produces more than `4G` worth of journal data.

`SysupdateDirectory=`, `--sysupdate-directory=`
:   Path to a directory containing **systemd-sysupdate** transfer definition files that are used by `mkosi sysupdate`. If `mkosi.sysupdate/` exists in the local directory, it will be used for this purpose as well.

    Note that `mkosi sysupdate` invokes `systemd-sysupdate` with `--transfer-source=` set to the **mkosi** output directory. To make use of this in a transfer definition file, set `PathRelativeTo=explicit` to have the `Path=` setting for the transfer source be interpreted relative to the **mkosi** output directory. Generally, configuring `PathRelativeTo=explicit` and `Path=/` for the transfer source is sufficient for the match pattern to be interpreted relative to the **mkosi** output directory.

### [Match] Section

`Profiles=`
:   Matches against the configured profiles.

`Distribution=`
:   Matches against the configured distribution.

`Release=`
:   Matches against the configured distribution release. If this condition is used and no distribution has been explicitly configured yet, the host distribution and release are used.

`Architecture=`
:   Matches against the configured architecture. If this condition is used and no architecture has been explicitly configured yet, the host architecture is used.

`Repositories=`
:   Matches against repositories enabled with the `Repositories=` setting. Takes a single repository name.

`PathExists=`
:   This condition is satisfied if the given path exists.
    Relative paths are interpreted relative to the parent directory of the config file that the condition is read from.

`ImageId=`
:   Matches against the configured image ID, supporting globs. If this condition is used and no image ID has been explicitly configured yet, this condition fails.

`ImageVersion=`
:   Matches against the configured image version. Image versions can be prefixed with the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prefixed, the equality operator is assumed by default. If this condition is used and no image version has been explicitly configured yet, this condition fails.

`Bootable=`
:   Matches against the configured value for the `Bootable=` feature. Takes a boolean value or `auto`.

`Format=`
:   Matches against the configured value for the `Format=` option. Takes an output format (see the `Format=` option).

`SystemdVersion=`
:   Matches against the systemd version on the host (as reported by `systemctl --version`). Values can be prefixed with the operators `==`, `!=`, `>=`, `<=`, `<`, `>` for rich version comparisons according to the UAPI group version format specification. If no operator is prefixed, the equality operator is assumed by default.

`BuildSources=`
:   Takes a build source target path (see `BuildSources=`). This match is satisfied if any of the configured build sources uses this target path. For example, if we have a `mkosi.conf` file containing:

    ```ini
    [Build]
    BuildSources=../abc/qed:kernel
    ```

    and a drop-in containing:

    ```ini
    [Match]
    BuildSources=kernel
    ```

    the drop-in will be included.

    Any absolute paths passed to this setting are interpreted relative to the current working directory.

`HostArchitecture=`
:   Matches against the host's native architecture. See the `Architecture=` setting for a list of possible values.

`ToolsTreeDistribution=`
:   Matches against the configured tools tree distribution.

`ToolsTreeRelease=`
:   Matches against the configured tools tree release.

`Environment=`
:   Matches against a specific key/value pair configured with `Environment=`. If no value is provided, this condition checks whether the given key is present in the environment regardless of its value.

This table shows which matchers support globs, rich comparisons and the default value that is matched against if no value has been configured at the time the config file is read:

| Matcher                  | Globs | Rich Comparisons | Default                                                                                  |
|--------------------------|-------|------------------|------------------------------------------------------------------------------------------|
| `Profiles=`              | no    | no               | match fails                                                                              |
| `Distribution=`          | no    | no               | match host distribution                                                                  |
| `Release=`               | no    | no               | match host release                                                                       |
| `Architecture=`          | no    | no               | match host architecture                                                                  |
| `PathExists=`            | no    | no               | n/a                                                                                      |
| `ImageId=`               | yes   | no               | match fails                                                                              |
| `ImageVersion=`          | no    | yes              | match fails                                                                              |
| `Bootable=`              | no    | no               | match auto feature                                                                       |
| `Format=`                | no    | no               | match default format                                                                     |
| `SystemdVersion=`        | no    | yes              | n/a                                                                                      |
| `BuildSources=`          | no    | no               | match fails                                                                              |
| `HostArchitecture=`      | no    | no               | n/a                                                                                      |
| `ToolsTreeDistribution=` | no    | no               | match the fallback tools tree distribution (see `ToolsTreeDistribution=` in `[Build]`)   |
| `ToolsTreeRelease=`      | no    | no               | match default tools tree release                                                         |
| `Environment=`           | no    | no               | n/a                                                                                      |
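Putting this together, a drop-in that only takes effect when building a Fedora image for the arm64 architecture might look like this (the package name is a hypothetical example):

```ini
[Match]
Distribution=fedora
Architecture=arm64

[Content]
Packages=grub2-efi-aa64
```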
### [Include]

`Include=`, `--include=`, `-I`
:   Include extra configuration from the given file or directory. The extra configuration is included immediately after parsing the setting, except when used on the command line, in which case the extra configuration is included after parsing all command line arguments.

    Note that each path containing extra configuration is only parsed once, even if included more than once with `Include=`.

    The builtin configs for the **mkosi** default initrd, default tools tree and default virtual machine image can be included by including the literal value `mkosi-initrd`, `mkosi-tools` or `mkosi-vm` respectively.

    Note: Include names starting with either of the literals `mkosi-` or `contrib-` are reserved for use by **mkosi** itself.

### [Config] Section

`Profiles=`, `--profile=`
:   Select the given profiles. A profile is a configuration file or directory in the `mkosi.profiles/` directory. The configuration files and directories of each profile are included after parsing the `mkosi.conf.d/*.conf` drop-in configuration.

`Dependencies=`, `--dependency=`
:   The images that this image depends on, specified as a comma-separated list. All images configured in this option will be built before this image. When this setting is specified for the "main" image, it specifies which subimages should be built. See the **Building multiple images** section for more information.

`MinimumVersion=`, `--minimum-version=`
:   The minimum **mkosi** version required to build this configuration. If specified multiple times, the highest specified version is used.

`ConfigureScripts=`, `--configure-script=`
:   Takes a comma-separated list of paths to executables that are used as the configure scripts for this image. See the **Scripts** section for more information.

`PassEnvironment=`, `--pass-environment=`
:   Takes a list of environment variable names separated by spaces. When building multiple images, pass the listed environment variables to each individual subimage as if they were "universal" settings. See the **Building multiple images** section for more information.

### [UKIProfile] Section

The `UKIProfile` section can be used in UKI profile config files which are passed to the `UnifiedKernelImageProfiles=` setting. The following settings can be specified in the `UKIProfile` section:

`Profile=`
:   The contents of the `.profile` section of the UKI profile. Takes a list of key/value pairs separated by `=`. The `ID=` key must be specified. See the UKI [specification](https://uapi-group.org/specifications/specs/unified_kernel_image/#multi-profile-ukis) for a full list of possible keys.

`Cmdline=`
:   Extra kernel command line options for the UKI profile. Takes a space-delimited list of extra kernel command line arguments. Note that the final `.cmdline` section will be the combination of the base `.cmdline` section and the extra kernel command line arguments specified with this setting.

## Specifiers

The current value of various settings can be accessed when parsing configuration files by using specifiers. To write a literal `%` character in a configuration file without treating it as a specifier, use `%%`.
The following specifiers are understood:

| Setting            | Specifier |
|--------------------|-----------|
| `Distribution=`    | `%d`      |
| `Release=`         | `%r`      |
| `Architecture=`    | `%a`      |
| `Format=`          | `%t`      |
| `Output=`          | `%o`      |
| `OutputDirectory=` | `%O`      |
| `ImageId=`         | `%i`      |
| `ImageVersion=`    | `%v`      |

There are also specifiers that are independent of settings:

| Specifier | Value                                          |
|-----------|------------------------------------------------|
| `%C`      | Parent directory of current config file        |
| `%P`      | Current working directory                      |
| `%D`      | Directory that **mkosi** was invoked in        |
| `%I`      | Name of the current subimage in `mkosi.images` |

Finally, there are specifiers that are derived from a setting:

| Specifier | Value                                                 |
|-----------|-------------------------------------------------------|
| `%F`      | The default filesystem of the configured distribution |

Note that the current working directory changes as **mkosi** parses its configuration. Specifically, each time **mkosi** parses a directory containing a `mkosi.conf` file, **mkosi** changes its working directory to that directory.

Note that the directory that **mkosi** was invoked in is influenced by the `--directory=` command line argument.

The following table shows example values for the directory specifiers listed above:

|      | `$D/mkosi.conf` | `$D/mkosi.conf.d/abc/abc.conf` | `$D/mkosi.conf.d/abc/mkosi.conf` |
|------|-----------------|--------------------------------|----------------------------------|
| `%C` | `$D`            | `$D/mkosi.conf.d`              | `$D/mkosi.conf.d/abc`            |
| `%P` | `$D`            | `$D`                           | `$D/mkosi.conf.d/abc`            |
| `%D` | `$D`            | `$D`                           | `$D`                             |

## Supported distributions

Images may be created containing installations of the following distributions:

* *Fedora Linux*
* *Debian*
* *Kali Linux*
* *Ubuntu*
* *Arch Linux*
* *openSUSE*
* *Mageia*
* *CentOS*
* *RHEL*
* *RHEL UBI*
* *OpenMandriva*
* *Rocky Linux*
* *Alma Linux*
* *Azure Linux*
* *None* (**Requires the user to provide a pre-built rootfs**)

In theory, any distribution may be used on the host for building images containing any other distribution, as long as the necessary tools are available. Specifically, any distribution that packages **apt** may be used to build *Debian*, *Kali* or *Ubuntu* images. Any distribution that packages **dnf** may be used to build images for any of the RPM-based distributions. Any distro that packages **pacman** may be used to build *Arch Linux* images. Any distribution that packages **zypper** may be used to build *openSUSE* images.

Other distributions and build automation tools for embedded Linux systems such as Buildroot, OpenEmbedded and Yocto Project may be used by selecting the `custom` distribution, and populating the rootfs via a combination of base trees, skeleton trees, and prepare scripts.

Currently, *Fedora Linux* packages all relevant tools as of Fedora 28.

Note that when not using a custom mirror, `RHEL` images can only be built from a host system with a `RHEL` subscription (established using e.g. `subscription-manager`).

# Execution Flow

Execution flow for `mkosi build`. Default values/calls are shown in parentheses. When building with `--incremental` **mkosi** creates a cache of the distribution installation if not already existing and replaces the distribution installation in consecutive runs with data from the cached one.

1. Parse CLI options
1. Parse configuration files
1. Run configure scripts (`mkosi.configure`)
1. If we're not running as root, unshare the user namespace and map the subuid range configured in `/etc/subuid` and `/etc/subgid` into it.
1. Unshare the mount namespace
1. Remount the following directories read-only if they exist:
   - `/usr`
   - `/etc`
   - `/opt`
   - `/srv`
   - `/boot`
   - `/efi`
   - `/media`
   - `/mnt`

Then, for each image, we execute the following steps:

1. Copy sandbox trees into the workspace
1. Sync the package manager repository metadata
1. Run sync scripts (`mkosi.sync`)
1. Copy base trees (`--base-tree=`) into the image
1. Reuse a cached image if one is available
1. Copy a snapshot of the package manager repository metadata into the image
1. Copy skeleton trees (`mkosi.skeleton`) into image
1. Install distribution and packages into image
1. Run prepare scripts on image with the `final` argument (`mkosi.prepare`)
1. Install build packages in overlay if any build scripts are configured
1. Run prepare scripts on overlay with the `build` argument if any build scripts are configured (`mkosi.prepare`)
1. Cache the image if configured (`--incremental`)
1. Run build scripts on image + overlay if any build scripts are configured (`mkosi.build`)
1. Finalize the build if the output format `none` is configured
1. Copy the build scripts outputs into the image
1. Copy the extra trees into the image (`mkosi.extra`)
1. Run post-install scripts (`mkosi.postinst`)
1. Write config files required for `Ssh=`, `Autologin=` and `MakeInitrd=`
1. Install systemd-boot and configure secure boot if configured (`--secure-boot`)
1. Run **systemd-sysusers**
1. Run **systemd-tmpfiles**
1. Run `systemctl preset-all`
1. Run **depmod**
1. Run **systemd-firstboot**
1. Run **systemd-hwdb**
1. Remove packages and files (`RemovePackages=`, `RemoveFiles=`)
1. Run SELinux relabel if a SELinux policy is installed
1. Run finalize scripts (`mkosi.finalize`)
1. Generate unified kernel image if configured to do so
1. Generate final output format
1. Run post-output scripts (`mkosi.postoutput`)

# Scripts

To allow for image customization that cannot be implemented using **mkosi**'s builtin features, **mkosi** supports running scripts at various points during the image build process that can customize the image as needed. Scripts are executed on the host system as root (either real root or root within the user namespace that **mkosi** created when running unprivileged) with a customized environment to simplify modifying the image. For each script, the configured build sources (`BuildSources=`) are mounted into the current working directory before running the script in the current working directory. `$SRCDIR` is set to point to the current working directory. The following scripts are supported:

* If **`mkosi.configure`** (`ConfigureScripts=`) exists, it is executed before building the image. This script may be used to dynamically modify the configuration. It receives the configuration serialized as JSON on stdin and should output the modified configuration serialized as JSON on stdout. Note that this script only runs when building or booting the image (`build`, `vm`, `boot` and `shell` verbs). If a default tools tree is configured, it will be built before running the configure scripts and the configure scripts will run with the tools tree available. This also means that the modifications made by configure scripts will not be visible in the `summary` output. A sketch of such a script is shown below.
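For example, a configure script could tweak a setting by filtering the JSON with **jq** (a sketch; the exact JSON key names follow **mkosi**'s serialization of the settings, so the key used here is an assumption):

```sh
#!/bin/sh
# Hypothetical mkosi.configure: read the JSON config from stdin,
# append a kernel command line argument, write the result to stdout.
exec jq '.KernelCommandLine += ["console=ttyS0"]'
```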
* If **`mkosi.sync`** (`SyncScripts=`) exists, it is executed before the image is built. This script may be used to update various sources that are used to build the image. One use case is to run `git pull` on various source repositories before building the image. Specifically, the `BuildSourcesEphemeral=` setting does not apply to sync scripts, which means sync scripts can be used to update build sources even if `BuildSourcesEphemeral=` is enabled.

* If **`mkosi.prepare`** (`PrepareScripts=`) exists, it is first called with the `final` argument, right after the software packages are installed. It is called a second time with the `build` command line parameter, right after the build packages are installed and the build overlay mounted on top of the image's root directory. This script has network access and may be used to install packages from other sources than the distro's package manager (e.g. **pip**, **npm**, ...), after all software packages are installed but before the image is cached (if incremental mode is enabled). In contrast to a general purpose installation, it is safe to install packages to the system (`pip install`, `npm install -g`) instead of in `$SRCDIR` itself because the build image is only used for a single project and can easily be thrown away and rebuilt, so there's no risk of conflicting dependencies and no risk of polluting the host system.

* If **`mkosi.build`** (`BuildScripts=`) exists, it is executed with the build overlay mounted on top of the image's root directory. When running the build script, `$DESTDIR` points to a directory where the script should place any generated files that should end up in the image. Note that **make**-, **automake**-, and **meson**-based build systems generally honor `$DESTDIR`, thus making it very natural to build *source* trees from the build script. After running the build script, the contents of `$DESTDIR` are copied into the image.

* If **`mkosi.postinst`** (`PostInstallationScripts=`) exists, it is executed after the (optional) build tree and extra trees have been installed. This script may be used to alter the images without any restrictions, after all software packages and built sources have been installed.

* If **`mkosi.finalize`** (`FinalizeScripts=`) exists, it is executed as the last step of preparing an image.

* If **`mkosi.postoutput`** (`PostOutputScripts=`) exists, it is executed right after all the output files have been generated, before they are finally moved into the output directory. This can be used to generate additional or alternative outputs, e.g. `SHA256FILES` or SBOM manifests.

* If **`mkosi.clean`** (`CleanScripts=`) exists, it is executed right after the outputs of a previous build have been cleaned up. A clean script can clean up any outputs that **mkosi** does not know about (e.g. artifacts from `SplitArtifacts=partitions` or RPMs built in a build script). Note that this script does not use the tools tree even if one is configured.

* If **`mkosi.version`** exists and is executable, it is run during configuration parsing and populates `ImageVersion=` with the output on stdout. This can be used for external version tracking, e.g. with `git describe` or `date '+%Y-%m-%d'`. Note that this script is executed on the host system without any sandboxing. A minimal example is sketched below.

* If **`mkosi.rootpw`** exists and is executable, it is run during configuration parsing and populates `RootPassword=` with the output on stdout. This can be used to randomly generate a password and can be remembered by outputting it to stderr or by reading `$MKOSI_CONFIG` in another script (e.g. `mkosi.postoutput`). Note that this script is executed on the host system without any sandboxing.
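A minimal executable `mkosi.version` script along these lines (a sketch, assuming the sources are a git checkout) derives the image version from git:

```sh
#!/bin/sh
# Hypothetical mkosi.version: the version is whatever is printed to stdout.
exec git describe --tags --always
```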
If a script uses the `.chroot` extension, **mkosi** will chroot into the image using **mkosi-chroot** (see below) before executing the script. For example, if `mkosi.postinst.chroot` exists, **mkosi** will chroot into the image and execute it as the post-installation script.

Instead of a single file script, **mkosi** will also read all files in lexicographical order from appropriately named `.d` directories, e.g. all files in a `mkosi.build.d` would be used as build scripts. This is supported by

* `mkosi.sync.d`,
* `mkosi.prepare.d`,
* `mkosi.build.d`,
* `mkosi.postinst.d`,
* `mkosi.finalize.d`,
* `mkosi.postoutput.d`, and
* `mkosi.clean.d`.

This can be combined with the `.chroot` extension, e.g. `mkosi.build.d/01-foo.sh` would be run without chrooting into the image and `mkosi.build.d/02-bar.sh.chroot` would be run after chrooting into the image first.

Scripts executed by **mkosi** receive the following environment variables:

* `$ARCHITECTURE` contains the architecture from the `Architecture=` setting. If `Architecture=` is not set, it will contain the native architecture of the host machine. See the documentation of `Architecture=` for possible values for this variable.

* `$QEMU_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by **qemu**. Useful for finding the qemu binary (`qemu-system-$QEMU_ARCHITECTURE`).

* `$DISTRIBUTION` contains the distribution from the `Distribution=` setting.

* `$RELEASE` contains the release from the `Release=` setting.

* `$DISTRIBUTION_ARCHITECTURE` contains the architecture from `$ARCHITECTURE` in the format used by the configured distribution.

* `$PROFILES` contains the profiles from the `Profiles=` setting as a comma-delimited string.

* `$CACHED` is set to `1` if a cached image is available, `0` otherwise.

* `$CHROOT_SCRIPT` contains the path to the running script relative to the image root directory. The primary use case for this variable is in combination with the **mkosi-chroot** script. See the description of **mkosi-chroot** below for more information.

* `$SRCDIR` contains the path to the directory **mkosi** was invoked from, with any configured build sources mounted on top. `$CHROOT_SRCDIR` contains the value that `$SRCDIR` will have after invoking **mkosi-chroot**.

* `$BUILDDIR` is only defined if `mkosi.builddir` exists and points to the build directory to use. This is useful for all build systems that support out-of-tree builds to reuse already built artifacts from previous runs. `$CHROOT_BUILDDIR` contains the value that `$BUILDDIR` will have after invoking **mkosi-chroot**.

* `$DESTDIR` is a directory into which any installed software generated by a build script may be placed. This variable is only set when executing a build script. `$CHROOT_DESTDIR` contains the value that `$DESTDIR` will have after invoking **mkosi-chroot**.

* `$OUTPUTDIR` points to the staging directory used to store build artifacts generated during the build. `$CHROOT_OUTPUTDIR` contains the value that `$OUTPUTDIR` will have after invoking **mkosi-chroot**.

* `$PACKAGEDIR` points to the directory containing the local package repository. Build scripts can add more packages to the local repository by writing the packages to `$PACKAGEDIR`.

* `$ARTIFACTDIR` points to the directory that is used to pass around build artifacts generated during the build and make them available for use by **mkosi**. This is similar to `$PACKAGEDIR`, but is meant for artifacts that may not be packages understood by the package manager, e.g.
  initrds created by other initrd generators than **mkosi**. Build scripts can add more artifacts to the directory by placing them in `$ARTIFACTDIR`. Files in this directory are only available for the current build and are not copied out like the contents of `$OUTPUTDIR`.

  **mkosi** will also use certain subdirectories of an artifacts directory to automatically use their contents at certain steps. Currently the following two subdirectories in the artifact directory are used by **mkosi**:

  - `io.mkosi.microcode`: All files in this directory are used as microcode files, i.e. they are prepended to the initrds in lexicographical order.
  - `io.mkosi.initrd`: All files in this directory are used as initrds and joined in lexicographical order.

  It is recommended that users of `$ARTIFACTDIR` put things for their own use in a similar namespaced directory, e.g. `local.my.namespace`.

* `$BUILDROOT` is the root directory of the image being built, optionally with the build overlay mounted on top depending on the script that's being executed.

* `$WITH_DOCS` is either `0` or `1` depending on whether a build without or with installed documentation was requested (`WithDocs=yes`). A build script should suppress installation of any package documentation to `$DESTDIR` in case `$WITH_DOCS` is set to `0`.

* `$WITH_TESTS` is either `0` or `1` depending on whether a build without or with running the test suite was requested (`WithTests=no`). A build script should avoid running any unit or integration tests in case `$WITH_TESTS` is `0`.

* `$WITH_NETWORK` is either `0` or `1` depending on whether a build without or with networking is being executed (`WithNetwork=no`). A build script should avoid any network communication in case `$WITH_NETWORK` is `0`.

* `$SOURCE_DATE_EPOCH` is defined if requested (`SourceDateEpoch=TIMESTAMP`, `Environment=SOURCE_DATE_EPOCH=TIMESTAMP` or the host environment variable `$SOURCE_DATE_EPOCH`). This is useful to make builds reproducible. See [SOURCE_DATE_EPOCH](https://reproducible-builds.org/specs/source-date-epoch/) for more information.

* `$MKOSI_UID` and `$MKOSI_GID` are respectively the uid and gid of the user that invoked **mkosi**.

* `$MKOSI_CONFIG` is a file containing a JSON summary of the settings of the current image. This file can be parsed inside scripts to gain access to all settings for the current image.

* `$IMAGE_ID` contains the identifier from the `ImageId=` or `--image-id=` setting.

* `$IMAGE_VERSION` contains the version from the `ImageVersion=` or `--image-version=` setting.
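To illustrate how these variables combine, a hypothetical **meson**-based `mkosi.build` script might look as follows (a sketch, not a builtin of **mkosi**):

```sh
#!/bin/sh
set -e
d="${BUILDDIR:-build}"
# Configure once; later incremental runs reuse the out-of-tree build directory.
[ -f "$d/build.ninja" ] || meson setup "$d" "$SRCDIR"
meson compile -C "$d"
# Install into $DESTDIR so mkosi copies the results into the image.
meson install -C "$d" --destdir "$DESTDIR"
```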
Consult this table for which script receives which environment variables:

| Variable                    | `configure` | `sync` | `prepare` | `build` | `postinst` | `finalize` | `postoutput` | `clean` |
|-----------------------------|:-----------:|:------:|:---------:|:-------:|:----------:|:----------:|:------------:|:-------:|
| `ARCHITECTURE`              | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `QEMU_ARCHITECTURE`         | ✓           |        |           |         |            |            |              |         |
| `DISTRIBUTION`              | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `DISTRIBUTION_ARCHITECTURE` | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `RELEASE`                   | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `PROFILES`                  | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          |              | ✓       |
| `CACHED`                    |             | ✓      |           |         |            |            |              |         |
| `CHROOT_SCRIPT`             |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `SRCDIR`                    | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `CHROOT_SRCDIR`             |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `BUILDDIR`                  |             |        |           | ✓       | ✓          | ✓          |              |         |
| `CHROOT_BUILDDIR`           |             |        |           | ✓       |            |            |              |         |
| `DESTDIR`                   |             |        |           | ✓       |            |            |              |         |
| `CHROOT_DESTDIR`            |             |        |           | ✓       |            |            |              |         |
| `OUTPUTDIR`                 |             |        |           |         | ✓          | ✓          | ✓            | ✓       |
| `CHROOT_OUTPUTDIR`          |             |        |           |         | ✓          | ✓          |              |         |
| `BUILDROOT`                 |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `PACKAGEDIR`                |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `ARTIFACTDIR`               |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `WITH_DOCS`                 |             |        | ✓         | ✓       |            |            |              |         |
| `WITH_TESTS`                |             |        | ✓         | ✓       |            |            |              |         |
| `WITH_NETWORK`              |             |        | ✓         | ✓       | ✓          | ✓          |              |         |
| `SOURCE_DATE_EPOCH`         |             |        | ✓         | ✓       | ✓          | ✓          |              | ✓       |
| `MKOSI_UID`                 | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_GID`                 | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `MKOSI_CONFIG`              |             | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `IMAGE_ID`                  | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |
| `IMAGE_VERSION`             | ✓           | ✓      | ✓         | ✓       | ✓          | ✓          | ✓            | ✓       |

Additionally, when a script is executed, a few scripts are made available via `$PATH` to simplify common use cases.

* **mkosi-chroot**: This script will chroot into the image and execute the given command. On top of chrooting into the image, it will also mount various files and directories (`$SRCDIR`, `$DESTDIR`, `$BUILDDIR`, `$OUTPUTDIR`, `$CHROOT_SCRIPT`) into the image and modify the corresponding environment variables to point to the locations inside the image. It will also mount APIVFS filesystems (`/proc`, `/dev`, ...) to make sure scripts and tools executed inside the chroot work properly. It also propagates `/etc/resolv.conf` from the host into the chroot if requested so that DNS resolution works inside the chroot. After the **mkosi-chroot** command exits, various mount points are cleaned up.

  For example, to invoke **ls** inside of the image, use the following:

  ```sh
  mkosi-chroot ls ...
  ```

  To execute the entire script inside the image, add a `.chroot` suffix to the name (`mkosi.build.chroot` instead of `mkosi.build`, etc.).

* For all of the supported package managers (**dnf**, **rpm**, **apt**, **dpkg**, **pacman**, **zypper**), scripts of the same name are put into `$PATH` that make sure these commands operate on the image's root directory with the configuration supplied by the user instead of on the host system. This means that from a script, you can do e.g. `dnf install vim` to install vim into the image. Additionally, `mkosi-install`, `mkosi-reinstall`, `mkosi-upgrade` and `mkosi-remove` will invoke the corresponding operation of the package manager being used to build the image.

* **git** is automatically invoked with `safe.directory=*` to avoid permission errors when running as the root user in a user namespace.

* **useradd** and **groupadd** are automatically invoked with `--root=$BUILDROOT` when executed outside of the image.
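As a sketch, a `mkosi.postinst` script can combine these helpers, e.g. installing an extra package with the wrapped package manager and then running a command inside the image (the package and unit names are hypothetical):

```sh
#!/bin/sh
# Install a package into the image via the package manager wrapper.
mkosi-install vim
# Run a command inside the image itself.
mkosi-chroot systemctl enable example.service
```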
When scripts are executed, any directories that are still writable are also made read-only (`/home`, `/var`, `/root`, ...) and only the minimal set of directories that need to be writable remain writable. This is to ensure that scripts can't mess with the host system when **mkosi** is running as root.

Note that when executing scripts, all source directories are made ephemeral, which means all changes made to source directories while running scripts are thrown away after the scripts finish executing. Use the output, build or cache directories if you need to persist data between builds.

# Files

To make it easy to build images for development versions of your projects, **mkosi** can read configuration data from the local directory, under the assumption that it is invoked from a *source* tree. Specifically, the following files are used if they exist in the local directory:

* The **`mkosi.skeleton/`** directory or **`mkosi.skeleton.tar`** archive may be used to insert files into the image. The files are copied *before* the distribution packages are installed into the image. This allows creation of files that need to be provided early, for example to configure the package manager or set systemd presets. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive.

* The **`mkosi.extra/`** directory or **`mkosi.extra.tar`** archive may be used to insert additional files into the image, on top of what the distribution includes in its packages. They are similar to `mkosi.skeleton/` and `mkosi.skeleton.tar`, but the files are copied into the directory tree of the image *after* the OS was installed. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive.

* The **`mkosi.sandbox/`** directory or **`mkosi.sandbox.tar`** archive may be used to configure the package manager without the files being inserted into the image. If the files should be included in the image, `mkosi.skeleton/` and `mkosi.skeleton.tar` should be used instead. When using the directory, file ownership is not preserved: all files copied will be owned by root. To preserve ownership, use a tar archive.

* The **`mkosi.nspawn`** nspawn settings file will be copied into the same place as the output image file, if it exists. This is useful since nspawn looks for settings files next to image files it boots, for additional container runtime settings.

* The **`mkosi.cache/`** directory, if it exists, is automatically used as package download cache, in order to speed up repeated runs of the tool.

* The **`mkosi.builddir/`** directory, if it exists, is automatically used as out-of-tree build directory, if the build commands in the `mkosi.build` scripts support it. Specifically, this directory will be mounted into the build container, and the `$BUILDDIR` environment variable will be set to it when the build scripts are invoked. A build script may then use this directory as build directory, for **automake**-style or **ninja**-style out-of-tree builds. This speeds up builds considerably, in particular when **mkosi** is used in incremental mode (`-i`): not only the image and build overlay, but also the build tree is reused between subsequent invocations. Note that if this directory does not exist the `$BUILDDIR` environment variable is not set, and it is up to the build scripts to decide whether to do an in-tree or an out-of-tree build, and which build directory to use.
* The **`mkosi.rootpw`** file can be used to provide the password for the root user of the image. If the password is prefixed with `hashed:` it is treated as an already hashed root password. The password may optionally be followed by a newline character which is implicitly removed. The file must have an access mode of 0600 or less. If this file does not exist, the distribution's default root password is set (which usually means access to the root user is blocked).

* The **`mkosi.passphrase`** file provides the passphrase to use when LUKS encryption is selected. It should contain the passphrase literally, and not end in a newline character (i.e. in the same format as **cryptsetup** and `/etc/crypttab` expect the passphrase files). The file must have an access mode of 0600 or less.

* The **`mkosi.crt`** and **`mkosi.key`** files contain an X.509 certificate and PEM private key to use when signing is required (UEFI SecureBoot, verity, ...).

* The **`mkosi.output/`** directory is used to store all build artifacts.

* The **`mkosi.credentials/`** directory is used as a source of extra credentials similar to the `Credentials=` option. For each file in the directory, the filename will be used as the credential name and the file contents become the credential value, or, if the file is executable, **mkosi** will execute the file and the command's output to stdout will be used as the credential value. Output to stderr will be ignored. Credentials configured with `Credentials=` take precedence over files in `mkosi.credentials`.

* The **`mkosi.repart/`** directory is used as the source for **systemd-repart** partition definition files which are passed to **systemd-repart** when building a disk image. If it does not exist and the `RepartDirectories=` setting is not configured, **mkosi** will default to the following partition definition files:

  `00-esp.conf` (if we're building a bootable image):

  ```ini
  [Partition]
  Type=esp
  Format=vfat
  CopyFiles=/boot:/
  CopyFiles=/efi:/
  SizeMinBytes=512M
  SizeMaxBytes=512M
  ```

  `05-bios.conf` (if we're building a BIOS bootable image):

  ```ini
  [Partition]
  # UUID of the grub BIOS boot partition which grub needs on GPT to
  # embed itself into.
  Type=21686148-6449-6e6f-744e-656564454649
  SizeMinBytes=1M
  SizeMaxBytes=1M
  ```

  `10-root.conf`:

  ```ini
  [Partition]
  Type=root
  Format=<distribution-default-filesystem>
  CopyFiles=/
  Minimize=guess
  ```

  Note that if either `mkosi.repart/` is found or `RepartDirectories=` is used, we will not use any of the default partition definitions.

All these files are optional. Note that the location of all these files may also be configured during invocation via command line switches, and as settings in `mkosi.conf`, in case the default settings are not acceptable for a project.

# CACHING

**mkosi** supports three different caches for speeding up repetitive re-building of images. Specifically:

1. The package cache of the distribution package manager may be cached between builds. This is configured with the `--cache-directory=` option or the `mkosi.cache/` directory. This form of caching relies on the distribution's package manager, and caches distribution packages (RPM, deb, …) after they are downloaded, but before they are unpacked.

2. If the incremental build mode is enabled with `--incremental`, cached copies of the final image and build overlay are made immediately before the build sources are copied in (for the build overlay) or the artifacts generated by `mkosi.build` are copied in (in case of the final image).
   This form of caching allows bypassing the time-consuming package unpacking step of the distribution package managers, but it is only effective if the list of packages to use remains stable while the build sources and scripts change regularly. Note that this cache requires manual flushing: whenever the package list is modified, the cached images need to be explicitly removed before the next re-build, using the `-f` switch.

3. Finally, between multiple builds the build artifact directory may be shared, using the `mkosi.builddir/` directory. This directory allows build systems such as Meson to reuse already compiled sources from a previous build, thus speeding up the build process of a `mkosi.build` build script.

The package cache and incremental mode are unconditionally useful. The final cache only applies to uses of **mkosi** with a source tree and build script. When all three are enabled together, turn-around times for complete image builds are minimal, as only changed source files need to be recompiled.

# Building multiple images

If the `mkosi.images/` directory exists, **mkosi** will load individual subimage configurations from it and build each of them. Image configurations can be either directories containing **mkosi** configuration files or regular files with the `.conf` extension.

When image configurations are found in `mkosi.images/`, **mkosi** will build the images specified in the `Dependencies=` setting of the main image and all of their dependencies (or all of them if no images were explicitly configured using `Dependencies=` in the main image configuration). To add dependencies between subimages, the `Dependencies=` setting can be used as well. Subimages are always built before the main image.

When images are defined, **mkosi** will first read the main image configuration (configuration outside of the `mkosi.images/` directory), followed by the image specific configuration. Several "universal" settings apply to the main image and all its subimages and cannot be configured separately in subimages. The following settings are universal and cannot be configured in subimages:

- `Architecture=`
- `BuildDirectory=`
- `BuildSources=`
- `BuildSourcesEphemeral=`
- `CacheDirectory=`
- `CacheOnly=`
- `Distribution=`
- `ExtraSearchPaths=`
- `Incremental=`
- `LocalMirror=`
- `Mirror=`
- `OutputDirectory=`
- `OutputMode=`
- `PackageCacheDirectory=`
- `PackageDirectories=`
- `Profiles=`
- `ProxyClientCertificate=`
- `ProxyClientKey=`
- `ProxyExclude=`
- `ProxyPeerCertificate=`
- `ProxyUrl=`
- `Release=`
- `RepartOffline=`
- `Repositories=`
- `RepositoryKeyCheck=`
- `SandboxTrees=`
- `SourceDateEpoch=`
- `ToolsTree=`
- `ToolsTreeCertificates=`
- `UseSubvolumes=`
- `SecureBootCertificate=`
- `SecureBootCertificateSource=`
- `SecureBootKey=`
- `SecureBootKeySource=`
- `VerityCertificate=`
- `VerityCertificateSource=`
- `VerityKey=`
- `VerityKeySource=`
- `SignExpectedPcrCertificate=`
- `SignExpectedPcrCertificateSource=`
- `SignExpectedPcrKey=`
- `SignExpectedPcrKeySource=`
- `VolatilePackageDirectories=`
- `WithNetwork=`
- `WithTests=`
- `WorkspaceDirectory=`

There are also settings which are passed down to subimages but can be overridden. For these settings, values configured explicitly in the subimage will take priority over values configured on the CLI or in the main image config. Currently the following settings are passed down to subimages but can be overridden:

- `ImageId=`
- `ImageVersion=`
- `SectorSize=`
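As an illustration, a top-level `mkosi.conf` might select which subimages from `mkosi.images/` to build (the subimage names are hypothetical):

```ini
[Config]
# Build the "base" and "initrd" subimages before the main image.
Dependencies=base,initrd
```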
Images can refer to outputs of images they depend on. Specifically, for the following options, **mkosi** will only check whether the inputs exist just before building the image:

- `BaseTrees=`
- `ExtraTrees=`
- `Initrds=`

To refer to outputs of an image's dependencies, simply configure any of these options with a relative path to the output to use in the output directory of the dependency. Or use the `%O` specifier to refer to the output directory.

A good example of how to build multiple images can be found in the [systemd](https://github.com/systemd/systemd/tree/main/mkosi.images) repository.

# ENVIRONMENT VARIABLES

* `$MKOSI_LESS` overrides options for **less** when it is invoked by **mkosi** to page output.

* `$MKOSI_DNF` can be used to override the executable used as **dnf**. This is particularly useful to select between **dnf** and **dnf5**.

* `$EPEL_MIRROR` can be used to override the default mirror location used for the epel repositories when `Mirror=` is used. By default **mkosi** looks for the epel repositories in the `fedora` subdirectory of the parent directory of the mirror specified in `Mirror=`. For example if the mirror is set to `https://mirror.net/centos-stream`, **mkosi** will look for the epel repositories in `https://mirror.net/fedora/epel`.

* `$SYSEXT_SCOPE` and `$CONFEXT_SCOPE` can be used to override the default value of the respective `extension-release` file when building a sysext or confext. By default the value is set to `initrd system portable`.

# EXAMPLES

Create and run a raw *GPT* image with *ext4*, as `image.raw`:

```console
# mkosi -p systemd --incremental boot
```

Create and run a bootable *GPT* image, as `foobar.raw`:

```console
$ mkosi -d fedora -p kernel-core -p systemd -p systemd-boot -p udev -o foobar.raw
# mkosi --output foobar.raw boot
$ mkosi --output foobar.raw vm
```

Create and run a *Fedora Linux* image in a plain directory:

```console
# mkosi --distribution fedora --format directory boot
```

Create a compressed image `image.raw.xz` with SSH installed and add a checksum file:

```console
$ mkosi --distribution fedora --format disk --checksum --compress-output --package=openssh-clients
```

Inside the source directory of an **automake**-based project, configure **mkosi** so that simply invoking **mkosi** without any parameters builds an OS image containing a built version of the project in its current state:

```console
$ cat >mkosi.conf <<EOF
…
EOF
$ cat >mkosi.build <<EOF
…
EOF
```

On distributions where AppArmor restricts the creation of unprivileged user namespaces, a profile along the following lines allows **mkosi** to create them:

```
abi <abi/4.0>,
include <tunables/global>

/path/to/mkosi flags=(default_allow) {
  userns,
}
```

# Frequently Asked Questions (FAQ)

- Why does `mkosi vm` with KVM not work on Debian/Kali/Ubuntu?

  While other distributions are OK with allowing access to `/dev/kvm`, on Debian/Kali/Ubuntu this is only allowed for users in the `kvm` group. Because **mkosi** unshares a user namespace when running unprivileged, even if the calling user was in the `kvm` group, it loses access to the `kvm` group in the user namespace and by the time we start **qemu** we don't have access to `/dev/kvm` anymore. As a workaround, you can change the permissions of the device nodes to `0666`, which is sufficient to make KVM work unprivileged. To persist these settings across reboots, copy `/usr/lib/tmpfiles.d/static-nodes-permissions.conf` to `/etc/tmpfiles.d/static-nodes-permissions.conf` and change the mode of `/dev/kvm` from `0660` to `0666`.
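  A sketch of the resulting tmpfiles.d override (the stock rule with the mode changed to `0666`; `10:232` are assumed to be the device numbers of `/dev/kvm`):

  ```
  c! /dev/kvm 0666 root kvm - 10:232
  ```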
- How do I add a regular user to an image?

  You can use the following snippet in a post-installation script:

  ```sh
  useradd --create-home --user-group $USER --password "$(openssl passwd -stdin -6 <$USER_PASSWORD_FILE)"
  ```

  Note that from systemd v256 onwards, if enabled, **systemd-homed-firstboot.service** will prompt to create a regular user on first boot if there are no regular users.

- Why do I see failures to chown files when building images?

  When not running as root, your user is not able to change ownership of files to arbitrary owners. Various distributions still ship files in their packages that are not owned by the root user. When not running as root, **mkosi** maps the current user to root when invoking package managers, which means that changing ownership to root will work but changing ownership to any other user or group will fail.

  Note that chown calls are only suppressed when running package managers, but not when running scripts. If this is required, e.g. for a build script, you can set the `MKOSI_CHROOT_SUPPRESS_CHOWN` variable to a true value (`1`, `yes`, `true`) to suppress chown calls in **mkosi-chroot** and `.chroot` scripts.

  If this behavior causes applications running in your image to misbehave, you can consider running **mkosi** as root, which avoids this problem. Alternatively, if running **mkosi** as root is not desired, you can use `unshare --map-auto --map-current-user --setuid 0 --setgid 0` to become root in a user namespace with more than one user, assuming the UID/GID mappings in `/etc/subuid` and `/etc/subgid` are configured correctly. Note that running **mkosi** as root or with `unshare` means that all output files produced by **mkosi** will not be owned by your current user anymore.

  Note that for systemd services that need directories in `/var` owned by the service user and group, an alternative to shipping these directories in packages or creating them via **systemd-tmpfiles** is to use `StateDirectory=`, `CacheDirectory=` or `LogsDirectory=` in the service file, which instructs systemd to create the directory when it first starts the service. Alternatively, the `z` or `Z` directives for **systemd-tmpfiles** can be used to chown various directories and files to their owning user when the system first boots up.

- Why does `portablectl inspect`/`systemd-dissect` say my portable service isn't one?

  `systemd-dissect` and `portablectl inspect` check for `PORTABLE_PREFIXES=` in `os-release` and if the key is missing, will fail to recognise a portable service as one, showing ✗ under *Use as* in the case of `systemd-dissect` or `n/a` under *Portable Service* for `portablectl`. Since there is no good default to set for this key and the generated portable service images will still attach properly even when the key is not set, **mkosi** doesn't set one. You can set `PORTABLE_PREFIXES=` in the `os-release` file yourself in a postinst script.

# REFERENCES

* [Primary mkosi git repository on GitHub](https://github.com/systemd/mkosi/)
* [mkosi — A Tool for Generating OS Images](https://0pointer.net/blog/mkosi-a-tool-for-generating-os-images.html) introductory blog post by Lennart Poettering
* [The mkosi OS generation tool](https://lwn.net/Articles/726655/) story on LWN

# SEE ALSO

**systemd-nspawn**(1), **systemd-repart**(8), **dnf**(8)

% mkosi.news(7)
%
%

# mkosi Changelog

## v25

- Instead of using bubblewrap, sandboxing is now done with a new tool `mkosi-sandbox`. This tool has a public API and can be used independently of mkosi.
This tool has a public API and can be used independently of mkosi. - Image builds are now done in a user namespace with a single user when running unprivileged instead of using newuidmap/newgidmap. When running unprivileged, all files and directories in the image will be owned by the invoking user (and by root inside any produced archives). Any attempt to chown files to other users in scripts will fail unless the new environment variable `$MKOSI_CHROOT_SUPPRESS_CHOWN` is set to a true value. - `mkosi` does not drop privileges anymore to the invoking user when running as root for various steps. - A new `cat-config` verb will show all configuration files that were included for each configured image. - Added support for Azure Linux - Added support for Kali Linux - If `mkosi.version` is executable, we now execute it and read the version from stdout. - Added `--wipe-build-dir` to wipe the build directory before rebuilding the image. - Introduced `RepositoryKeyFetch=` to control whether to fetch distribution GPG keys remotely. This setting is **disabled** by default for security reasons except when building rpm based or Arch Linux images on Ubuntu. - We now handle `SIGHUP` gracefully - Universal settings that take a collection of values cannot be appended to anymore in subimages. Usage of package manager trees in subimages will have to be moved to the top level image. Similarly, repositories will have to be enabled in the top level image. - Repository metadata is not copied into images anymore. - Repository metadata from base trees is not used anymore. - Package manager trees are now named sandbox trees. - Package manager trees (sandbox trees) do not use the skeleton trees as their default anymore if unset. - Note to packagers: The manual pages have been moved to resources/man and now include man pages for mkosi-initrd and mkosi-sandbox as well. - `InitrdInclude=` was removed. If you're using `InitrdInclude=`, please build your initrd via a subimage in `mkosi.images` containing `Include=mkosi-initrd` and any customizations you wish to add and use the `Initrds=` setting to use it as the initrd for the main image instead of the default initrd. - Added `History=` to have mkosi save the config used to build the image and reuse it when verbs such as `qemu`, `boot`, … are invoked without `-f`. - Introduced new `[Build]` section and moved various settings to it. - Moved `Include=` to `[Include]` section - Added `sysupdate` verb as a wrapper around `systemd-sysupdate` which invokes it with definitions from `mkosi.sysupdate`. - Added `RuntimeHome=` to mount the current home directory to `/root` when running a command that boots the image - More directories aside from `/etc` and `/usr` are now picked up from sandbox trees (formerly known as package manager trees). - Profile configuration from `mkosi.profiles` is now parsed after `mkosi.conf.d` instead of before it. To set defaults for use in `mkosi.conf.d` based on the configured profile, use an early dropin in `mkosi.conf.d` that matches on the configured profile instead. - `Profile=` is renamed to `Profiles=` and takes a comma separated list of profiles now. Scripts now receive `$PROFILES` with a space-separated list of profiles instead of `$PROFILE`. The `%p` specifier for profiles is removed. - Multiple sync, prepare, build, postinst, finalize, postoutput and clean scripts are now picked up from `mkosi.$SCRIPT.d`. - `run0` is now automatically used to escalate privileges for commands that need it, like the `burn` verb. 
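  In practice this means the `burn` verb can now simply be invoked as a regular user and privileges will be escalated on demand; an illustrative invocation (the target device is a placeholder):

  ```console
  $ mkosi burn /dev/sdX
  ```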
- `/usr/share/keyrings` and `/usr/share/distribution-gpg-keys` are no longer automatically picked up from the tools tree when `ToolsTreeCertificates=` is set, since they aren't certificates; use a sandbox tree instead. This allows one to override `SignedBy=` keys for APT repositories.
- The `agetty.autologin` and `login.noauth` credentials are no longer set unconditionally.
- Access to the output directory in build scripts was removed. To put artifacts from the build directory into the output directory, copy them over in a post-installation script, which does have access to both directories.
- `BuildDirectory=` is no longer available in `PrepareScripts=`. If you need to acquire some files for the build process, place them somewhere sensible within `$BUILDROOT` so that they can be cached when building incrementally.
- When using a tools tree and a relaxed sandbox is used to run a command (qemu, nspawn, ...), we now keep all entries from `$PATH` outside of `/usr` intact. Note that this may cause issues if a `$PATH` entry contains binaries linked against libraries in `/usr` from the host.
- Introduced a new specifier `%I` which resolves to the name of the current subimage when used in a config under `mkosi.images/`. This differs from `%o` as it is always the name of the config file without extension (or the name of the directory).
- If `/dev/fuse` is found in the host context, it is made available in the sandbox context too.
- Added a `sandbox` verb to run a command within a relaxed mkosi sandbox (the same sandbox that `mkosi vm`, `mkosi boot`, ... run in).
- OpenSSL providers are now supported as key sources for the various key settings if a recent enough systemd version (257 or newer) is used.
- Added support for loading X.509 certificates from OpenSSL providers if a recent enough systemd version (257 or newer) is used.
- Added `ToolsTreePackageDirectories=`
- Added `--kernel-image=` to `mkosi-initrd` to specify the kernel image to use when building a UKI.
- Setting a collection-based setting to the empty string via the CLI and then appending to the same setting will now override the settings coming from configuration files, whereas previously the CLI values would be appended to the values from configuration files.
- The `mkosi-initrd` default config now includes various extra kernel modules by default.
- The `coredumpctl` and `journalctl` verbs will now always operate on the image, even if `ForwardJournal=` is configured.
- Bumped default Fedora release to `41`.
- Added `addon` output format to build UKI addons.
- Renamed the `[Host]` section to `[Runtime]`.
- Renamed various settings from `[Host]`.
- Binaries coming from `ExtraSearchPaths=` are now executed with the tools tree mounted if one is configured (unlike before, where the tools tree was not mounted). This means that any binaries coming from `ExtraSearchPaths=` have to be linked against libraries from the tools tree (or have to be statically linked). Alternatively, the tools tree distribution and release have to match the host.
- Binaries from `ExtraSearchPaths=` are not used anymore when building the default tools tree.
- Dropped support for `pesign` as a secure boot signing tool.
- Added support for `systemd-sbsign` as a secure boot signing tool.
- Added `--register=` to control whether to register containers and VMs with systemd-machined or not.
- `mkosi.profiles` is now parsed in subimages as well.
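  For instance, a profile drop-in inside a subimage might look like this (hypothetical layout and package choice):

  ```console
  $ cat mkosi.images/initrd/mkosi.profiles/debug.conf
  [Content]
  Packages=strace
  ```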
- `mkosi-initrd` now uses `dnf5` on systems where it is the default.
- Added various packages to the default tools tree.
- Dropped support for Ubuntu Focal.
- Added `Devicetree=` setting for configuring bootloader device trees.
- Added systemd-machined registration using varlink for `mkosi qemu` VMs, which includes the vsock CID so that `ssh vsock/<cid>` or `ssh machine/<machine>` will work on systems running `systemd-machined` 257 or newer.
- Bumped CentOS Stream default release to 10.
- mkosi now manages the pacman keyring itself, so `/etc/pacman.d/gnupg` from the host is not used anymore and mkosi will run `pacman-key --init` and `pacman-key --populate` itself.
- Added `ToolsTreeRelease=` match.
- mkosi now enforces that images built with `Overlay=yes` only add files on top of the base tree(s) and don't overwrite any existing files or directories.
- Added a `mkosi-addon` tool and accompanying kernel-install plugin that allows building PE addons to extend a vendor-provided unified kernel image.
- Added `systemd-boot-signed`, `uki-signed` and `grub-signed` variants for the `Bootloader=` option which instruct mkosi to only install pre-signed EFI binaries.
- `mkosi.profiles` is now parsed in configuration included with `Include=`.
- Any initrds configured with `Initrds=` are now used as a fallback when booting with qemu direct kernel boot (`--firmware=linux`) if no split initrd was produced by the image build.
- mkosi now makes a greater effort to ensure the crypto-policies are configured to allow GPG keys from older distributions.
- We don't pick up pre-signed bootloader binaries anymore when `ShimBootloader=signed` is configured. To force usage of pre-signed EFI binaries, use the new `systemd-boot-signed`, `uki-signed` and `grub-signed` variants for the `Bootloader=` option.
- Added a new constant `microsoft-mok` for the `FirmwareVariables=` option. If specified, a firmware variables file with the Microsoft keys enrolled will be extended to include a `MokList` entry that trusts the certificate configured with `SecureBootCertificate=` and passed to `qemu`.
- We now use `mkosi.pkgcache` as the package cache directory if the directory exists.
- `BuildSourcesEphemeral=` learned a new variant `buildcache`, in which case the overlay will be cached in the build directory configured with `BuildDirectory=`.

## v24

- The default kernel command line of `console=ttyS0` (or equivalent for other architectures) has been removed. The `console=` argument required to have the kernel output to the serial console has to be added manually from `v24` onwards.
- Support for installing local packages located in directories in `BuildSources=` was dropped. Instead, the packages can be made available for installation via `PackageManagerTrees=`.
- Configuration parsing was reworked to remove the need for the `@` specifier and to streamline building multiple images with `mkosi.images/`. If you were building multiple images with `mkosi.images/`, you'll need to adapt your configuration to the rework. Read the **Building multiple images** section in the documentation for more information.
- mkosi has gained the option to generate completion scripts for bash, fish and zsh. Packagers should generate the scripts during packaging and ship them in the appropriate places.
- Added support for CentOS Stream 10.
- mkosi now installs a separate `mkosi-initrd` script that can be used to build initramfs images intended for use on the local system.
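  A typical invocation to build an initrd for the running kernel might look like the following (illustrative; consult `mkosi-initrd --help` for the options available in your version):

  ```console
  # mkosi-initrd --kernel-version "$(uname -r)" --output initrd
  ```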
- We do not automatically append `centos-stream` or `fedora` anymore to CentOS (and derivatives) and Fedora mirrors specified with `Mirror=` as not all mirrors store the repository metadata under these subdirectories. Users are now required to add these subdirectories themselves in `Mirror=`. If the EPEL repositories are enabled for CentOS Stream (and derivatives) and `Mirror=` is used, we look for the EPEL repositories in `../fedora` relative to the mirror specified in `Mirror=`. - We now support compressed tar archives wherever we already accept tar archives as input. - We now always rerun the build if `Format=none` and don't remove previous outputs in that case (unless `--force` is specified). This allows using `mkosi -t none` to rerun the build scripts without removing the previous image. This can then be combined with `RuntimeBuildSources=yes` to make the build script outputs available in a booted container or virtual machine so they can be installed without having to rebuild the image. - We now use `virtconsole` to provide the serial console when booting with `qemu`. - `root=PARTUUID` and `mount.usr=PARTUUID` on the kernel command line are now automatically extended with the actual PARTUUID of the corresponding partition. - All available OpenSUSE repositories are now supported and can be enabled with `Repositories=`. - Building OpenSUSE `aarch64` images is now supported - `mkosi dependencies` was beefed up to handle more scenarios properly - The default list of kernel modules that are always added to the initramfs was extended with various virtualization modules. - Added a `Repositories=` match. - Cached images are now invalidated if packages specified via `PackageDirectories=` change. - Added `VolatilePackageDirectories=` which can be used to provide local packages that do not invalidate cached images. - `mkosi.pkgmngr` is now used as the default path for `PackageManagerTrees=`. - The package directory that build scripts can use to make built packages available for installation (`$PACKAGEDIR`) is now shared between all image builds. This means that packages built in earlier images and stored in `$PACKAGEDIR` become available for installation in all subsequent image builds. - The default tools tree distribution is now chosen based on the host distribution instead of the target distribution. - mkosi can now be invoked from the initramfs. ## v23.1 - Respin due to git tag mismatch ## v23 - Added `CleanScripts=` to allow running custom cleanup code whenever mkosi cleans up the output directory. This allows cleaning up extra outputs produced by e.g. a build script that mkosi doesn't know about. - Added `ConfigureScripts=` to allow dynamically modifying the mkosi configuration. Each configure script receives the current config as JSON on stdin and should output the new config as JSON on stdout. - When building a UKI, we don't measure for the TPM SHA1 PCR bank anymore. - All keys in the mkosi config JSON output are now in pascal case, except for credentials and environments, where the keys encode names of credentials and environment variables and are therefore case sensitive. - Added various settings to allow running mkosi behind a proxy. - Various fixes to kernel module filtering that should result in fewer modules being pulled into the default initrd when `KernelModulesExclude=` or `KernelModulesInitrdExclude=` are used. - Added `ToolsTreeDistribution=` match. - Removed `vmspawn` verb and replaced it with `VirtualMachineMonitor=`. - New specifiers for various directories were added. 
  `%D` resolves to the directory that mkosi was invoked in, `%P` to the current working directory, and `%C` to the parent directory of the config file.
- Added `ForwardJournal=` to have systemd inside a container/VM forward its journal to the specified file or directory.
- Systemd scopes are now allocated for qemu, swtpm, virtiofsd and systemd-journal-remote if available.
- The `mkosi qemu` virtual machine is now registered with systemd-machined if available.
- Added new `oci` output format.
- Runtime trees without a target are now mounted to `/root/src` instead of a subdirectory of it (to have the same behaviour as `BuildSources=`).
- Added `RuntimeBuildSources=` to mount build and source directories when booting the image with `mkosi nspawn` or `mkosi qemu`.
- Introduced `--append` to allow command line settings to be parsed after parsing configuration files.
- `distribution-release` is not installed by default anymore on OpenSUSE.
- Setting `QemuSmp=` to `0` will now make qemu use all available CPUs.
- Free page reporting and discard request processing are now enabled by default in VMs spawned by `mkosi qemu`.
- Added `ToolsTreeCertificates=` to allow configuring whether to use certificates and keys from the tools tree (if one is used) or the host.
- Added `never` for `CacheOnly=` to specify that repository metadata should always be refreshed.
- Renamed the `none` option for `CacheOnly=` to `auto`.
- Added `ProxyExclude=` to configure hostnames for which requests should not go through the configured proxy.
- The default tools tree is now reused on incremental builds.
- Added `VolatilePackages=` and `InitrdVolatilePackages=` to configure packages that should be installed after executing build scripts and which should not be cached when using `Incremental=`.
- `PackageDirectories=` now has an associated default path `mkosi.packages`.
- `reprepro` is now used to generate local apt repositories.
- Support for BSD tar/cpio was dropped.
- When both `ExtraSearchPaths=` and `ToolsTree=` are used, mkosi will now prefer running a binary found in `ExtraSearchPaths=` without the tools tree over running the binary from the tools tree. If a binary is not found in `ExtraSearchPaths=`, the tools tree is used instead.
- An artifact directory is now made available when running scripts, which can be used to pass around data between different scripts. mkosi will also look for microcode and initrds in the artifact directory under the `io.mkosi.microcode` and `io.mkosi.initrd` subdirectories.
- Added `Environment=` match setting to check for environment variables defined with the `Environment=` setting.
- The `basesystem` package is now always installed in Fedora and CentOS images instead of the `filesystem` package.
- The `qemu`, `shell` and `boot` verbs do not automatically build the image anymore unless `--force` is specified.
- `SplitArtifacts=` is now supported for the portable, sysext and confext outputs.
- The `WithDocs=` option was implemented for pacman-based distributions.
- The default Fedora release was bumped to 40.
- `QemuSwtpm=` can now be used with `QemuFirmware=` set to `linux` or `bios`.
- Added `UnitProperties=` to allow configuring properties on the scopes generated by `systemd-nspawn` and `systemd-run`.
- mkosi now only builds a single default tools tree per build, using the settings from the last regular image that we'll build.
- Configure scripts are now only executed for verbs which imply an image build and are executed with the tools tree instead of without it.
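  Such a configure script (see the `ConfigureScripts=` entry above) is a plain stdin-to-stdout JSON filter; a minimal sketch using **jq**, where the key name assumes mkosi's pascal-case JSON output:

  ```sh
  #!/bin/sh
  # Read the current config as JSON on stdin, emit the modified config on stdout.
  exec jq '.KernelCommandLine += ["console=ttyS0"]'
  ```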
- `$QEMU_ARCHITECTURE` is now set for configure scripts to easily allow scripts to figure out which qemu binary will be used to run qemu.
- A file ID can now be specified for `QemuDrives=`. This allows adding multiple qemu drives that are backed by the same file.
- mkosi doesn't fail anymore if images already exist when running `mkosi build`.
- Image names from `mkosi.images/` are now preferred over the specified image ID when determining the output filename to use for an image.
- `--include` now has a shorthand option `-I`.
- The `WITH_NETWORK` environment variable is now passed to build and finalize scripts.
- We now clamp mtimes to the specified source date epoch timestamp instead of resetting all mtimes. This means that we won't touch any mtimes that are already older than the given source date epoch timestamp.
- Removed support for CentOS 8 Stream as it is now EOL.
- The `coredumpctl` and `journalctl` verbs now operate on the path specified in `ForwardJournal=` if one is set.
- Added `UnifiedKernelImageFormat=` setting to allow configuring the naming of unified kernel images generated by mkosi.
- The `versionlock` plugin is now enabled by default for dnf with a no-op configuration.
- `Repositories=` is now implemented for zypper.
- `KernelModulesInclude=` and `KernelModulesInitrdInclude=` now take the special values `host` and `default` to include the host's loaded modules and the default kernel modules defined in `mkosi-initrd` respectively.
- `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` are now deprecated.
- Added `mkosi dependencies` to output the list of packages required by mkosi to build and boot images.

## v22

- We'll now try to delete btrfs subvolumes with `btrfs subvolume delete` first before falling back to recursively deleting the directory.
- The invoking user is now always mapped to `root` when running sync scripts. This fixes an issue where we would fail when a package manager tree or skeleton tree contained a `/usr` directory, as we would not have permissions to run mount in the sandbox.
- We now use qemu's official firmware descriptions to find EDK2/OVMF UEFI firmware. Additionally, `QemuFirmware=uefi` now boots without SecureBoot support, and `QemuFirmware=uefi-secure-boot` was introduced to boot with SecureBoot support. By default we will still boot with SecureBoot support if `QemuFirmware=auto`.
- Added support for `QemuFirmwareVariables=custom` and `QemuFirmwareVariables=microsoft` to use OVMF/EDK2 variables with either the user's custom keys enrolled or with the Microsoft keys enrolled.
- Added `UnifiedKernelImages=` to control whether we generate unified kernel images or not.
- `Bootloader=grub` will now generate a grub EFI image and install it. If `SecureBoot=` is enabled and `ShimBootloader=` is not set to `signed`, the grub EFI image will be signed for SecureBoot.
- `ShimBootloader=signed` will now also instruct mkosi to look for and install already signed grub, systemd-boot, kernel and UKI binaries.
- We now build grub images with a fixed set of modules and don't copy any grub modules to the ESP anymore.
- The configuration is now made available as a JSON file to all mkosi scripts via the `$MKOSI_CONFIG` environment variable.
- `$PROFILE` is now set for all mkosi scripts containing the value of `Profile=` if it is set.

## v21

- We now handle unmerged-usr systems correctly.
- Builtin configs (`mkosi-initrd`, `mkosi-tools`) can now be included using `Include=` (e.g.
`Include=mkosi-initrd`) - The kernel-install plugin now uses the builtin `mkosi-initrd` config so there's no need anymore to copy the full `mkosi-initrd` config into `/usr/lib/mkosi-initrd`. - We don't require a build anymore for the `journalctl` and `coredumpctl` verbs. - `mkosi ssh` works again when used with `ToolsTree=default` - We now use `.zst` instead of `.zstd` for compressed split artifacts produced by `systemd-repart`. - `systemd-repart` uses a persistent temporary directory again for assembling images instead of a tmpfs. - Added `MicrocodeHost=` setting to only include the CPU specific microcode for the current host system. - The kernel-install plugin now only includes the CPU specific microcode - Introduced `PackageCacheDirectory=` to set the directory for package manager caches. This setting defaults to a suitable location in the system or user directory depending on how mkosi is invoked. `CacheDirectory=` is only used for incremental cached images now. - Repository metadata is now synced once at the start of each image build and never during an image build. Each image includes a snapshot of the repository metadata in the canonical locations in `/var` so that incremental images and extension images can reuse the same snapshot. When building an image intended to be used with `BaseTrees=`, disable `CleanPackageMetadata=` to make sure the repository metadata in `/var` is not cleaned up, otherwise any extension images using this image as their base tree will not be able to install additional packages. - Implemented `CacheOnly=metadata`. Note that in the JSON output, the value of `CacheOnly=` will now be a string instead of a boolean. - Added `CompressLevel=` to set the compression level to use. - Dropped experimental Gentoo support. - Added `TriggerMatch=` to specify multiple match sections of which only one should be satisfied. - Added `jq`, `attr`, `acl`, `git`, `sed`, `grep` and `findutils` to the default tools tree. - Added `mkosi-install`, `mkosi-upgrade`, `mkosi-remove` and `mkosi-reinstall` scripts which allow writing scripts that are independent of the package manager being used to build the image. - We now expand specifiers in `Match` section values - Made GPG key handling for Fedora rawhide more robust - If systemd-repart 256 or newer is available, mkosi will instruct it to generate `/etc/fstab` and `/etc/crypttab` for the image if any partition definitions contain the corresponding settings (`MountPoint=` and `EncryptedVolume=`). - `bash` is now started in the debug shell instead of `sh`. - The default release for Ubuntu is now `noble`. - Ubuntu is now used as the default tools tree distribution for Ubuntu instead of Debian. - Added `mkosi vmspawn` which boots the image with `systemd-vmspawn`. Note that `systemd-vmspawn` is experimental and its interface may still change. As such `mkosi vmspawn` is also considered experimental. Note that `systemd-vmspawn` version `256` or newer is required. - Added `SyncScripts=` which can be used to update various build sources before starting the image build. - The `DISTRIBUTION=` and `RELEASE=` environment variables are now set when running scripts. - Added `ToolsTreeRepositories=` and `ToolsTreePackageManagerTrees=`. - Added `RuntimeNetwork=` to configure the networking used when booting the image. - Added `SecureBootKeySource=` and `VerityKeySource=` to support signing images with OpenSSL engines. Note that these settings require various systemd tools to be version `256` or newer. 
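Several of the entries above combine nicely: a prepare script can branch on `$DISTRIBUTION` and `$RELEASE` while staying package-manager-agnostic via **mkosi-install**. A sketch (the package name is a placeholder):

```sh
#!/bin/sh
# Only pull in the extra package when building a recent enough Fedora image.
if [ "$DISTRIBUTION" = fedora ] && [ "$RELEASE" -ge 40 ]; then
    mkosi-install some-extra-package
fi
```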
- We don't clean up package manager metadata anymore unless explicitly requested with `CleanPackageManagerMetadata=yes` when building `directory` and `tar` images.

## v20.2

- Fixed a bug in signing unsigned shim EFI binaries.
- We now build an early microcode initrd in the mkosi kernel-install plugin.
- Added `PackageDirectories=` to allow providing extra packages to be made available during the build.
- Fixed issue where `KernelModulesIncludeHost=` was including unnecessary modules.
- Fixed `--mirror` specification for CentOS (and variants) and Fedora. Previously a subdirectory within the mirror had to be specified, which prevented using CentOS and EPEL repositories from the same mirror. Now only the URL has to be specified.
- We now mount package manager cache directories when running scripts on the host so that any packages installed in scripts are properly cached.
- We don't download filelists on Fedora anymore.
- Nested build sources don't cause errors anymore when trying to install packages.
- We don't try to build the same tools tree more than once anymore when building multiple images.
- We now create the `/etc/mtab` compatibility symlink in mkosi's sandbox.
- We now always hash the root password ourselves instead of leaving it to `systemd-firstboot`.
- `/srv` and `/mnt` are not mounted read-only anymore during builds.
- Fixed a crash when running mkosi in a directory with fewer than two parent directories.
- Implemented `RepositoryKeyCheck=` for apt-based distributions.

## v20.1

- `BuildSources=` are now mounted when we install packages so local packages can be made available in the sandbox.
- Fixed the check to see if we're running as root, which makes sure we don't do shared mounts when running as root.
- The extension release file is now actually written when building system or configuration extensions.
- The nspawn settings are copied to the output directory again.
- Incremental caching is now skipped when `Overlay=` is enabled, as this combination isn't supported.
- The SELinux relabel check is more granular and now checks for all required files instead of just whether there's a policy configured.
- `qemu-system-xxx` binaries are now preferred over the generic `qemu` and `qemu-kvm` binaries.
- Grub tools from the tools tree are now used to install grub instead of grub tools from the image itself. The grub tools were added to the default tools trees as well.
- The pacman keyring in tools trees is now only populated from the Arch Linux keyring (and not the Debian/Ubuntu ones anymore).
- `gpg` is allowed to access `/run/pcscd/pcscd.comm` on the host if it exists to allow interaction with smartcards.

## v20

- The current working directory is not mounted unconditionally to `/work/src` anymore. Instead, the default value for `BuildSources=` now mounts the current working directory to `/work/src`. This means that the current working directory is no longer implicitly included when `BuildSources=` is explicitly configured.
- Assigning the empty string to a setting that takes a list of values now overrides any configured default value as well.
- The GitHub action does not build and install systemd from source anymore. Instead, `ToolsTree=default` can be used to make sure a recent version of systemd is used to do the image build.
- Added `EnvironmentFiles=` to read environment variables from environment files.
- We drastically reduced how much of the host system we expose to scripts.
  Aside from `/usr`, a few directories in `/etc`, `/tmp`, `/var/tmp` and various directories configured in mkosi settings, all host directories are hidden from scripts, package managers and other tools executed by mkosi.
- Added `RuntimeScratch=` to automatically mount a directory with extra scratch space into mkosi-spawned containers and virtual machines.
- Package manager trees can now be used to configure every tool invoked by mkosi while building an image that reads config files from `/etc` or `/usr`.
- Added `SELinuxRelabel=` to specify whether to relabel SELinux files or not.
- Many fixes to tools trees were made and tools trees are now covered by CI. Some combinations aren't possible yet, but we're actively working to make these possible.
- `mkosi qemu` now supports direct kernel boots of `s390x` and `powerpc` images.
- Added `HostArchitecture=` match to match against the host architecture.
- We don't use the user's SSH public/private keypair anymore for `mkosi ssh` but instead use a separate key pair which can be generated by `mkosi genkey`. Users of `mkosi ssh` will have to run `mkosi genkey` once to generate the necessary files to keep `mkosi ssh` working.
- We don't automatically set `--offline=no` anymore when we detect the `Subvolumes=` setting is used in a `systemd-repart` partition definition file. Instead, use the new `RepartOffline=` option to explicitly disable running `systemd-repart` in offline mode.
- During the image build we now install UKIs/kernels/initrds to `/boot` instead of `/efi`. While this will generally not be noticeable, users with custom systemd-repart ESP partition definitions will need to add `CopyFiles=/boot:/` along with the usual `CopyFiles=/efi:/` to their ESP partition definitions. By installing UKIs/kernels/initrds to `/boot`, it becomes possible to use `/boot` to populate an XBOOTLDR partition, which wasn't possible before. Note that this is also safe to do before `v20`, so `CopyFiles=/boot:/` can unconditionally be added to any ESP partition definition files.
- Added `QemuFirmwareVariables=` to allow specifying a custom OVMF variables file to use.
- Added `MinimumVersion=` to allow specifying the minimum required mkosi version to build an image.
- Added support for Arch Linux's debug repositories.
- Merged the mkosi-initrd project into mkosi itself. mkosi-initrd is now used to build the default initrd.
- Implemented mkosi-initrd for all supported distributions.
- Added `ShimBootloader=` to support installing shim to the ESP.
- Added sysext, confext and portable output formats. These will produce signed disk images that can be used as sysexts, confexts and portable services respectively.
- Added `QemuVsockConnectionId=` to configure how to allocate the vsock connection ID when `QemuVsock=` is enabled.
- Added documentation on how to build sysexts with mkosi.
- Global systemd user presets are now also configured.
- Implemented `WithDocs=` for `apt`.
- On supported package managers, locale data for other locales is now stripped if the locale is explicitly configured using `Locale=`.
- All `rpm` plugins are now disabled when building images.
- Added `KernelModulesIncludeHost=` and `KernelModulesInitrdIncludeHost=` to only include modules loaded on the host system in the image/initrd respectively.
- Implemented `RemovePackages=` for Arch Linux.
- Added `useradd` and `groupadd` scripts to configure these binaries to operate on the image during builds instead of on the host.
- Added microcode support.
  If installed into the image, an early microcode initrd will automatically be built and prepended to the initrd.
- A passwordless root account may now be created by specifying `hashed:`.
- The `Autologin=` feature was extended with support for `arm64`, `s390x` and `powerpc` architectures.
- Added `SecureBootAutoEnroll=` to control automatic enrollment of SecureBoot keys separately from signing `systemd-boot` and generated UKIs.
- `ImageVersion=` is no longer automatically appended to the output files; instead, this is automatically appended to `Output=` if not specified, and results in the `%o` specifier being equivalent to `%i` or `%i_%v` depending on whether `ImageVersion=` is specified.

## v19

- Support for RHEL was added!
- Added `journalctl` and `coredumpctl` verbs for running the respective tools on built directory or disk images.
- Added a `burn` verb to write the output image to a block device.
- Added a new `esp` output format, which is largely similar to the existing `uki` output format but wraps it in a disk image with only an ESP.
- `Presets` were renamed to `Images`. `mkosi.images/` is now used instead of `mkosi.presets/`, the `Presets=` setting was renamed to `Images=` and the `Presets` section was merged into the `Config` section. The old names can still be used for backwards compatibility.
- Added profiles to support building variants of the same image in one repository. Profiles can be defined in `mkosi.profiles/` and one can be selected using the new `Profile=` setting.
- mkosi will now parse `mkosi.local.conf` before any other config files if it exists.
- Added a kernel-install plugin. This is only shipped in the source tree and not included in the Python module.
- Added a `--json` option to get the output of `mkosi summary` as JSON.
- Added shorthand `-a` for `--autologin`.
- Added a `--debug-workspace` option to not remove the workspace directory after a build. This is useful to inspect the workspace after failing builds. As a consequence, the prefix for the default workspace directory has been changed from `.mkosi-tmp` to `mkosi-workspace`.
- Scripts with the `.chroot` extension are now executed in the image automatically.
- Added `rpm` helper script to have `rpm` automatically operate on the image when running scripts.
- Added `mkosi-as-caller` helper script that can be used in scripts to run commands as the user invoking mkosi.
- `mkosi-chroot` will now start a shell if no arguments are specified.
- Added `WithRecommends=` to configure whether to install recommended packages by default or not where this is supported. It is disabled by default.
- Added `ToolsTreeMirror=` setting for configuring the mirror to use for the default tools tree.
- `WithDocs=` is now enabled by default.
- Added `BuildSourcesEphemeral=` to make source directories ephemeral when running scripts. This means any changes made to source directories while running scripts will be undone after the scripts have finished executing.
- Added `QemuDrives=` to have mkosi create extra qemu drives and pass them to qemu when using the `qemu` verb.
- Added `BuildSources=` match to match against configured build source targets.
- `PackageManagerTrees=` was moved to the `Distribution` section.
- We now automatically configure the qemu firmware, kernel cmdline and initrd based on what type of kernel is passed by the user via `-kernel` or `QemuKernel=`.
- The mkosi repository itself now ships configuration to build basic bootable images that can be used to test mkosi.
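For the direct kernel boot entry above, an invocation might look like this (illustrative path, and assuming the usual mapping of the `QemuKernel=` setting to a `--qemu-kernel` command line option):

```console
$ mkosi --qemu-kernel=/path/to/vmlinuz qemu
```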
- Added support for enabling `updates-testing` repositories for Fedora. - GPG keys for CentOS, Fedora, Alma and Rocky are now looked up locally first before fetching them remotely. - Signatures are not required for local packages on Arch anymore. - Packages on opensuse are now always downloaded in advance before installation when using zypper. - The tar output is now reproducible. - We now make sure `git` can be executed from mkosi scripts without running into permission errors. - We don't create subdirectories beneath the configured cache directory anymore. - Workspace directories are now created outside of any source directories. mkosi will either use `XDG_CACHE_HOME`, `$HOME/.cache` or `/var/tmp` depending on the situation. - Added environment variable `MKOSI_DNF` to override which dnf to use for building images (`dnf` or `dnf5`). - The rootfs can now be modified when running build scripts (with all changes thrown away after the last build script has been executed). - mkosi now fails if configuration specified via the CLI does not apply to any image (because it is overridden). - Added a new doc on building rpms from source with mkosi (`docs/building-rpms-from-source.md`). - `/etc/resolv.conf` will now only be mounted for scripts when they are run with network access. ## v18 - `$SCRIPT` was renamed to `$CHROOT_SCRIPT`. `$SCRIPT` can still be used but is considered deprecated. - Added `RuntimeTrees=` setting to mount directories when booting images via `mkosi boot`, `mkosi shell` or `mkosi qemu`. The directories are mounted with a uid map that maps the user invoking mkosi to the root user so that all files in the directory appear as if owned by the root user in the container or virtual machine and any new files created in the directories are owned by the user invoking mkosi. To make this work in VMs, we use `VirtioFS` via `virtiofsd`. Note that this requires systemd v254 or newer to be installed in the image. - Added support for booting directory images with `mkosi qemu` via `VirtioFS`. When `CONFIG_VIRTIOFS` and `CONFIG_VIRTIO_PCI` are builtin modules, no initramfs is required to make this work. - Added `Include=` or `--include` to include extra configuration files or directories. - Added support for specifiers to access the current value of certain settings during configuration file parsing. - `mkosi` will now exit with an error when no configuration was provided. - Multiple scripts of the same type are now supported. - Custom distributions are now supported via the new `custom` distribution. When using `custom` as the distribution, the rootfs must be provided via base trees, skeleton trees or prepare scripts. - We now use local GPG keys for rpm based distributions if the `distribution-gpg-keys` package is installed on the host. - Added `RuntimeSize=` to grow the image to a specific size before booting it when using `mkosi boot` or `mkosi qemu`. - We now set `MKOSI_UID` and `MKOSI_GID` when running scripts which are set to the uid and gid of the user invoking mkosi respectively. These can be used to run commands as the user that invoked mkosi. - Added an `Architecture=` match - Initrds specified with `Initrds=` are now used for grub menuentries as well. - `ImageId=` and `ImageVersion=` are now written to os-release as `IMAGE_ID` and `IMAGE_VERSION` if provided. - We pass command line arguments passed to the `build` verb to the build script again. - We added support for the "RHEL Universal Base Image" distribution. 
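  Building such an image is then just a matter of selecting the distribution; a sketch (the release number is illustrative):

  ```console
  $ mkosi --distribution rhel-ubi --release 9 build
  ```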
## v17.1

- Fixed bug where `--autologin` was broken when used in combination with a tools tree when using a packaged version of mkosi.

## v17

- Added `ToolsTreePackages=` to add extra packages to the default tools tree.
- Added `SystemdVersion=` match to match on the host's systemd version.
- Added `Format=` match to match on the configured output format.
- `Presets=` can now be configured in global configuration files to select which presets to build.
- UKIs can now be booted using direct linux boot.
- We don't try to make images UEFI bootable anymore on architectures that do not support UEFI.
- Fixed `--help` to show all options again.
- We now warn when settings are configured in the wrong section.

## v16

- `mkosi.version` is now picked up from preset and dropin directories as well, following the usual config precedence logic.
- Removed the "first assignment wins" logic from configuration parsing. Settings parsed later will now override earlier values.
- Removed the `!` operator for lists. Instead, assign the empty string to the list to remove all previous values.
- Added support for configuring custom default values for settings by prefixing their name in the configuration file with `@`.
- Added `QemuCdrom=` to attach the image to the virtual machine as a CD-ROM instead of a block device.
- Added `SectorSize=` to set the sector size of the disk images built by systemd-repart.
- Added back grub support (BIOS/UEFI). Note that we don't install grub on UEFI yet, but we do add the necessary configuration and partitions.
- Added `Bootloader=` option to configure which EFI bootloader to install. Added `uki` option to install just the UKI without systemd-boot and `grub` to generate grub configuration to chainload into the built UKIs.
- Added `BiosBootloader=` to configure whether grub for BIOS gets installed or not.
- Added `QemuFirmware=` to select which qemu firmware to use (OVMF, Seabios or direct kernel boot).
- Added `QemuKernel=` to specify the kernel that should be used with direct kernel boot.
- `/var/lib/dbus/machine-id` is now removed if it was added by a package manager postinstall script.
- The manifest is not generated by default anymore. Use `ManifestFormat=json` to make sure the manifest is generated.
- Added `SourceDateEpoch=` to enable more reproducible image builds.
- Added `Seed=` to set the seed passed to systemd-repart.
- Updated the default Fedora release to Fedora 39.
- If `ToolsTree=` is set to `default`, mkosi will now build a default tools tree containing all the necessary tools to build images. The distribution and release to use can be configured with `ToolsTreeDistribution=` and `ToolsTreeRelease=` or are determined automatically based on the image being built.
- Added `uki` output format. This is similar to `cpio`, except the cpio is packaged up as a UKI with a kernel image and stub picked up from the rootfs.

## v15.1

- The man page can be generated from the markdown file via `tools/make-man-page.sh`.
- Fixed issue where not all packages and data files were included in the generated Python package.
- mkosi doesn't try to unshare the network namespace anymore when it doesn't have `CAP_NET_ADMIN`.
- Fixed issue when the workspace was located in `/tmp`.
- Don't try to run `timedatectl` or `ssh-add` when they're not installed.

## v15

- Migrated to systemd-repart. Many options are dropped in favor of specifying them directly in repart partition definition files:
  - `Format=gpt_xxx` options are replaced with a single `disk` option.
    The filesystem to use can now be specified with repart's `Format=` option.
  - `Format=plain_squashfs` (can be reproduced by a single repart squashfs root partition combined with `SplitArtifacts=yes`)
  - `Verity=` (replaced by repart's `Verity=` options)
  - `Encrypt=` (replaced by repart's `Encrypt=` option)
  - `RootSize=`, `HomeSize=`, `VarSize=`, `TmpSize=`, `ESPSize=`, `SwapSize=`, `SrvSize=` (replaced by repart's size options)
  - `UsrOnly=` (replaced with `CopyFiles=/:/usr` in a usr partition definition)
  - `OutputSplitRoot=`, `OutputSplitVerity=` (replaced by repart's `SplitName=` option)
  - `OutputSplitKernel=` (the UKI is now always written to its own output file)
  - `GPTFirstLBA` (removed, no equivalent in repart)
  - `ReadOnly=` (replaced by repart's `ReadOnly=` option per partition)
  - `Minimize=` (replaced by repart's `Minimize=` option per partition)
  - `CompressFs=` (no equivalent in repart, can be replicated by replacing `mkfs.<fs>` in `$PATH` with a script that adds the necessary command line option)
  - `MkSquashfs=` (can be replaced with a script in `$PATH` that invokes the correct binary)

  We also removed the `WithoutUnifiedKernelImages=` switch, as building unified kernel images is trivial and fast these days.
- Support for `--qemu-boot` was dropped.
- Support for `--use-host-repositories` was dropped, use `--repository-directory` instead.
- `RepositoryDirectory` was removed, use `PackageManagerTrees=` or `SkeletonTrees=` instead.
- `--repositories` is now only usable on Debian/RPM based distros and can only be used to enable additional repositories. Specifically, it cannot be used on Arch Linux anymore to add new repositories.
- The `_epel` distributions were removed. Use `--repositories=epel` instead to enable the EPEL repository.
- Removed `-stream` from CentOS release specifiers. Instead of specifying `8-stream`, you now just specify `8`.
- Removed default kernel command line arguments `rhgb`, `selinux=0` and `audit=0`.
- Dropped `--all` and `--all-directory`, as this functionality is better implemented by using a build system.
- mkosi now builds images without needing root privileges.
- Removed `--no-chown`, `--idmap` and `--nspawn-keep-unit` options, as they were made obsolete by moving to rootless builds.
- Removed `--source-file-transfer`, `--source-file-transfer-final`, `--source-resolve-symlinks` and `--source-resolve-symlinks-final` in favor of always mounting the source directory into the build image. `--source-file-transfer-final` might be reimplemented in the future using virtiofsd.
- Dropped `--include-dir` option. Usage can be replaced by using `--incremental` and reading includes from the cached build image tree.
- Removed `--machine-id` in favor of shipping images without a machine ID at all.
- Removed `--skip-final-phase`, as we only have a single phase now.
- The post install script is only called for the final image now and not for the build image anymore. Use the prepare script instead.
- `--ssh-key`, `--ssh-agent`, `--ssh-port` and `--ssh-timeout` options were dropped as the SSH support was reimplemented using VSock. `mkosi ssh` can only be used with images booted with `mkosi qemu`. Use `machinectl` to access images booted with `mkosi boot`. Use `--extra-tree` or `--credential` with the `.ssh.authorized_keys.root` credential as alternatives for provisioning the public key inside the image.
- Only configuration files matching `*.conf` are parsed in dropin directories now.
- Removed `--qemu-headless`; we now start qemu in the terminal by default and configure the serial console at runtime.
  Use the new `--qemu-gui` option to start qemu in its graphical interface.
- Removed `--netdev`. It can be replaced by manually installing systemd-networkd, putting a network file in the image and enabling systemd-networkd.
- If `mkosi.extra/` or `mkosi.skeleton/` exist, they are now always used, instead of only when no explicit extra/skeleton trees are defined.
- mkosi doesn't install any default packages anymore, aside from packages required by the distro, or the base filesystem layout package if there are no required packages. In practice, this means systemd and other basic tools have to be installed explicitly from now on.
- Removed `--base-packages`, as it's not needed anymore since we don't install any packages by default anymore aside from the base filesystem layout package.
- Removed `--qcow2` option in favor of supporting only raw disk images as the disk image output format.
- Removed `--bmap` option, as it can be trivially added manually by utilizing a finalize script.
- The `never` value for `--with-network` was spun off into its own option `--cache-only`.
- `--bootable` now defaults to `auto`. When set to `auto`, mkosi will generate a bootable image only if all the necessary packages are installed. Documentation was added in docs/bootable.md on how a bootable image can be generated on mainstream distros.
- The RPM db is no longer rebuilt in bdb format on CentOS Stream 8. To be able to install packages on a CentOS Stream 8 image with an RPM db in sqlite format, rewrite the db in bdb format using `rpm --rebuilddb --define "_db_backend bdb"`.
- Repositories are now only written to `/etc/apt/sources.list` if apt is installed in the image.
- Removed the dependency on `debootstrap` to build Ubuntu or Debian images.
- Apt now uses the keyring from the host instead of the keyring from the image. This means `debian-archive-keyring` or `ubuntu-archive-keyring` are now required to be installed to build Debian or Ubuntu images respectively.
- `--base-image` is split into `--base-tree` and `--overlay`.
- Removed `--cache-initrd`; instead, use a prebuilt initrd with `Initrds=` to avoid rebuilding the initrd all the time.
- Disk images are now resized to 8G when booted to give some disk space to play around with in the booted image.
- Removed `--install-directory=` option. This was originally added to cache installation results, but it didn't work properly: leftover files from a previous installation meant the directory had to be emptied before reuse, which invalidated the caching, so the option was removed.
- Build scripts are now executed on the host. See the `SCRIPTS` section in the manual for more information. Existing build scripts will need to be updated to make sure they keep working. Specifically, most paths in scripts will need to be prefixed with `$BUILDROOT` to have them operate on the image instead of on the host system. To ensure the host system cannot be modified when running a script, most host directories are mounted read-only while it runs. Alternatively to running the script on the host, the script can also still be executed in the image itself by putting the following snippet at the top of the script:

  ```sh
  if [ "$container" != "mkosi" ]; then
      exec mkosi-chroot "$SCRIPT" "$@"
  fi
  ```

- Removed `--tar-strip-selinux-context=` option.
  We now label all files properly if SELinux is enabled; if users don't want the labels, they can simply exclude them when extracting the archive.
- Gentoo is now marked as experimental and unsupported, and there's no guarantee at all that it will work. Issues related to Gentoo will generally not receive attention from core maintainers. All Gentoo-specific hacks outside of the Gentoo implementation module have been removed.
- A verb `documentation` has been added. Calling mkosi with this verb will show the documentation. This is useful when running mkosi during development to always have the documentation available in the matching version. By default it will try several ways to output the documentation, but a specific option can be chosen with the `--doc-format` option. Distro packagers are encouraged to add a file `mkosi.1` into the `mkosi/resources` directory of the Python package, if it is missing, as well as install it in the appropriate search path for man pages. The man page can be generated from the markdown file `mkosi/resources/mkosi.md`, e.g. via `pandoc -t man -s -o mkosi.1 mkosi.md`.
- `BuildSources=` now takes `source:target` pairs which specify the source directory and where to mount it relative to the top level source directory when running scripts (e.g. `BuildSources=../my-project:my-project`).

## v14

- Support for Clear Linux was dropped. See https://github.com/systemd/mkosi/pull/1037 for more information.
- Support for Photon was dropped. See https://github.com/systemd/mkosi/pull/1048 for more information.
- The Arch kernel/bootloader pacman hooks were removed. For anyone that still wants to use them, they can be found [here](https://github.com/systemd/mkosi/tree/v13/mkosi/resources/arch).
- mkosi now creates `distro~release` subdirectories inside the build, cache and output directories for each `distro~release` combination that is built. This allows building for multiple distros without throwing away the results of a previous distro build every time.
- The preferred names for mkosi configuration files and directories are now `mkosi.conf` and `mkosi.conf.d/` respectively. The old names (`mkosi.default` and `mkosi.default.d`) have been removed from the docs but are still supported for backwards compatibility.
- `plain_squashfs` type images will now also be named with a `.raw` suffix.
- `tar` type images will now respect the `--compress` option.
- Pacman's `SigLevel` option was changed to use the same default value as used on Arch, which is `SigLevel = Required DatabaseOptional`. If this results in keyring errors, you need to update the keyring by running `pacman-key --populate archlinux`.
- Support for CentOS 7 was dropped. If you still need to support CentOS 7, we recommend using any mkosi version up to 13.
- Support for BIOS/grub was dropped, because EFI hardware is widely available, legacy BIOS systems do not support the feature set to fully verify a boot chain from firmware to userland, and it had become bothersome to maintain for little use. To generate BIOS images you can use any version of mkosi up to mkosi 13, or the new `--bios-size` option. This can be used to add a BIOS boot partition of the specified size on which `grub` (or any other bootloader) can be installed with the help of mkosi's script support (depending on your needs most likely `mkosi.postinst` or `mkosi.finalize`). This method can also be used for other EFI bootloaders that mkosi intentionally does not support.
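  A very rough sketch of that approach in an `mkosi.finalize` script (entirely hypothetical; the partition layout and paths depend on your setup):

  ```sh
  #!/bin/sh
  # Attach the built image, mount its root partition and install grub for BIOS.
  dev=$(losetup --find --show --partscan image.raw)
  mount "${dev}p2" /mnt    # second partition as root: hypothetical layout
  grub-install --target=i386-pc --boot-directory=/mnt/boot "$dev"
  umount /mnt
  losetup --detach "$dev"
  ```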
- mkosi now unconditionally copies the kernel, initrd and kernel cmdline out of the image; previously these were only copied out for qemu boot.
- mkosi now runs apt and dpkg on the host. As such, we now require apt and dpkg to be installed on the host, along with debootstrap, in order to be able to build Debian/Ubuntu images.
- The default names of split dm-verity artifacts have been changed to match what `systemd` and other tools expect: `image.root.raw`, `image.root.verity`, `image.root.roothash`, `image.root.roothash.p7s` (same for `usr` variants).
- `mkosi` will again default to the same OS release as the host system when the host system uses the same distribution as the image that's being built.
- By default, `mkosi` will now change the owner of newly created directories to `SUDO_UID` or `PKEXEC_UID` if defined, unless `--no-chown` is used.
- If `systemd-nspawn` v252 or newer is used, bind-mounted directories with `systemd-nspawn` will use the new `rootidmap` option, so files and directories created from within the container will be owned by the actual directory owner on the host.

## v13

- The `--network-veth` option has been renamed to `--netdev`. The old name made sense with virtual ethernet devices, but when booting images with qemu a TUN/TAP device is used instead.
- The network config file installed by mkosi when the `--netdev` (previously `--network-veth`) option is used (formerly `/etc/systemd/network/80-mkosi-network-veth.network` in the image) now only matches network interfaces using the `virtio_net` driver. Please make sure you weren't relying on this file to configure any network interfaces other than the tun/tap virtio-net interface created by mkosi when booting the image in QEMU with the `--netdev` option. If you were relying on this config file to configure other interfaces, you will need to ship your own network configuration for them.
- mkosi will no longer default to the same OS release as the host system when the host system uses the same distribution as the image that's being built. Instead, when no release is specified, mkosi will now always default to the default version embedded in mkosi itself.
- `mkosi` will now use the `pacman` keyring from the host when building Arch images. This means that users will, on top of installing `archlinux-keyring`, also have to run `pacman-key --init` and `pacman-key --populate archlinux` on the host system to be able to build Arch images. Also, unless the package manager is configured to do it automatically, the host keyring will have to be updated after `archlinux-keyring` updates by running `pacman-key --populate archlinux` and `pacman-key --updatedb`.
- Direct qemu linux boot is now supported with `BootProtocols=linux`. When enabled, the kernel image, initrd, and cmdline will be extracted from the image and passed to `qemu` by `mkosi qemu` to directly boot into the kernel image without a bootloader. This can be used to boot for example s390x images in `qemu`.
- The initrd will now always be rebuilt after the extra trees and build artifacts have been installed into the image.
- The GitHub action has been migrated to Ubuntu Jammy. To migrate any jobs using the action, add `runs-on: ubuntu-22.04` to the job config.
- All images are now configured by default with the `C.UTF-8` locale.
- New `--repository-directory` option can be used to configure a directory with extra repository files to be used by the package manager when building an image. Note that this option is currently only supported for `pacman` and `dnf`-based distros.
- Option `--skeleton-tree` is now supported on Debian-based distros.
- Removed `--hostname`, as it's trivial to configure using systemd-firstboot.
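  Configuring the hostname yourself is then a one-liner in a post-installation script, e.g. (sketch; `$BUILDROOT` refers to the image root in current mkosi script environments):

  ```sh
  #!/bin/sh
  # Replace the removed --hostname option with a direct systemd-firstboot call.
  systemd-firstboot --root="$BUILDROOT" --force --hostname=myimage
  ```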
- Removed default locale configuration, as it's trivial to configure using systemd-firstboot and systemd writes a default locale as well.

## v12

- Fix handling of baselayout in Gentoo installations.

## v11

- Support for Rocky Linux, Alma Linux, and Gentoo has been added!
- A new `ManifestFormat=` option can be used to generate "manifest" files that describe what packages were installed. With `json`, a JSON file that shows the names and versions of all installed packages will be created. With `changelog`, a longer human-readable file that shows package descriptions and changelogs will be generated. This latter format should be considered experimental and likely to change in later versions.
- A new `RemovePackages=` option can be used to uninstall packages after the build and finalize scripts have run. This is useful for the case where packages are required by the build scripts, or pulled in as dependencies for scriptlets of other packages, but are not necessary in the final image.
- A new `BaseImage=` option can be used to build "system extensions" a.k.a. "sysexts" — partial images which are mounted on top of an existing system to provide additional files under `/usr/`. See the [systemd-sysext man page](https://www.freedesktop.org/software/systemd/man/systemd-sysext.html) for more information.
- A new `CleanPackageMetadata=` option can be used to force or disable the removal of package manager files. When this option is not used, they are removed when the package manager is not installed in the final image.
- A new `UseHostRepositories=` option instructs mkosi to use repository configuration from the host system, instead of the internal list.
- A new `SshAgent=` option configures the path to the SSH agent.
- A new `SshPort=` option overrides the port used for SSH.
- The `Verity=` setting supports a new value `signed`. When set, verity data will be signed and the result inserted as an additional partition in the image. See https://systemd.io/DISCOVERABLE_PARTITIONS for details about signed disk images. This information is used by `systemd-nspawn`, `systemd-dissect`, `systemd-sysext`, `systemd-portabled` and `systemd`'s `RootImage=` setting (among others) to cryptographically validate the image file systems before use.
- The `--build-environment=` option was renamed to `--environment=` and extended to cover *all* invoked scripts, not just `mkosi.build`. The old name is still understood.
- With `--with-network=never`, `dnf` is called with `--cacheonly`, so that the package lists are not refreshed. This gives a degree of reproducibility when doing repeated installs with the same package set (and also makes installs significantly faster).
- The `--debug=` option gained a new value `disk` to show information about disk sizes and partition allocations.
- Some sections and settings have been renamed for clarity: `[Packages]` is now `[Content]`; `Password=`, `PasswordIsHashed=`, and `Autologin=` are now in `[Content]`. The old names are still supported, but not documented.
- When `--prepare-script=`/`--build-script=`/`--finalize-script=` is used with an empty argument, the corresponding script will not be called.
- Python 3.7 is the minimum supported version.
- Note to packagers: the Python `cryptography` module is needed for signing of verity data.

## v10

- Minimum supported Python version is now 3.7.
- Automatic configuration of the network for Arch Linux was removed to bring different distros more in line with each other.
  To add it back, add a postinstall script to configure your network manager of choice.
- The `--default` option was changed to not affect the search location of `mkosi.default.d/`.
  mkosi now always searches for `mkosi.default.d/` in the working directory.
- `quiet` was dropped from the default kernel command line.
- `--source-file-transfer` and `--source-file-transfer-final` now accept an empty value as the
  argument which can be used to override a previous setting.
- A new command `mkosi serve` can be used to serve build artifacts using a small embedded HTTP
  server. This is useful for `machinectl pull-raw …` and `machinectl pull-tar …`.
- A new command `mkosi genkey` can be used to generate secure boot keys for use with mkosi's
  `--secure-boot` options. The number of days the keys should remain valid can be specified via
  `--secure-boot-valid-days=` and their CN via `--secure-boot-common-name=`.
- When booting images with `qemu`, firmware that supports Secure Boot will be used if available.
- `--source-resolve-symlinks` and `--source-resolve-symlinks-final` options are added to control
  how symlinks in the build sources are handled when `--source-file-transfer[-final]=copy-all` is
  used.
- `--build-environment=` option was added to set variables for the build script.
- `--usr-only` option was added to build images that comprise only the `/usr/` directory, instead
  of the whole root file system. This is useful for stateless systems where `/etc/` and `/var/`
  are populated by `systemd-tmpfiles`/`systemd-sysusers` and related calls at boot, or systems
  that are originally shipped without a root file system, but where `systemd-repart` adds one on
  the first boot.
- Support for "image versions" has been added. The version number can be set with
  `--version-number=`. It is included in the default output filename and passed as
  `$IMAGE_VERSION` to the build script. In addition, `mkosi bump` can be used to increase the
  version number by one, and `--auto-bump` can be used to increase it automatically after
  successful builds.
- Support for "image identifiers" has been added. The id can be set with `--image-id=` and is
  passed to the build script as `$IMAGE_ID`.
- The list of packages to install can be configured with `--base-packages=`. With
  `--base-packages=no`, only packages specified with `--packages=` will be installed. With
  `--base-packages=conditional`, various packages will be installed "conditionally", i.e. only if
  some other package is otherwise pulled in. For example, `systemd-udev` may be installed only if
  `systemd` is listed in `--packages=`.
- CPIO output format has been added. This is useful for kernel initramfs images.
- Output compression can be configured with `--compress-fs=` and `--compress-output=`, and support
  for `zstd` has been added.
- `--ssh-key=` option was added to control the ssh key used to connect to the image.
- `--remove-files=` option was added to remove files from the generated images.
- Inline comments are now allowed in config files (anything from `#` until the end of the line
  will be ignored).
- The development branch was renamed from `master` to `main`.

## v9

### Highlighted Changes

- The mkosi Github action now defaults to the current release of mkosi instead of the tip of the
  master branch.
- Add a `ssh` verb and accompanying `--ssh` option. The latter sets up SSH keys for direct SSH
  access into a booted image, whereas the former can be used to start an SSH connection to the
  image.
- Allow for distribution-specific `mkosi.*` files in subdirectories of `mkosi.default.d/`.
  These files are only processed if a subdirectory named after the target distribution of the
  image is found in `mkosi.default.d/`.
- The summary of used options for the image is now only printed when building the image for the
  first time or when the `summary` verb is used.
- All of mkosi's output, except for the build script, will now go to stderr. There was no clear
  policy on this before and this choice makes it easier to use images generated and booted via
  mkosi with language servers using stdin and stdout for communication.
- `--source-file-transfer` now defaults to `copy-git-others` to also include untracked files.
- [black](https://github.com/psf/black) is now used as a code style and conformance with it is
  checked in CI.
- Add a new `--ephemeral` option to boot into a temporary snapshot of the image that will be
  thrown away on shutdown.
- Add a new option `--network-veth` to set up a virtual Ethernet link between the host and the
  image for usage with nspawn or QEMU.
- Add a new `--autologin` option to automatically log into the root account upon boot of the
  image. This is useful when using mkosi for boot tests.
- Add a new `--hostonly` option to generate host-specific initrds. This is useful when using mkosi
  for boot tests.
- Add a new `--install-directory` option and special directory `mkosi.installdir/` that will be
  used as `$DESTDIR` for the build script, so that the contents of this directory can be shared
  between builds.
- Add a new `--include-directory` option and special directory `mkosi.includedir/` that will be
  mounted at `/usr/include` during the build. This way header files installed during the build can
  be made available to the host system, which is useful for usage with language servers.
- Add a new `--source-file-transfer-final` option to complement `--source-file-transfer`. It does
  the same `--source-file-transfer` does for the build image, but for the final one.
- Add a new `--tar-strip-selinux-context` option to remove SELinux xattrs. This is useful when an
  image with a target distribution not using SELinux is generated on a host that is using it.
- Document the `--no-chown` option. Using this option, artifacts generated by mkosi are not
  chowned to the user invoking mkosi when it is invoked via sudo. It has been with us for a while,
  but hasn't been documented until now.

### Fixed Issues

- [#506](https://github.com/systemd/mkosi/issues/506)
- [#559](https://github.com/systemd/mkosi/issues/559)
- [#561](https://github.com/systemd/mkosi/issues/561)
- [#562](https://github.com/systemd/mkosi/issues/562)
- [#575](https://github.com/systemd/mkosi/issues/575)
- [#580](https://github.com/systemd/mkosi/issues/580)
- [#593](https://github.com/systemd/mkosi/issues/593)

### Authors

- Daan De Meyer
- Joerg Behrmann
- Luca Boccassi
- Peter Hutterer
- ValdikSS
mkosi-25.3/mkosi/resources/mkosi-addon/000077500000000000000000000000001474711424400201365ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-addon/mkosi.conf000066400000000000000000000020461474711424400221310ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Distribution] Distribution=custom [Output] Output=addon Format=addon ManifestFormat= SplitArtifacts= [Content] Bootable=no # Needs to be available for the addon stub, but don't want it in the initrd ExtraTrees=/usr/lib/systemd/boot/efi:/usr/lib/systemd/boot/efi RemoveFiles=/usr/lib/systemd/boot/efi/ RemoveFiles= # Including kernel images in the initrd is generally not useful.
# This also stops mkosi from extracting the kernel image out of the image as a separate output. /usr/lib/modules/*/vmlinuz* /usr/lib/modules/*/vmlinux* /usr/lib/modules/*/System.map # This is an addon so drop all modules files as these would override the ones from the base image. /usr/lib/modules/*/modules.* # Arch Linux specific file. /usr/lib/modules/*/pkgbase # Drop microcode directories explicitly as these are not dropped by the kernel modules processing # logic. /usr/lib/firmware/intel-ucode /usr/lib/firmware/amd-ucode mkosi-25.3/mkosi/resources/mkosi-initrd/000077500000000000000000000000001474711424400203425ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf000066400000000000000000000066011474711424400223360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] Output=initrd Format=cpio ManifestFormat= SplitArtifacts= [Content] Bootable=no MakeInitrd=yes CleanPackageMetadata=yes Packages= systemd # sine qua non udev bash # for emergency logins less # this makes 'systemctl' much nicer to use ;) p11-kit # dl-opened by systemd lvm2 RemoveFiles= # we don't need this after the binary catalogs have been built /usr/lib/systemd/catalog /etc/udev/hwdb.d /usr/lib/udev/hwdb.d # this is not needed by anything updated in the last 20 years /etc/services # Including kernel images in the initrd is generally not useful. # This also stops mkosi from extracting the kernel image out of the image as a separate output. /usr/lib/modules/*/vmlinuz* /usr/lib/modules/*/vmlinux* /usr/lib/modules/*/System.map /var/cache /var/log # Configure locale explicitly so that all other locale data is stripped on distros whose package manager supports it. Locale=C.UTF-8 WithDocs=no # Make sure various core modules are always included in the initrd. KernelModulesInclude= /ahci.ko /autofs4.ko /binfmt_misc.ko /btrfs.ko /cdrom.ko /cfg80211.ko /configfs.ko /dm-crypt.ko /dm-integrity.ko /dm-mod.ko /dm-multipath.ko /dm-raid.ko /dm-verity.ko /dmi-sysfs.ko /drm_buddy.ko /efi-pstore.ko /efivarfs.ko /erofs.ko /ext4.ko /i2c-algo-bit.ko /i2c-mux.ko /i2c-smbus.ko /intel-uncore-frequency-common.ko /intel[-_]vsec.ko /kvm.ko /libphy.ko /loop.ko /mdio_devres.ko /mei.ko /mxm-wmi.ko /nvme.ko /overlay.ko /parport.ko /pmt_telemetry.ko /qemu_fw_cfg.ko /raid[0-9]*.ko /scsi_mod.ko /sd_mod.ko /serio.ko /sg.ko /skx_edac_common.ko /snd-intel-dspcfg.ko /snd-soc-hda-codec.ko /squashfs.ko /usb-storage.ko /vfat.ko /virtio_balloon.ko /virtio_blk.ko /virtio_console.ko /virtio_dma_buf.ko /virtio_mmio.ko /virtio_net.ko /virtio_pci.ko /virtio_scsi.ko /virtio-rng.ko /virtiofs.ko /vmw_vsock_virtio_transport.ko /vsock.ko /wmi.ko /x_tables.ko /xfs.ko /xhci-pci-renesas.ko ^fs/nls/ crypto/ mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/000077500000000000000000000000001474711424400226325ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-arch.conf000066400000000000000000000016531474711424400246410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= gzip # For compressed keymap unpacking by loadkeys btrfs-progs e2fsprogs xfsprogs erofs-utils dosfstools # Various libraries that are dlopen'ed by systemd libfido2 tpm2-tss util-linux RemoveFiles= # Arch Linux doesn't split their gcc-libs package so we manually remove # unneeded stuff here to make sure it doesn't end up in the initrd. 
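# The libraries dropped below are (as far as the author of this comment can tell) the gcc runtimes
# for Fortran, Go, OpenMP, D and Objective-C, none of which is needed inside an initrd.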
/usr/lib/libgfortran.so* /usr/lib/libgo.so* /usr/lib/libgomp.so* /usr/lib/libgphobos.so* /usr/lib/libobjc.so* /usr/lib/libgdruntime.so* # Remove all files that are only required for development. /usr/lib/*.a /usr/include/* /usr/share/i18n/* /usr/share/hwdata/* /usr/share/iana-etc/* /usr/share/locale/* mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-azure-centos-fedora.conf000066400000000000000000000006041474711424400275740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|fedora Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel Distribution=|azure [Content] Packages= # Various libraries that are dlopen'ed by systemd tpm2-tss # File system checkers for supported root file systems e2fsprogs xfsprogs dosfstools mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-azure.conf000066400000000000000000000002051474711424400250420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=azure [Content] Packages= btrfs-progs util-linux mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos/000077500000000000000000000000001474711424400243435ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos/mkosi.conf000066400000000000000000000005011474711424400263300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel [Content] Packages= libfido2 util-linux RemovePackages= # Various packages pull in shadow-utils to create users, we can remove it afterwards shadow-utils mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos/mkosi.conf.d/000077500000000000000000000000001474711424400266335ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-centos/mkosi.conf.d/20-epel-packages.conf000066400000000000000000000002051474711424400324170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Repositories=epel [Content] Packages= btrfs-progs erofs-utils mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/000077500000000000000000000000001474711424400265305ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf000066400000000000000000000012011474711424400305130ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= kmod # Not pulled in as a dependency on Debian/Ubuntu dmsetup # Not pulled in as a dependency on Debian/Ubuntu libcryptsetup12 # xfsprogs pulls in python on Debian (???) and XFS generally # isn't used on Debian so we don't install xfsprogs. 
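# File system checkers for the remaining supported root file systems (mirroring the comment in the
# azure/centos/fedora variant of this file):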
btrfs-progs e2fsprogs erofs-utils dosfstools util-linux # Various libraries that are dlopen'ed by systemd libfido2-1 RemoveFiles= /usr/share/locale/* mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d/000077500000000000000000000000001474711424400310205ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d/10-dpkg.conf000066400000000000000000000004641474711424400330360ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu Release=!jammy Release=!noble Release=!oracular [Content] RemovePackages= # Needs perl >= 5.40.0-8 and dash >= 0.5.12-7 to drop this dpkg 10-libtss.conf000066400000000000000000000003671474711424400333340ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= ^libtss2-esys-[0-9.]+-0$ ^libtss2-mu[0-9.-]+$ libtss2-rc0 libtss2-tcti-device0 10-systemd-cryptsetup.conf000066400000000000000000000004001474711424400357300ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu Release=!jammy Release=!noble [TriggerMatch] Distribution=kali [Content] Packages=systemd-cryptsetup 10-systemd-repart.conf000066400000000000000000000003361474711424400350130ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu [TriggerMatch] Distribution=kali [Content] Packages=systemd-repart mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-fedora.conf000066400000000000000000000004631474711424400251620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= btrfs-progs libfido2 util-linux-core erofs-utils RemovePackages= # Various packages pull in shadow-utils to create users, we can remove it afterwards shadow-utils mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/10-opensuse.conf000066400000000000000000000016531474711424400255650ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= patterns-base-minimal_base # Various packages added as dependencies. If they are not explicitly installed, the zypper inner # logic picks the busybox-package variant, which adds also busybox in the initrd. 
diffutils grep gzip xz # Various libraries that are dlopen'ed by systemd libfido2-1 libtss2-esys0 libtss2-mu0 libtss2-rc0 libtss2-tcti-device0 # File system checkers for supported root file systems btrfsprogs e2fsprogs xfsprogs erofs-utils dosfstools util-linux RemovePackages= # Various packages pull in shadow to create users, we can remove it afterwards shadow sysuser-shadow RemoveFiles= /usr/share/locale/* /usr/etc/services mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.conf.d/20-stub.conf000066400000000000000000000001651474711424400246770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Format=uki Distribution=!arch [Content] Packages=systemd-boot mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/000077500000000000000000000000001474711424400226065ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/000077500000000000000000000000001474711424400234175ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/000077500000000000000000000000001474711424400241655ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/000077500000000000000000000000001474711424400256555ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/000077500000000000000000000000001474711424400305015ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system-preset/99-mkosi.preset000066400000000000000000000002111474711424400333000ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later # Make sure that services are disabled by default (primarily for Debian/Ubuntu). disable * mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/000077500000000000000000000000001474711424400272015ustar00rootroot00000000000000systemd-cryptsetup@.service.d/000077500000000000000000000000001474711424400350135ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/systemcredential.conf000066400000000000000000000004501474711424400377730ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/systemd/system/systemd-cryptsetup@.service.d[Service] ImportCredential=cryptsetup.* # Compat with older systemd versions that don't support ImportCredential=. 
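# Each LoadCredential= line below imports one specific cryptsetup credential that the
# ImportCredential= glob above would otherwise have covered.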
LoadCredential=cryptsetup.passphrase LoadCredential=cryptsetup.fido2-pin LoadCredential=cryptsetup.tpm2-pin LoadCredential=cryptsetup.luks2-pin LoadCredential=cryptsetup.pkcs11-pin mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/000077500000000000000000000000001474711424400251305ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/000077500000000000000000000000001474711424400265045ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-dm.rules000066400000000000000000000004161474711424400330060ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90dm/11-dm.rules SUBSYSTEM!="block", GOTO="dm_end" KERNEL!="dm-[0-9]*", GOTO="dm_end" ACTION!="add|change", GOTO="dm_end" OPTIONS+="db_persist" LABEL="dm_end" mkosi-25.3/mkosi/resources/mkosi-initrd/mkosi.extra/usr/lib/udev/rules.d/10-mkosi-initrd-md.rules000066400000000000000000000017531474711424400330130ustar00rootroot00000000000000# SPDX-License-Identifier: GPL-2.0-only # Copied from https://github.com/dracutdevs/dracut/blob/059/modules.d/90mdraid/59-persistent-storage-md.rules SUBSYSTEM!="block", GOTO="md_end" ACTION!="add|change", GOTO="md_end" # Also don't process disks that are slated to be a multipath device ENV{DM_MULTIPATH_DEVICE_PATH}=="1", GOTO="md_end" KERNEL!="md[0-9]*|md_d[0-9]*|md/*", KERNEL!="md*", GOTO="md_end" # partitions have no md/{array_state,metadata_version} ENV{DEVTYPE}=="partition", GOTO="md_ignore_state" # container devices have a metadata version of e.g. 'external:ddf' and # never leave state 'inactive' ATTR{md/metadata_version}=="external:[A-Za-z]*", ATTR{md/array_state}=="inactive", GOTO="md_ignore_state" TEST!="md/array_state", GOTO="md_end" ATTR{md/array_state}=="|clear|inactive", GOTO="md_end" LABEL="md_ignore_state" IMPORT{program}="/sbin/mdadm --detail --export $devnode" IMPORT{builtin}="blkid" OPTIONS+="link_priority=100" OPTIONS+="watch" OPTIONS+="db_persist" LABEL="md_end" mkosi-25.3/mkosi/resources/mkosi-tools/000077500000000000000000000000001474711424400202115ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf000066400000000000000000000011521474711424400222010ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Output] Format=directory Output=mkosi.tools ManifestFormat= [Build] BuildSources= [Content] Bootable=no SELinuxRelabel=no Packages= acl attr bash ca-certificates coreutils cpio curl diffutils dosfstools e2fsprogs findutils grep jq keyutils kmod less mtools nano opensc openssl sed socat strace swtpm systemd tar util-linux xfsprogs zstd mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/000077500000000000000000000000001474711424400225015ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/05-ubuntu.conf000066400000000000000000000001651474711424400251160ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=ubuntu [Distribution] Repositories=main,universe mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-arch.conf000066400000000000000000000015771474711424400245150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= apt archlinux-keyring base btrfs-progs createrepo_c debian-archive-keyring distribution-gpg-keys dnf dpkg edk2-ovmf erofs-utils git grub libseccomp openssh pacman perf pipewire pipewire-audio 
pkcs11-provider python-cryptography qemu-audio-pipewire qemu-base qemu-hw-display-virtio-gpu qemu-hw-display-virtio-gpu-gl qemu-hw-display-virtio-vga qemu-hw-display-virtio-vga-gl qemu-ui-opengl qemu-ui-sdl reprepro sbsigntools shadow squashfs-tools systemd-ukify ubuntu-keyring virt-firmware virtiofsd xz mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-azure-centos-fedora/000077500000000000000000000000001474711424400265745ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-azure-centos-fedora/mkosi.conf000066400000000000000000000011331474711424400305630ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|centos Distribution=|alma Distribution=|rocky Distribution=|rhel Distribution=|fedora Distribution=|azure [Content] Packages= createrepo_c git-core grub2-tools libseccomp openssh-clients policycoreutils python3-cryptography qemu-img qemu-kvm-core shadow-utils squashfs-tools swtpm-tools systemd-container systemd-journal-remote systemd-udev systemd-ukify virt-firmware virtiofsd xz mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-azure-centos-fedora/mkosi.conf.d/000077500000000000000000000000001474711424400310645ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-azure-centos-fedora/mkosi.conf.d/10-uefi.conf000066400000000000000000000002161474711424400331000ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] HostArchitecture=|x86-64 HostArchitecture=|arm64 [Content] Packages= edk2-ovmf mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-azure.conf000066400000000000000000000004101474711424400247070ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=azure [Content] Packages= btrfs-progs distribution-gpg-keys dnf5 dnf5-plugins qemu-system-aarch64-core qemu-system-s390x-core systemd-ukify mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/000077500000000000000000000000001474711424400242125ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf000066400000000000000000000002211474711424400261760ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=centos [Content] Packages= dnf dnf-plugins-core perf mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/000077500000000000000000000000001474711424400265025ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/10-epel-10.conf000066400000000000000000000001431474711424400307300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=10 [Distribution] Repositories=epel mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/10-epel-9.conf000066400000000000000000000001541474711424400306620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=9 [Distribution] Repositories=epel,epel-next mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/20-epel-packages-10.conf000066400000000000000000000002321474711424400325040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Repositories=epel Release=10 [Content] Packages= btrfs-progs distribution-gpg-keys mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/20-epel-packages-9.conf000066400000000000000000000004151474711424400324370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later 
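# Extra tools-tree packages that, judging by the match below, are only available from EPEL on
# release 9 of the CentOS family.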
[Match] Repositories=epel Release=9 [Content] Packages= apt archlinux-keyring btrfs-progs debian-keyring distribution-gpg-keys pacman sbsigntools ubu-keyring mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/20-erofs-utils.conf000066400000000000000000000002241474711424400320420ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Repositories=epel Release=9 [TriggerMatch] Release=10 [Content] Packages=erofs-utils mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-centos/mkosi.conf.d/20-pkcs11-provider.conf000066400000000000000000000001451474711424400325220ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Release=10 [Content] Packages=pkcs11-provider mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/000077500000000000000000000000001474711424400263775ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf000066400000000000000000000017661474711424400304020ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= ?exact-name(distribution-gpg-keys) ?exact-name(grub-pc-bin) ?exact-name(kali-archive-keyring) ?exact-name(virtiofsd) apt archlinux-keyring btrfs-progs createrepo-c debian-archive-keyring dnf erofs-utils git-core grub-common libarchive-tools libcryptsetup12 libseccomp2 libtss2-dev libnss-resolve libnss-myhostname makepkg openssh-client ovmf pacman-package-manager policycoreutils python3-cryptography python3-pefile qemu-efi-aarch64 qemu-system reprepro sbsigntool squashfs-tools swtpm-tools systemd-container systemd-coredump systemd-journal-remote uidmap xz-utils zypper mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d/000077500000000000000000000000001474711424400306675ustar00rootroot00000000000000linux-perf.conf000066400000000000000000000001751474711424400335530ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali [Content] Packages=linux-perf pkcs11-provider.conf000066400000000000000000000003671474711424400344170ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bookworm Release=!bullseye [TriggerMatch] Distribution=ubuntu Release=!jammy [TriggerMatch] Distribution=kali [Content] Packages= pkcs11-provider python3-virt-firmware.conf000066400000000000000000000003751474711424400356640ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bookworm Release=!bullseye [TriggerMatch] Distribution=ubuntu Release=!jammy [TriggerMatch] Distribution=kali [Content] Packages= python3-virt-firmware systemd-boot.conf000066400000000000000000000003311474711424400341050ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye [TriggerMatch] Distribution=ubuntu Release=!jammy [TriggerMatch] Distribution=kali [Content] Packages=systemd-boot 
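# The negative release matches above exclude older releases (Debian bullseye, Ubuntu jammy) that
# do not ship a separate systemd-boot package.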
systemd-repart.conf000066400000000000000000000003361474711424400344440ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu [TriggerMatch] Distribution=kali [Content] Packages=systemd-repart systemd-ukify.conf000066400000000000000000000003541474711424400342760ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye Release=!bookworm [TriggerMatch] Distribution=ubuntu Release=!jammy [TriggerMatch] Distribution=kali [Content] Packages=systemd-ukify ubuntu-keyring.conf000066400000000000000000000002131474711424400344430ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|!debian Release=|!bookworm [Content] Packages= ubuntu-keyring mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/000077500000000000000000000000001474711424400241575ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf000066400000000000000000000007111474711424400261470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=fedora [Content] Packages= apt archlinux-keyring btrfs-progs debian-keyring distribution-gpg-keys dnf5 dnf5-plugins erofs-utils pacman perf pkcs11-provider qemu-system-aarch64-core qemu-system-ppc-core qemu-system-s390x-core reprepro ubu-keyring zypper mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf.d/000077500000000000000000000000001474711424400264475ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-fedora/mkosi.conf.d/10-uefi.conf000066400000000000000000000002101474711424400304550ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Packages= sbsigntools mkosi-25.3/mkosi/resources/mkosi-tools/mkosi.conf.d/10-opensuse.conf000066400000000000000000000016051474711424400254310ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= btrfsprogs ca-certificates-mozilla createrepo_c distribution-gpg-keys dnf5 dnf5-plugins erofs-utils git-core glibc-gconv-modules-extra grep libseccomp2 openssh-clients ovmf patterns-base-minimal_base perf pkcs11-provider policycoreutils python3-cryptography python3-pefile qemu-headless qemu-ipxe qemu-ovmf-x86_64 qemu-uefi-aarch64 reprepro sbsigntools shadow squashfs systemd-boot systemd-container systemd-coredump systemd-experimental systemd-journal-remote tpm2.0-tools virt-firmware virtiofsd xz zypper mkosi-25.3/mkosi/resources/mkosi-vm/000077500000000000000000000000001474711424400174735ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf000066400000000000000000000003501474711424400214620ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Content] Bootable=yes Packages= bash diffutils gawk grep gzip less nano sed strace systemd udev 
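# Distribution-specific additions (kernel, boot loader, network tooling) come from the drop-ins in
# mkosi.conf.d/ below.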
mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/000077500000000000000000000000001474711424400217635ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/arch.conf000066400000000000000000000003311474711424400235440ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=arch [Content] Packages= dbus-broker dbus-broker-units iproute iputils linux polkit tpm2-tss mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/000077500000000000000000000000001474711424400256405ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf000066400000000000000000000004531474711424400276330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|alma Distribution=|rocky Distribution=|centos Distribution=|fedora Distribution=|azure [Content] Packages= iproute iputils kernel polkit systemd-resolved tpm2-tss util-linux mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/000077500000000000000000000000001474711424400301305ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/networkd.conf000066400000000000000000000002151474711424400326320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|fedora Distribution=|azure [Content] Packages= systemd-networkd mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/azure-centos-fedora/mkosi.conf.d/uefi.conf000066400000000000000000000002111474711424400317210ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Architecture=|x86-64 Architecture=|arm64 [Content] Packages= systemd-boot mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/000077500000000000000000000000001474711424400254435ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf000066400000000000000000000005311474711424400274330ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=|debian Distribution=|kali Distribution=|ubuntu [Content] Packages= bash dbus-broker iproute2 iputils-ping linux-image-generic login polkitd systemd-coredump systemd-sysv tpm2-tools tzdata mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/000077500000000000000000000000001474711424400277335ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d/systemd-boot.conf000066400000000000000000000003311474711424400332300ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye [TriggerMatch] Distribution=ubuntu Release=!jammy [TriggerMatch] Distribution=kali [Content] Packages=systemd-boot systemd-resolved.conf000066400000000000000000000003351474711424400340350ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/debian-kali-ubuntu/mkosi.conf.d# SPDX-License-Identifier: LGPL-2.1-or-later [TriggerMatch] Distribution=debian Release=!bullseye [TriggerMatch] Distribution=ubuntu Release=!jammy [TriggerMatch] Distribution=kali [Content] Packages=systemd-resolved mkosi-25.3/mkosi/resources/mkosi-vm/mkosi.conf.d/opensuse.conf000066400000000000000000000004701474711424400244740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Match] Distribution=opensuse [Content] Packages= iproute iputils kernel-default libtss2-tcti-device0 
patterns-base-minimal_base polkit strace systemd-boot systemd-network tpm2.0-tools mkosi-25.3/mkosi/resources/repart/000077500000000000000000000000001474711424400172265ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/000077500000000000000000000000001474711424400215415ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/confext-unsigned.repart.d/000077500000000000000000000000001474711424400265375ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/confext-unsigned.repart.d/10-root.conf000066400000000000000000000001561474711424400306110ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root Format=erofs CopyFiles=/etc/ Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/confext.repart.d/000077500000000000000000000000001474711424400247255ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/confext.repart.d/10-root.conf000066400000000000000000000002161474711424400267740ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root Format=erofs CopyFiles=/etc/ Verity=data VerityMatchKey=root Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/confext.repart.d/20-root-verity.conf000066400000000000000000000001701474711424400303140ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root-verity Verity=hash VerityMatchKey=root Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/confext.repart.d/30-root-verity-sig.conf000066400000000000000000000001631474711424400310770ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root-verity-sig Verity=signature VerityMatchKey=root mkosi-25.3/mkosi/resources/repart/definitions/portable-unsigned.repart.d/000077500000000000000000000000001474711424400267015ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/portable-unsigned.repart.d/10-root.conf000066400000000000000000000001521474711424400307470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root Format=erofs CopyFiles=/ Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/portable.repart.d/000077500000000000000000000000001474711424400250675ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/portable.repart.d/10-root.conf000066400000000000000000000002121474711424400271320ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root Format=erofs CopyFiles=/ Verity=data VerityMatchKey=root Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/portable.repart.d/20-root-verity.conf000066400000000000000000000001701474711424400304560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root-verity Verity=hash VerityMatchKey=root Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/portable.repart.d/30-root-verity-sig.conf000066400000000000000000000001631474711424400312410ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root-verity-sig Verity=signature VerityMatchKey=root mkosi-25.3/mkosi/resources/repart/definitions/sysext-unsigned.repart.d/000077500000000000000000000000001474711424400264305ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/sysext-unsigned.repart.d/10-root.conf000066400000000000000000000001761474711424400305040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] 
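# Unsigned sysext variant: pack /opt and /usr into a single minimized EROFS root partition, with
# no verity or signature partitions following (contrast with sysext.repart.d/).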
Type=root Format=erofs CopyFiles=/opt/ CopyFiles=/usr/ Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/sysext.repart.d/000077500000000000000000000000001474711424400246165ustar00rootroot00000000000000mkosi-25.3/mkosi/resources/repart/definitions/sysext.repart.d/10-root.conf000066400000000000000000000002361474711424400266670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root Format=erofs CopyFiles=/opt/ CopyFiles=/usr/ Verity=data VerityMatchKey=root Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/sysext.repart.d/20-root-verity.conf000066400000000000000000000001701474711424400302050ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root-verity Verity=hash VerityMatchKey=root Minimize=best mkosi-25.3/mkosi/resources/repart/definitions/sysext.repart.d/30-root-verity-sig.conf000066400000000000000000000001631474711424400307700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later [Partition] Type=root-verity-sig Verity=signature VerityMatchKey=root mkosi-25.3/mkosi/run.py000066400000000000000000000654111474711424400151040ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import errno import fcntl import functools import logging import os import queue import shlex import shutil import signal import subprocess import sys import threading import uuid from collections.abc import Awaitable, Collection, Iterator, Mapping, Sequence from contextlib import AbstractContextManager from pathlib import Path from types import TracebackType from typing import TYPE_CHECKING, Any, Callable, NoReturn, Optional, Protocol from mkosi.log import ARG_DEBUG, ARG_DEBUG_SANDBOX, ARG_DEBUG_SHELL, die from mkosi.sandbox import acquire_privileges, joinpath, umask from mkosi.util import _FILE, PathString, current_home_dir, flatten, one_zero, resource_path, unique SD_LISTEN_FDS_START = 3 # These types are only generic during type checking and not at runtime, leading # to a TypeError during compilation. # Let's be as strict as we can with the description for the usage we have. if TYPE_CHECKING: CompletedProcess = subprocess.CompletedProcess[str] Popen = subprocess.Popen[str] else: CompletedProcess = subprocess.CompletedProcess Popen = subprocess.Popen def make_foreground_process(*, new_process_group: bool = True) -> None: """ If we're connected to a terminal, put the process in a new process group and make that the foreground process group so that only this process receives SIGINT. """ STDERR_FILENO = 2 if os.isatty(STDERR_FILENO): if new_process_group: os.setpgrp() old = signal.signal(signal.SIGTTOU, signal.SIG_IGN) try: os.tcsetpgrp(STDERR_FILENO, os.getpgrp()) except OSError as e: if e.errno != errno.ENOTTY: raise e signal.signal(signal.SIGTTOU, old) def ensure_exc_info() -> tuple[type[BaseException], BaseException, TracebackType]: exctype, exc, tb = sys.exc_info() assert exctype assert exc assert tb return (exctype, exc, tb) @contextlib.contextmanager def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> Iterator[None]: rc = 0 try: yield except SystemExit as e: rc = e.code if isinstance(e.code, int) else 1 if ARG_DEBUG.get(): sys.excepthook(*ensure_exc_info()) except KeyboardInterrupt: rc = 1 if ARG_DEBUG.get(): sys.excepthook(*ensure_exc_info()) else: logging.error("Interrupted") except subprocess.CalledProcessError as e: # We always log when subprocess.CalledProcessError is raised, so we don't log again here. 
rc = e.returncode # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those. # Failures from self come from the forks we spawn to build images in a user namespace. We've already # done all the logging for those failures so we don't log stacktraces for those either. if ( ARG_DEBUG.get() and e.cmd and str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn") and "qemu-system" not in str(e.cmd[0]) ): sys.excepthook(*ensure_exc_info()) except BaseException: sys.excepthook(*ensure_exc_info()) rc = 1 finally: sys.stdout.flush() sys.stderr.flush() exit(rc) def fork_and_wait(target: Callable[..., None], *args: Any, **kwargs: Any) -> None: pid = os.fork() if pid == 0: with uncaught_exception_handler(exit=os._exit): make_foreground_process() target(*args, **kwargs) try: _, status = os.waitpid(pid, 0) except BaseException: os.kill(pid, signal.SIGTERM) _, status = os.waitpid(pid, 0) finally: make_foreground_process(new_process_group=False) rc = os.waitstatus_to_exitcode(status) if rc != 0: raise subprocess.CalledProcessError(rc, ["self"]) def log_process_failure(sandbox: Sequence[str], cmdline: Sequence[str], returncode: int) -> None: if -returncode in (signal.SIGINT, signal.SIGTERM): logging.error(f"Interrupted by {signal.Signals(-returncode).name} signal") elif returncode < 0: logging.error( f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}"' f" was killed by {signal.Signals(-returncode).name} signal." ) elif returncode == 127: logging.error(f"{cmdline[0]} not found.") else: logging.error( f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}"' f" returned non-zero exit code {returncode}." ) def run( cmdline: Sequence[PathString], check: bool = True, stdin: _FILE = None, stdout: _FILE = None, stderr: _FILE = None, input: Optional[str] = None, user: Optional[int] = None, group: Optional[int] = None, env: Mapping[str, str] = {}, log: bool = True, foreground: bool = True, success_exit_status: Sequence[int] = (0,), sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]), ) -> CompletedProcess: if input is not None: assert stdin is None # stdin and input cannot be specified together stdin = subprocess.PIPE with spawn( cmdline, check=check, stdin=stdin, stdout=stdout, stderr=stderr, user=user, group=group, env=env, log=log, foreground=foreground, success_exit_status=success_exit_status, sandbox=sandbox, ) as process: out, err = process.communicate(input) return CompletedProcess(cmdline, process.returncode, out, err) def fd_move_above(fd: int, above: int) -> int: dup = fcntl.fcntl(fd, fcntl.F_DUPFD, above) os.close(fd) return dup def preexec( *, foreground: bool, preexec_fn: Optional[Callable[[], None]], pass_fds: Collection[int], ) -> None: if foreground: make_foreground_process() if preexec_fn: preexec_fn() if not pass_fds: return # The systemd socket activation interface requires any passed file descriptors to start at '3' and # incrementally increase from there. The file descriptors we got from the caller might be arbitrary, # so we need to move them around to make sure they start at '3' and incrementally increase. for i, fd in enumerate(pass_fds): # Don't do anything if the file descriptor is already what we need it to be. if fd == SD_LISTEN_FDS_START + i: continue # Close any existing file descriptor that occupies the id that we want to move to. 
This is safe # because using pass_fds implies using close_fds as well, except that file descriptors are closed # by python after running the preexec function, so we have to close a few of those manually here # to make room if needed. try: os.close(SD_LISTEN_FDS_START + i) except OSError as e: if e.errno != errno.EBADF: raise nfd = fcntl.fcntl(fd, fcntl.F_DUPFD, SD_LISTEN_FDS_START + i) # fcntl.F_DUPFD uses the closest available file descriptor ID, so make sure it actually picked # the ID we expect it to pick. assert nfd == SD_LISTEN_FDS_START + i @contextlib.contextmanager def spawn( cmdline: Sequence[PathString], check: bool = True, stdin: _FILE = None, stdout: _FILE = None, stderr: _FILE = None, user: Optional[int] = None, group: Optional[int] = None, pass_fds: Collection[int] = (), env: Mapping[str, str] = {}, log: bool = True, foreground: bool = False, preexec_fn: Optional[Callable[[], None]] = None, success_exit_status: Sequence[int] = (0,), sandbox: AbstractContextManager[Sequence[PathString]] = contextlib.nullcontext([]), ) -> Iterator[Popen]: assert sorted(set(pass_fds)) == list(pass_fds) cmd = [os.fspath(x) for x in cmdline] if ARG_DEBUG.get(): logging.info(f"+ {shlex.join(cmd)}") if not stdout and not stderr: # Unless explicit redirection is done, print all subprocess output on stderr, since we do so as well # for mkosi's own output. stdout = sys.stderr if stdin is None: stdin = subprocess.DEVNULL env = { "PATH": os.environ["PATH"], "TERM": os.getenv("TERM", "vt220"), "LANG": "C.UTF-8", **env, } if "TMPDIR" in os.environ: env["TMPDIR"] = os.environ["TMPDIR"] for e in ("SYSTEMD_LOG_LEVEL", "SYSTEMD_LOG_LOCATION"): if e in os.environ: env[e] = os.environ[e] if "HOME" not in env: env["HOME"] = "/" # sandbox.py takes care of setting $LISTEN_PID if pass_fds: env["LISTEN_FDS"] = str(len(pass_fds)) with sandbox as sbx: prefix = [os.fspath(x) for x in sbx] if prefix: prfd, pwfd = os.pipe2(os.O_CLOEXEC) # Make sure the write end of the pipe (which we pass to the subprocess) is higher than all the # file descriptors we'll pass to the subprocess, so that it doesn't accidentally get closed by # the logic in preexec(). if pass_fds: pwfd = fd_move_above(pwfd, list(pass_fds)[-1]) exec_prefix = ["--exec-fd", f"{SD_LISTEN_FDS_START + len(pass_fds)}", "--"] pass_fds = [*pass_fds, pwfd] else: exec_prefix = [] prfd, pwfd = None, None try: with subprocess.Popen( [*prefix, *exec_prefix, *cmdline], stdin=stdin, stdout=stdout, stderr=stderr, text=True, user=user, group=group, # pass_fds only comes into effect after python has invoked the preexec function, so we make # sure that pass_fds contains the file descriptors to keep open after we've done our # transformation in preexec(). pass_fds=[SD_LISTEN_FDS_START + i for i in range(len(pass_fds))], env=env, preexec_fn=functools.partial( preexec, foreground=foreground, preexec_fn=preexec_fn, pass_fds=pass_fds, ), ) as proc: if pwfd is not None: os.close(pwfd) if prfd is not None: os.read(prfd, 1) os.close(prfd) def failed() -> bool: return check and (rc := proc.poll()) is not None and rc not in success_exit_status try: # Don't bother yielding if we've already failed by the time we get here. We'll raise an # exception later on so it's not a problem that we don't yield at all. 
if not failed(): yield proc except BaseException: proc.terminate() raise finally: returncode = proc.wait() if failed(): if log: log_process_failure(prefix, cmd, returncode) if ARG_DEBUG_SHELL.get(): subprocess.run( [*prefix, "bash"], check=False, stdin=sys.stdin, text=True, user=user, group=group, env=env, preexec_fn=functools.partial( preexec, foreground=True, preexec_fn=preexec_fn, pass_fds=tuple(), ), ) raise subprocess.CalledProcessError(returncode, cmdline) except FileNotFoundError as e: die(f"{e.filename} not found.") finally: if foreground: make_foreground_process(new_process_group=False) def finalize_path( root: Optional[Path] = None, extra: Sequence[Path] = (), prefix_usr: bool = False, relaxed: bool = False, ) -> str: root = root or Path("/") path = [os.fspath(p) for p in extra] if relaxed: path += [ s for s in os.environ["PATH"].split(":") if s in ("/usr/bin", "/usr/sbin") or not s.startswith("/usr") ] # Make sure that /usr/bin and /usr/sbin are always in $PATH. path += [s for s in ("/usr/bin", "/usr/sbin") if s not in path] else: path += ["/usr/bin", "/usr/sbin"] if prefix_usr: path = [os.fspath(root / s.lstrip("/")) if s in ("/usr/bin", "/usr/sbin") else s for s in path] return ":".join(unique(path)) def find_binary( *names: PathString, root: Optional[Path] = None, extra: Sequence[Path] = (), ) -> Optional[Path]: root = root or Path("/") path = finalize_path(root=root, extra=extra, prefix_usr=True) for name in names: if any(Path(name).is_relative_to(d) for d in extra): pass elif Path(name).is_absolute(): name = root / Path(name).relative_to("/") elif "/" in str(name): name = root / name if binary := shutil.which(name, path=path): if root != Path("/") and not Path(binary).is_relative_to(root): return Path(binary) else: return Path("/") / Path(binary).relative_to(root) return None class AsyncioThread(threading.Thread): """ The default threading.Thread() is not interruptible, so we make our own version by using the concurrency feature in python that is interruptible, namely asyncio. Additionally, we store any exception that the coroutine raises and re-raise it in join() if no other exception was raised before. """ def __init__(self, target: Awaitable[Any], *args: Any, **kwargs: Any) -> None: import asyncio self.target = target self.loop: queue.SimpleQueue[asyncio.AbstractEventLoop] = queue.SimpleQueue() self.exc: queue.SimpleQueue[BaseException] = queue.SimpleQueue() super().__init__(*args, **kwargs) def run(self) -> None: import asyncio async def wrapper() -> None: self.loop.put(asyncio.get_running_loop()) await self.target try: asyncio.run(wrapper()) except asyncio.CancelledError: pass except BaseException as e: self.exc.put(e) def cancel(self) -> None: import asyncio.tasks loop = self.loop.get() for task in asyncio.tasks.all_tasks(loop): loop.call_soon_threadsafe(task.cancel) def __enter__(self) -> "AsyncioThread": self.start() return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: self.cancel() self.join() if type is None: try: raise self.exc.get_nowait() except queue.Empty: pass class SandboxProtocol(Protocol): def __call__( self, *, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: ... 
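# A SandboxProtocol implementation is any callable that takes extra sandbox options and returns a
# context manager yielding the command-line prefix to prepend to the sandboxed invocation.
# Illustrative sketch only (not the only way callers wire this up), assuming a tools tree at /:
#
#   sandbox = functools.partial(sandbox_cmd, tools=Path("/"))
#   run(["ls", "/"], sandbox=sandbox(options=["--ro-bind", "/etc", "/etc"]))
#
# nosandbox() below satisfies the same protocol while running the command unsandboxed.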
def nosandbox( *, options: Sequence[PathString] = (), ) -> AbstractContextManager[list[PathString]]: return contextlib.nullcontext([]) def workdir(path: Path, sandbox: Optional[SandboxProtocol] = None) -> str: subdir = "/" if sandbox and sandbox == nosandbox else "/work" return joinpath(subdir, str(path)) def finalize_passwd_symlinks(root: PathString) -> list[PathString]: """ If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we run the command, to make sure that the command we run uses user/group information from the apivfs directory instead of from the host. """ return flatten( ("--symlink", Path(root) / "etc" / f, f"/etc/{f}") for f in ("passwd", "group", "shadow", "gshadow") ) def network_options(*, network: bool) -> list[PathString]: return [ "--setenv", "SYSTEMD_OFFLINE", one_zero(network), *(["--unshare-net"] if not network else []), ] # fmt: skip @contextlib.contextmanager def vartmpdir() -> Iterator[Path]: # We want to use an empty subdirectory in the host's temporary directory as the sandbox's /var/tmp. d = Path(os.getenv("TMPDIR", "/var/tmp")) / f"mkosi-var-tmp-{uuid.uuid4().hex[:16]}" d.mkdir() try: yield d finally: # A directory that's used as an overlayfs workdir will contain a "work" subdirectory after the # overlayfs is unmounted. This "work" subdirectory will have permissions 000 and as such can't be # opened or searched unless the user has the CAP_DAC_OVERRIDE capability. shutil.rmtree() will try to # search the "work" subdirectory to remove anything in it which will fail with a permission error. To # circumvent this, if the work directory exists and is not empty, let's fork off a subprocess where # we acquire extra privileges and then invoke shutil.rmtree(). If the work directory exists but is # empty, let's just delete the "work" subdirectory first and then invoke shutil.rmtree(). Deleting # the subdirectory when it is empty is not a problem because deleting a subdirectory depends on the # permissions of the parent directory and not the directory itself. try: (d / "work").rmdir() except OSError as e: if e.errno == errno.ENOTEMPTY: def remove() -> None: acquire_privileges() shutil.rmtree(d) fork_and_wait(remove) return elif e.errno != errno.ENOENT: raise shutil.rmtree(d) @contextlib.contextmanager def sandbox_cmd( *, network: bool = False, devices: bool = False, scripts: Optional[Path] = None, tools: Path = Path("/"), relaxed: bool = False, overlay: Optional[Path] = None, options: Sequence[PathString] = (), setup: Sequence[PathString] = (), extra: Sequence[Path] = (), ) -> Iterator[list[PathString]]: assert not (overlay and relaxed) with contextlib.ExitStack() as stack: module = stack.enter_context(resource_path(sys.modules[__package__ or __name__])) cmdline: list[PathString] = [ *setup, *(["strace", "--detach-on=execve"] if ARG_DEBUG_SANDBOX.get() else []), sys.executable, "-SI", module / "sandbox.py", "--proc", "/proc", # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are # used instead. 
"--unsetenv", "TMPDIR", *network_options(network=network), ] # fmt: skip if overlay and (overlay / "usr").exists(): cmdline += [ "--overlay-lowerdir", tools / "usr", "--overlay-lowerdir", overlay / "usr", "--overlay", "/usr", ] # fmt: skip else: cmdline += ["--ro-bind", tools / "usr", "/usr"] for d in ("bin", "sbin", "lib", "lib32", "lib64"): if (p := tools / d).is_symlink(): cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)] elif p.is_dir(): cmdline += ["--ro-bind", p, Path("/") / p.relative_to(tools)] # If we're using /usr from a tools tree, we have to use /etc/alternatives and /etc/ld.so.cache from # the tools tree as well if they exists since those are directly related to /usr. In relaxed mode, we # only do this if the mountpoint already exists on the host as otherwise we'd modify the host's /etc # by creating the mountpoint ourselves (or fail when trying to create it). for p in (Path("etc/alternatives"), Path("etc/ld.so.cache")): if (tools / p).exists() and (not relaxed or (Path("/") / p).exists()): cmdline += ["--ro-bind", tools / p, Path("/") / p] if (tools / "nix/store").exists(): cmdline += ["--bind", tools / "nix/store", "/nix/store"] if relaxed: for p in Path("/").iterdir(): if p not in ( Path("/home"), Path("/proc"), Path("/usr"), Path("/nix"), Path("/bin"), Path("/sbin"), Path("/lib"), Path("/lib32"), Path("/lib64"), ): if p.is_symlink(): cmdline += ["--symlink", p.readlink(), p] else: cmdline += ["--bind", p, p] # /etc might be full of symlinks to /usr/share/factory, so make sure we use # /usr/share/factory from the host and not from the tools tree. if ( tools != Path("/") and (tools / "usr/share/factory").exists() and (factory := Path("/usr/share/factory")).exists() ): cmdline += ["--bind", factory, factory] if home := current_home_dir(): cmdline += ["--bind", home, home] else: cmdline += [ "--dir", "/var/tmp", "--dir", "/var/log", "--unshare-ipc", # apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they # still use sandbox.py, so we make sure it is available inside the sandbox so it can be # executed there as well. 
"--ro-bind", module / "sandbox.py", "/sandbox.py", ] # fmt: skip if devices: cmdline += ["--bind", "/sys", "/sys", "--bind", "/dev", "/dev"] else: cmdline += ["--dev", "/dev"] if network: for p in (Path("/etc/resolv.conf"), Path("/run/systemd/resolve")): if p.exists(): cmdline += ["--ro-bind", p, p] path = finalize_path( root=tools, extra=[Path("/scripts"), *extra] if scripts else extra, relaxed=relaxed, ) cmdline += ["--setenv", "PATH", path] if scripts: cmdline += ["--ro-bind", scripts, "/scripts"] tmp: Optional[Path] if not overlay and not relaxed: tmp = stack.enter_context(vartmpdir()) yield [*cmdline, "--bind", tmp, "/var/tmp", "--dir", "/tmp", "--dir", "/run", *options] return for d in ("etc", "opt"): if overlay and (overlay / d).exists(): cmdline += ["--ro-bind", overlay / d, Path("/") / d] else: cmdline += ["--dir", Path("/") / d] for d in ("srv", "media", "mnt", "var", "run", "tmp"): tmp = None if d not in ("run", "tmp"): with umask(~0o755): tmp = stack.enter_context(vartmpdir()) if overlay and (overlay / d).exists(): work = None if tmp: with umask(~0o755): work = stack.enter_context(vartmpdir()) cmdline += [ "--overlay-lowerdir", overlay / d, "--overlay-upperdir", tmp or "tmpfs", *(["--overlay-workdir", str(work)] if work else []), "--overlay", Path("/") / d, ] # fmt: skip elif not relaxed: if tmp: cmdline += ["--bind", tmp, Path("/") / d] else: cmdline += ["--dir", Path("/") / d] # If we put an overlayfs on /var, and /var/tmp is not in the sandbox tree, make sure /var/tmp is a # bind mount of a regular empty directory instead of the overlays so tools like systemd-repart can # use the underlying filesystem features from btrfs when using /var/tmp. if overlay and not (overlay / "var/tmp").exists(): tmp = stack.enter_context(vartmpdir()) cmdline += ["--bind", tmp, "/var/tmp"] yield [*cmdline, *options] def apivfs_options(*, root: Path = Path("/buildroot")) -> list[PathString]: return [ "--tmpfs", root / "run", "--tmpfs", root / "tmp", "--proc", root / "proc", "--dev", root / "dev", # Nudge gpg to create its sockets in /run by making sure /run/user/0 exists. "--dir", root / "run/user/0", # Make sure anything running in the root directory thinks it's in a container. $container can't # always be accessed so we write /run/host/container-manager as well which is always accessible. "--write", "mkosi", root / "run/host/container-manager", ] # fmt: skip def chroot_options() -> list[PathString]: return [ # Let's always run as (fake) root when we chroot inside the image as tools executed within the image # could have builtin assumptions about files being owned by root. "--become-root", # Unshare IPC namespace so any tests that exercise IPC related features don't fail with permission # errors as --become-root implies unsharing a user namespace which won't have access to the parent's # IPC namespace anymore. "--unshare-ipc", "--setenv", "container", "mkosi", "--setenv", "HOME", "/", "--setenv", "PATH", "/usr/bin:/usr/sbin", "--setenv", "BUILDROOT", "/", ] # fmt: skip @contextlib.contextmanager def chroot_cmd( *, root: Path, network: bool = False, options: Sequence[PathString] = (), ) -> Iterator[list[PathString]]: with vartmpdir() as dir, resource_path(sys.modules[__package__ or __name__]) as module: cmdline: list[PathString] = [ sys.executable, "-SI", module / "sandbox.py", "--bind", root, "/", # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are # used instead. 
"--unsetenv", "TMPDIR", *network_options(network=network), *apivfs_options(root=Path("/")), *chroot_options(), ] # fmt: skip if network: for p in (Path("/etc/resolv.conf"), Path("/run/systemd/resolve")): if p.exists(): cmdline += ["--ro-bind", p, p] yield [*cmdline, "--bind", dir, "/var/tmp", *options] def finalize_interpreter(tools: bool) -> str: if tools: return "python3" exe = sys.executable if Path(exe).is_relative_to("/usr"): return exe return "python3" mkosi-25.3/mkosi/sandbox.py000077500000000000000000001015231474711424400157340ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later """ This is a standalone implementation of sandboxing which is used by mkosi. Note that this is invoked many times while building the image and as a result, the performance of this script has a substantial impact on the performance of mkosi itself. To keep the runtime of this script to a minimum, please don't import any extra modules if it can be avoided. """ import ctypes import os import sys import warnings # noqa: F401 (loaded lazily by os.execvp() which happens too late) __version__ = "25.3" # The following constants are taken from the Linux kernel headers. AT_EMPTY_PATH = 0x1000 AT_FDCWD = -100 AT_NO_AUTOMOUNT = 0x800 AT_RECURSIVE = 0x8000 AT_SYMLINK_NOFOLLOW = 0x100 BTRFS_SUPER_MAGIC = 0x9123683E CAP_NET_ADMIN = 12 CAP_SYS_ADMIN = 21 CLONE_NEWIPC = 0x08000000 CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x00020000 CLONE_NEWUSER = 0x10000000 EPERM = 1 ENOENT = 2 ENOSYS = 38 F_GETFD = 1 F_SETFD = 2 FD_CLOEXEC = 1 LINUX_CAPABILITY_U32S_3 = 2 LINUX_CAPABILITY_VERSION_3 = 0x20080522 MNT_DETACH = 2 MOUNT_ATTR_RDONLY = 0x00000001 MOUNT_ATTR_NOSUID = 0x00000002 MOUNT_ATTR_NODEV = 0x00000004 MOUNT_ATTR_NOEXEC = 0x00000008 MOUNT_ATTR_SIZE_VER0 = 32 MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 MS_BIND = 4096 MS_MOVE = 8192 MS_REC = 16384 MS_SHARED = 1 << 20 MS_SLAVE = 1 << 19 NR_mount_setattr = 442 NR_move_mount = 429 NR_open_tree = 428 OPEN_TREE_CLOEXEC = os.O_CLOEXEC OPEN_TREE_CLONE = 1 PR_CAP_AMBIENT = 47 PR_CAP_AMBIENT_RAISE = 2 # These definitions are taken from the libseccomp headers SCMP_ACT_ALLOW = 0x7FFF0000 SCMP_ACT_ERRNO = 0x00050000 class mount_attr(ctypes.Structure): _fields_ = [ ("attr_set", ctypes.c_uint64), ("attr_clr", ctypes.c_uint64), ("propagation", ctypes.c_uint64), ("userns_fd", ctypes.c_uint64), ] class cap_user_header_t(ctypes.Structure): # __user_cap_header_struct _fields_ = [ ("version", ctypes.c_uint32), ("pid", ctypes.c_int), ] class cap_user_data_t(ctypes.Structure): # __user_cap_data_struct _fields_ = [ ("effective", ctypes.c_uint32), ("permitted", ctypes.c_uint32), ("inheritable", ctypes.c_uint32), ] libc = ctypes.CDLL(None, use_errno=True) libc.syscall.restype = ctypes.c_long libc.unshare.argtypes = (ctypes.c_int,) libc.statfs.argtypes = (ctypes.c_char_p, ctypes.c_void_p) libc.eventfd.argtypes = (ctypes.c_int, ctypes.c_int) libc.mount.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_char_p) libc.pivot_root.argtypes = (ctypes.c_char_p, ctypes.c_char_p) libc.umount2.argtypes = (ctypes.c_char_p, ctypes.c_int) libc.capget.argtypes = (ctypes.c_void_p, ctypes.c_void_p) libc.capset.argtypes = (ctypes.c_void_p, ctypes.c_void_p) libc.fcntl.argtypes = (ctypes.c_int, ctypes.c_int, ctypes.c_int) def terminal_is_dumb() -> bool: return not sys.stdout.isatty() or not sys.stderr.isatty() or os.getenv("TERM", "") == "dumb" class Style: # fmt: off bold: str = "\033[0;1;39m" if not terminal_is_dumb() else "" blue: str = "\033[0;1;34m" if not 
terminal_is_dumb() else "" gray: str = "\033[0;38;5;245m" if not terminal_is_dumb() else "" red: str = "\033[31;1m" if not terminal_is_dumb() else "" yellow: str = "\033[33;1m" if not terminal_is_dumb() else "" reset: str = "\033[0m" if not terminal_is_dumb() else "" # fmt: on ENOSYS_MSG = f"""\ {Style.red}mkosi was unable to invoke the {{syscall}}() system call.{Style.reset} This probably means either the system call is not implemented by the running kernel version ({{kver}}) or the system call is prohibited via seccomp if mkosi is being executed inside a containerized environment.\ """ def oserror(syscall: str, filename: str = "") -> None: if ctypes.get_errno() == ENOSYS: print(ENOSYS_MSG.format(syscall=syscall, kver=os.uname().version), file=sys.stderr) raise OSError(ctypes.get_errno(), os.strerror(ctypes.get_errno()), filename or None) def unshare(flags: int) -> None: if libc.unshare(flags) < 0: oserror("unshare") def statfs(path: str) -> int: # struct statfs is 120 bytes, which equals 15 longs. Since we only care about the first field # and the first field is of type long, we avoid declaring the full struct by just passing an # array of 15 longs as the output argument. buffer = (ctypes.c_long * 15)() if libc.statfs(path.encode(), ctypes.byref(buffer)) < 0: oserror("statfs", path) return int(buffer[0]) def mount(src: str, dst: str, type: str, flags: int, options: str) -> None: srcb = src.encode() if src else None typeb = type.encode() if type else None optionsb = options.encode() if options else None if libc.mount(srcb, dst.encode(), typeb, flags, optionsb) < 0: oserror("mount", dst) def umount2(path: str, flags: int = 0) -> None: if libc.umount2(path.encode(), flags) < 0: oserror("umount2", path) def cap_permitted_to_ambient() -> None: """ When unsharing a user namespace and mapping the current user to itself, the user has a full set of capabilities in the user namespace. This allows the user to do mounts after unsharing a mount namespace for example. However, these capabilities are lost again when the user executes a subprocess. As we also want subprocesses invoked by the user to be able to mount stuff, we make sure the capabilities are inherited by adding all the user's capabilities to the inherited and ambient capabilities set, which makes sure that they are passed down to subprocesses. """ header = cap_user_header_t(LINUX_CAPABILITY_VERSION_3, 0) payload = (cap_user_data_t * LINUX_CAPABILITY_U32S_3)() if libc.capget(ctypes.addressof(header), ctypes.byref(payload)) < 0: oserror("capget") payload[0].inheritable = payload[0].permitted payload[1].inheritable = payload[1].permitted if libc.capset(ctypes.addressof(header), ctypes.byref(payload)) < 0: oserror("capset") effective = payload[1].effective << 32 | payload[0].effective with open("/proc/sys/kernel/cap_last_cap", "rb") as f: last_cap = int(f.read()) libc.prctl.argtypes = (ctypes.c_int, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong) for cap in range(ctypes.sizeof(ctypes.c_uint64) * 8): if cap > last_cap: break if effective & (1 << cap) and libc.prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, cap, 0, 0) < 0: oserror("prctl") def have_effective_cap(capability: int) -> bool: with open("/proc/self/status", "rb") as f: for line in f.readlines(): if line.startswith(b"CapEff:"): return (int(line[7:], 16) & (1 << capability)) != 0 return False def seccomp_suppress_chown() -> None: """ There's still a few files and directories left in distributions in /usr and /etc that are not owned by root. 
This causes package managers to fail to install the corresponding packages when run from a single uid user namespace. Unfortunately, non-root users can only create files owned by their own uid. To still allow non-root users to build images, if requested we install a seccomp filter that makes calls to chown() and friends a noop. """ libseccomp = ctypes.CDLL("libseccomp.so.2") if libseccomp is None: raise FileNotFoundError("libseccomp.so.2") libseccomp.seccomp_init.argtypes = (ctypes.c_uint32,) libseccomp.seccomp_init.restype = ctypes.c_void_p libseccomp.seccomp_release.argtypes = (ctypes.c_void_p,) libseccomp.seccomp_release.restype = None libseccomp.seccomp_syscall_resolve_name.argtypes = (ctypes.c_char_p,) libseccomp.seccomp_rule_add_exact.argtypes = ( ctypes.c_void_p, ctypes.c_uint32, ctypes.c_int, ctypes.c_uint, ) libseccomp.seccomp_load.argtypes = (ctypes.c_void_p,) seccomp = libseccomp.seccomp_init(SCMP_ACT_ALLOW) try: for syscall in (b"chown", b"chown32", b"fchown", b"fchown32", b"fchownat", b"lchown", b"lchown32"): id = libseccomp.seccomp_syscall_resolve_name(syscall) libseccomp.seccomp_rule_add_exact(seccomp, SCMP_ACT_ERRNO, id, 0) libseccomp.seccomp_load(seccomp) finally: libseccomp.seccomp_release(seccomp) def join_new_session_keyring() -> None: libkeyutils = ctypes.CDLL("libkeyutils.so.1") if libkeyutils is None: raise FileNotFoundError("libkeyutils.so.1") libkeyutils.keyctl_join_session_keyring.argtypes = (ctypes.c_char_p,) libkeyutils.keyctl_join_session_keyring.restype = ctypes.c_int32 keyring = libkeyutils.keyctl_join_session_keyring(None) if keyring == -1: oserror("keyctl") def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: """ When using the old mount syscall to do a recursive bind mount, mount options are not applied recursively. Because we want to do recursive read-only bind mounts in some cases, we use the new mount API for that which does allow recursively changing mount options when doing bind mounts. 
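    Roughly, the sequence implemented below boils down to the following pseudo-C
    sketch (error handling and the syscall-number fallbacks for older libcs omitted):

        fd = open_tree(AT_FDCWD, src, OPEN_TREE_CLONE | AT_RECURSIVE | AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW);
        mount_setattr(fd, "", AT_EMPTY_PATH | AT_RECURSIVE, &attr, MOUNT_ATTR_SIZE_VER0);
        move_mount(fd, "", AT_FDCWD, dst, MOVE_MOUNT_F_EMPTY_PATH);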
""" flags = AT_NO_AUTOMOUNT | AT_RECURSIVE | AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE try: libc.open_tree.argtypes = (ctypes.c_int, ctypes.c_char_p, ctypes.c_uint) fd = libc.open_tree(AT_FDCWD, src.encode(), flags) except AttributeError: libc.syscall.argtypes = (ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint) fd = libc.syscall(NR_open_tree, AT_FDCWD, src.encode(), flags) if fd < 0: oserror("open_tree", src) try: attr = mount_attr() attr.attr_set = attrs flags = AT_EMPTY_PATH | AT_RECURSIVE try: libc.mount_setattr.argtypes = ( ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ) r = libc.mount_setattr(fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) except AttributeError: libc.syscall.argtypes = ( ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, ) r = libc.syscall(NR_mount_setattr, fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) if r < 0: oserror("mount_setattr", src) try: libc.move_mount.argtypes = ( ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ) r = libc.move_mount(fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) except AttributeError: libc.syscall.argtypes = ( ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ) r = libc.syscall(NR_move_mount, fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) if r < 0: oserror("move_mount", dst) finally: os.close(fd) class umask: def __init__(self, mask: int) -> None: self.mask = mask def __enter__(self) -> None: self.mask = os.umask(self.mask) def __exit__(self, *args: object, **kwargs: object) -> None: os.umask(self.mask) def become_user(uid: int, gid: int) -> None: """ This function implements the required dance to unshare a user namespace and map the current user to itself or to root within it. The kernel only allows a process running outside of the unshared user namespace to write the necessary uid and gid mappings, so we fork off a child process, make it wait until the parent process has unshared a user namespace, and then writes the necessary uid and gid mappings. 
""" ppid = os.getpid() event = libc.eventfd(0, 0) if event < 0: oserror("eventfd") pid = os.fork() if pid == 0: try: os.read(event, ctypes.sizeof(ctypes.c_uint64)) os.close(event) with open(f"/proc/{ppid}/setgroups", "wb") as f: f.write(b"deny\n") with open(f"/proc/{ppid}/gid_map", "wb") as f: f.write(f"{gid} {os.getgid()} 1\n".encode()) with open(f"/proc/{ppid}/uid_map", "wb") as f: f.write(f"{uid} {os.getuid()} 1\n".encode()) except OSError as e: os._exit(e.errno or 1) except BaseException: os._exit(1) else: os._exit(0) try: unshare(CLONE_NEWUSER) except OSError as e: if e.errno == EPERM: print(UNSHARE_EPERM_MSG, file=sys.stderr) raise finally: os.write(event, ctypes.c_uint64(1)) os.close(event) _, status = os.waitpid(pid, 0) rc = os.waitstatus_to_exitcode(status) if rc != 0: raise OSError(rc, os.strerror(rc)) def acquire_privileges(*, become_root: bool = False) -> bool: if os.getuid() == 0 or (not become_root and have_effective_cap(CAP_SYS_ADMIN)): return False if become_root: become_user(0, 0) else: become_user(os.getuid(), os.getgid()) cap_permitted_to_ambient() return True def userns_has_single_user() -> bool: try: with open("/proc/self/uid_map", "rb") as f: lines = f.readlines() except FileNotFoundError: return False return len(lines) == 1 and int(lines[0].split()[-1]) == 1 def chase(root: str, path: str) -> str: if root == "/": return os.path.realpath(path) cwd = os.getcwd() fd = os.open("/", os.O_CLOEXEC | os.O_PATH | os.O_DIRECTORY) try: os.chroot(root) os.chdir("/") return joinpath(root, os.path.realpath(path)) finally: os.fchdir(fd) os.close(fd) os.chroot(".") os.chdir(cwd) def splitpath(path: str) -> tuple[str, ...]: return tuple(p for p in path.split("/") if p) def joinpath(path: str, *paths: str) -> str: return os.path.join(path, *[p.lstrip("/") for p in paths]) def is_relative_to(one: str, two: str) -> bool: return os.path.commonpath((one, two)) == two def fd_make_cloexec(fd: int) -> None: flags = libc.fcntl(fd, F_GETFD, 0) libc.fcntl(fd, F_SETFD, flags | FD_CLOEXEC) class FSOperation: def __init__(self, dst: str) -> None: self.dst = dst def execute(self, oldroot: str, newroot: str) -> None: raise NotImplementedError() @classmethod def optimize(cls, fsops: list["FSOperation"]) -> list["FSOperation"]: binds: dict[BindOperation, None] = {} rest = [] for fsop in fsops: if isinstance(fsop, BindOperation): binds[fsop] = None else: rest.append(fsop) # Drop all bind mounts that are mounted from beneath another bind mount to the same # location within the new rootfs. optimized = [ m for m in binds if not any( m != n and m.readonly == n.readonly and m.required == n.required and is_relative_to(m.src, n.src) and is_relative_to(m.dst, n.dst) and os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst) for n in binds ) ] # Make sure bind mounts override other operations on the same destination by appending them # to the rest and depending on python's stable sort behavior. 
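        # For example (hypothetical paths): the dedup above drops a /src/a -> /dst/a bind
        # when /src -> /dst is also bound with the same readonly/required flags, since the
        # parent bind mount already provides it; the sort below then orders /dst before
        # /dst/a, so parent mountpoints are always set up first.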
        return sorted([*rest, *optimized], key=lambda fsop: splitpath(fsop.dst))


class BindOperation(FSOperation):
    def __init__(self, src: str, dst: str, *, readonly: bool, required: bool) -> None:
        self.src = src
        self.readonly = readonly
        self.required = required
        super().__init__(dst)

    def __hash__(self) -> int:
        return hash((splitpath(self.src), splitpath(self.dst), self.readonly, self.required))

    def __eq__(self, other: object) -> bool:
        return isinstance(other, BindOperation) and self.__hash__() == other.__hash__()

    def execute(self, oldroot: str, newroot: str) -> None:
        src = chase(oldroot, self.src)
        if not os.path.exists(src) and not self.required:
            return

        # If we're mounting a file on top of a symlink, mount directly on top of the symlink instead of
        # resolving it.
        dst = joinpath(newroot, self.dst)
        if not os.path.isdir(src) and os.path.islink(dst):
            return mount_rbind(src, dst, attrs=MOUNT_ATTR_RDONLY if self.readonly else 0)

        dst = chase(newroot, self.dst)
        if not os.path.exists(dst):
            isfile = os.path.isfile(src)

            with umask(~0o755):
                os.makedirs(os.path.dirname(dst), exist_ok=True)

            with umask(~0o644 if isfile else ~0o755):
                if isfile:
                    os.close(os.open(dst, os.O_CREAT | os.O_CLOEXEC | os.O_EXCL))
                else:
                    os.mkdir(dst)

        mount_rbind(src, dst, attrs=MOUNT_ATTR_RDONLY if self.readonly else 0)


class ProcOperation(FSOperation):
    def execute(self, oldroot: str, newroot: str) -> None:
        dst = chase(newroot, self.dst)
        with umask(~0o755):
            os.makedirs(dst, exist_ok=True)

        mount_rbind(joinpath(oldroot, "proc"), dst)


class DevOperation(FSOperation):
    def __init__(self, ttyname: str, dst: str) -> None:
        self.ttyname = ttyname
        super().__init__(dst)

    def execute(self, oldroot: str, newroot: str) -> None:
        # We don't put actual devices in /dev, just the API stuff in there that all manner of
        # things depend on, like /dev/null.
        dst = chase(newroot, self.dst)
        with umask(~0o755):
            os.makedirs(dst, exist_ok=True)

        # Note that the mode is crucial here. If the default mode (1777) is used, trying to access
        # /dev/null fails with EACCES for unknown reasons.
mount("tmpfs", dst, "tmpfs", 0, "mode=0755") for node in ("null", "zero", "full", "random", "urandom", "tty", "fuse"): nsrc = joinpath(oldroot, "dev", node) if node == "fuse" and not os.path.exists(nsrc): continue ndst = joinpath(dst, node) os.close(os.open(ndst, os.O_CREAT | os.O_CLOEXEC | os.O_EXCL)) mount(nsrc, ndst, "", MS_BIND, "") for i, node in enumerate(("stdin", "stdout", "stderr")): os.symlink(f"/proc/self/fd/{i}", joinpath(dst, node)) os.symlink("/proc/self/fd", joinpath(dst, "fd")) os.symlink("/proc/kcore", joinpath(dst, "core")) with umask(~0o1777): os.mkdir(joinpath(dst, "shm"), mode=0o1777) with umask(~0o755): os.mkdir(joinpath(dst, "pts")) mount("devpts", joinpath(dst, "pts"), "devpts", 0, "newinstance,ptmxmode=0666,mode=620") os.symlink("pts/ptmx", joinpath(dst, "ptmx")) if self.ttyname: os.close(os.open(joinpath(dst, "console"), os.O_CREAT | os.O_CLOEXEC | os.O_EXCL)) mount(joinpath(oldroot, self.ttyname), joinpath(dst, "console"), "", MS_BIND, "") class TmpfsOperation(FSOperation): def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(dst, exist_ok=True) options = "" if any(dst.endswith(suffix) for suffix in ("/tmp", "/var/tmp")) else "mode=0755" mount("tmpfs", dst, "tmpfs", 0, options) class DirOperation(FSOperation): def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) mode = 0o1777 if any(dst.endswith(suffix) for suffix in ("/tmp", "/var/tmp")) else 0o755 if not os.path.exists(dst): with umask(~mode): os.mkdir(dst, mode=mode) class SymlinkOperation(FSOperation): def __init__(self, src: str, dst: str) -> None: self.src = src super().__init__(dst) def execute(self, oldroot: str, newroot: str) -> None: dst = joinpath(newroot, self.dst) try: return os.symlink(self.src, dst) except FileExistsError: if os.path.islink(dst) and os.readlink(dst) == self.src: return if os.path.isdir(dst): raise # If the target already exists and is not a directory, create the symlink somewhere else and mount # it over the existing file or symlink. os.symlink(self.src, "/symlink") mount_rbind("/symlink", dst) os.unlink("/symlink") class WriteOperation(FSOperation): def __init__(self, data: str, dst: str) -> None: self.data = data super().__init__(dst) def execute(self, oldroot: str, newroot: str) -> None: dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) with open(dst, "wb") as f: f.write(self.data.encode()) class OverlayOperation(FSOperation): def __init__(self, lowerdirs: tuple[str, ...], upperdir: str, workdir: str, dst: str) -> None: self.lowerdirs = lowerdirs self.upperdir = upperdir self.workdir = workdir super().__init__(dst) # This supports being used as a context manager so we can reuse the logic for mount_overlay() # in mounts.py. 
def __enter__(self) -> None: self.execute("/", "/") def __exit__(self, *args: object, **kwargs: object) -> None: umount2(self.dst) def execute(self, oldroot: str, newroot: str) -> None: lowerdirs = tuple(chase(oldroot, p) for p in self.lowerdirs) upperdir = ( chase(oldroot, self.upperdir) if self.upperdir and self.upperdir != "tmpfs" else self.upperdir ) workdir = chase(oldroot, self.workdir) if self.workdir else None dst = chase(newroot, self.dst) with umask(~0o755): os.makedirs(os.path.dirname(dst), exist_ok=True) mode = 0o1777 if any(dst.endswith(suffix) for suffix in ("/tmp", "/var/tmp")) else 0o755 if not os.path.exists(dst): with umask(~mode): os.mkdir(dst, mode=mode) options = [ f"lowerdir={':'.join(lowerdirs)}", "userxattr", # Disable the inodes index and metacopy (only copy metadata upwards if possible) # options. If these are enabled (e.g., if the kernel enables them by default), # the mount will fail if the upper directory has been earlier used with a different # lower directory, such as with a build overlay that was generated on top of a # different temporary root. # See https://www.kernel.org/doc/html/latest/filesystems/overlayfs.html#sharing-and-copying-layers # and https://github.com/systemd/mkosi/issues/1841. "index=off", "metacopy=off", ] if upperdir and upperdir == "tmpfs": mount("tmpfs", dst, "tmpfs", 0, "mode=0755") with umask(~mode): os.mkdir(f"{dst}/upper", mode=mode) with umask(~0o755): os.mkdir(f"{dst}/work") options += [f"upperdir={dst}/upper", f"workdir={dst}/work"] else: if upperdir: options += [f"upperdir={upperdir}"] if workdir: options += [f"workdir={workdir}"] mount("overlayfs", dst, "overlay", 0, ",".join(options)) ANSI_HIGHLIGHT = "\x1b[0;1;39m" if os.isatty(2) else "" ANSI_NORMAL = "\x1b[0m" if os.isatty(2) else "" HELP = f"""\ mkosi-sandbox [OPTIONS...] COMMAND [ARGUMENTS...] {ANSI_HIGHLIGHT}Run the specified command in a custom sandbox.{ANSI_NORMAL} -h --help Show this help --version Show package version --tmpfs DST Mount a new tmpfs on DST --dev DST Mount dev on DST --proc DST Mount procfs on DST --dir DST Create a new directory at DST --bind SRC DST Bind mount the host path SRC to DST --bind-try SRC DST Bind mount the host path SRC to DST if it exists --ro-bind SRC DST Bind mount the host path SRC to DST read-only --ro-bind-try SRC DST Bind mount the host path SRC to DST read-only if it exists --symlink SRC DST Create a symlink at DST pointing to SRC --write DATA DST Write DATA to DST --overlay-lowerdir DIR Add a lower directory for the next overlayfs mount --overlay-upperdir DIR Set the upper directory for the next overlayfs mount --overlay-workdir DIR Set the working directory for the next overlayfs mount --overlay DST Mount an overlay filesystem at DST --unsetenv NAME Unset the environment variable with name NAME --setenv NAME VALUE Set the environment variable with name NAME to VALUE --chdir DIR Change the working directory in the sandbox to DIR --same-dir Change the working directory in the sandbox to $PWD --become-root Map the current user/group to root:root in the sandbox --suppress-chown Make chown() syscalls in the sandbox a noop --unshare-net Unshare the network namespace if possible --unshare-ipc Unshare the IPC namespace if possible --exec-fd FD Close FD before execve() See the mkosi-sandbox(1) man page for details.\ """ UNSHARE_EPERM_MSG = f"""\ {Style.red}mkosi was forbidden to unshare namespaces{Style.reset}. This probably means your distribution has restricted unprivileged user namespaces. 
Please consult the REQUIREMENTS section of the mkosi man page, e.g. via "mkosi documentation", for workarounds.\ """ def main() -> None: # We don't use argparse as it takes +- 10ms to import and since this is primarily for internal # use, it's not necessary to have amazing UX for this CLI interface so it's trivial to write # ourselves. argv = list(reversed(sys.argv[1:])) fsops: list[FSOperation] = [] setenv = [] unsetenv = [] lowerdirs = [] upperdir = "" workdir = "" chdir = None become_root = suppress_chown = unshare_net = unshare_ipc = False ttyname = os.ttyname(2) if os.isatty(2) else "" while argv: arg = argv.pop() if arg == "--": break if arg in ("-h", "--help"): print(HELP, file=sys.stderr) sys.exit(0) elif arg == "--version": print(__version__, file=sys.stderr) sys.exit(0) if arg == "--tmpfs": fsops.append(TmpfsOperation(argv.pop())) elif arg == "--dev": fsops.append(DevOperation(ttyname, argv.pop())) elif arg == "--proc": fsops.append(ProcOperation(argv.pop())) elif arg == "--dir": fsops.append(DirOperation(argv.pop())) elif arg in ("--bind", "--ro-bind", "--bind-try", "--ro-bind-try"): readonly = arg.startswith("--ro") required = not arg.endswith("-try") fsops.append(BindOperation(argv.pop(), argv.pop(), readonly=readonly, required=required)) elif arg == "--symlink": fsops.append(SymlinkOperation(argv.pop(), argv.pop())) elif arg == "--write": fsops.append(WriteOperation(argv.pop(), argv.pop())) elif arg == "--overlay-lowerdir": lowerdirs.append(argv.pop()) elif arg == "--overlay-upperdir": upperdir = argv.pop() elif arg == "--overlay-workdir": workdir = argv.pop() elif arg == "--overlay": fsops.append(OverlayOperation(tuple(reversed(lowerdirs)), upperdir, workdir, argv.pop())) upperdir = "" workdir = "" lowerdirs = [] elif arg == "--unsetenv": unsetenv.append(argv.pop()) elif arg == "--setenv": setenv.append((argv.pop(), argv.pop())) elif arg == "--chdir": chdir = argv.pop() elif arg == "--same-dir": chdir = os.getcwd() elif arg == "--become-root": become_root = True elif arg == "--suppress-chown": suppress_chown = True elif arg == "--unshare-net": unshare_net = True elif arg == "--unshare-ipc": unshare_ipc = True elif arg == "--exec-fd": fd_make_cloexec(int(argv.pop())) elif arg.startswith("-"): raise ValueError(f"Unrecognized option {arg}") else: argv.append(arg) break argv.reverse() argv = argv or ["bash"] # Make sure all destination paths are absolute. for fsop in fsops: if fsop.dst[0] != "/": raise ValueError(f"{fsop.dst} is not an absolute path") fsops = FSOperation.optimize(fsops) for k, v in setenv: os.environ[k] = v for e in unsetenv: if e in os.environ: del os.environ[e] # If $LISTEN_FDS is in the environment, let's automatically set $LISTEN_PID to the correct pid as well. if "LISTEN_FDS" in os.environ: os.environ["LISTEN_PID"] = str(os.getpid()) namespaces = CLONE_NEWNS if unshare_net and have_effective_cap(CAP_NET_ADMIN): namespaces |= CLONE_NEWNET if unshare_ipc: namespaces |= CLONE_NEWIPC userns = acquire_privileges(become_root=become_root) # If we're root in a user namespace with a single user, we're still not going to be able to # chown() stuff, so check for that and apply the seccomp filter as well in that case. if suppress_chown and (userns or userns_has_single_user()): seccomp_suppress_chown() try: unshare(namespaces) except OSError as e: # This can happen here as well as in become_user, it depends on exactly # how the userns restrictions are implemented. 
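            # Known examples are Debian's kernel.unprivileged_userns_clone sysctl and the
            # AppArmor-based user namespace restrictions on recent Ubuntu releases, but the
            # exact mechanism (and thus where the EPERM surfaces) varies per distribution.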
if e.errno == EPERM: print(UNSHARE_EPERM_MSG, file=sys.stderr) raise # If we unshared the user namespace the mount propagation of root is changed to slave automatically. if not userns: mount("", "/", "", MS_SLAVE | MS_REC, "") # We need a workspace to setup the sandbox, the easiest way to do this in a tmpfs, since it's # automatically cleaned up. We need a mountpoint to put the workspace on and it can't be root, # so let's use /tmp which is almost guaranteed to exist. mount("tmpfs", "/tmp", "tmpfs", 0, "") os.chdir("/tmp") with umask(~0o755): # This is where we set up the sandbox rootfs os.mkdir("newroot") # This is the old rootfs which is used as the source for mounts in the new rootfs. os.mkdir("oldroot") # Make sure that newroot is a mountpoint. mount("newroot", "newroot", "", MS_BIND | MS_REC, "") # Make the workspace in /tmp / and put the old rootfs in oldroot. if libc.pivot_root(b".", b"oldroot") < 0: # pivot_root() can fail in the initramfs since / isn't a mountpoint there, so let's fall # back to MS_MOVE if that's the case. # First we move the old rootfs to oldroot. mount("/", "oldroot", "", MS_BIND | MS_REC, "") # Then we move the workspace (/tmp) to /. mount(".", "/", "", MS_MOVE, "") # chroot and chdir to fully make the workspace the new root. os.chroot(".") os.chdir(".") # When we use MS_MOVE we have to unmount oldroot/tmp manually to reveal the original /tmp # again as it might contain stuff that we want to mount into the sandbox. umount2("oldroot/tmp", MNT_DETACH) for fsop in fsops: fsop.execute("oldroot", "newroot") # Now that we're done setting up the sandbox let's pivot root into newroot to make it the new # root. We use the pivot_root(".", ".") process described in the pivot_root() man page. os.chdir("newroot") # We're guaranteed to have / be a mount when we get here, so pivot_root() won't fail anymore, # even if we're in the initramfs. if libc.pivot_root(b".", b".") < 0: oserror("pivot_root") # As documented in the pivot_root() man page, this will unmount the old rootfs. umount2(".", MNT_DETACH) # Avoid surprises by making sure the sandbox's mount propagation is shared. This doesn't # actually mean mounts get propagated into the host. Instead, a new mount propagation peer # group is set up. mount("", ".", "", MS_SHARED | MS_REC, "") if chdir: os.chdir(chdir) try: os.execvp(argv[0], argv) except OSError as e: # Let's return a recognizable error when the binary we're going to execute is not found. # We use 127 as that's the exit code used by shells when a program to execute is not found. 
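        # (Shells distinguish 127, command not found, from 126, found but not executable;
        # only the 127 case is special-cased here.)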
if e.errno == ENOENT: sys.exit(127) raise if __name__ == "__main__": main() mkosi-25.3/mkosi/sysupdate.py000066400000000000000000000056401474711424400163170ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import subprocess import sys import tempfile from pathlib import Path from mkosi.config import Args, ArtifactOutput, Config from mkosi.log import die from mkosi.run import run from mkosi.user import become_root_cmd from mkosi.util import PathString def run_sysupdate(args: Args, config: Config) -> None: if ArtifactOutput.partitions not in config.split_artifacts: die("SplitArtifacts=partitions must be set to be able to use mkosi sysupdate") if not config.sysupdate_dir: die( "No sysupdate definitions directory specified", hint="Specify a directory containing systemd-sysupdate transfer definitions with " "SysupdateDirectory=", ) if not (sysupdate := config.find_binary("systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate")): die("Could not find systemd-sysupdate") with tempfile.TemporaryDirectory() as tmp: if config.tools() != Path("/"): # We explicitly run this without a sandbox, because / has to be the original root mountpoint for # bootctl --print-root-device to work properly. blockdev = run(["bootctl", "--print-root-device"], stdout=subprocess.PIPE).stdout.strip() # If /run/systemd/volatile-root exists, systemd skips its root block device detection logic and # uses whatever block device /run/systemd/volatile-root points to instead. Let's make use of that # when using a tools tree as in that case the block device detection logic doesn't work properly. (Path(tmp) / "volatile-root").symlink_to(blockdev) cmd: list[PathString] = [ sysupdate, "--definitions", config.sysupdate_dir, "--transfer-source", config.output_dir_or_cwd(), *args.cmdline, ] # fmt: skip run( cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox( devices=True, network=True, relaxed=True, setup=become_root_cmd(), options=[ *(["--bind", "/boot", "/boot"] if Path("/boot").exists() else []), *(["--bind", "/efi", "/efi"] if Path("/efi").exists() else []), *( [ # Make sure systemd-sysupdate parses os-release from the host and not the tools # tree. 
"--bind", "/usr/lib/os-release", "/usr/lib/os-release", "--bind", tmp, "/run/systemd", ] if config.tools() != Path("/") else [] ), "--same-dir", ], ), ) # fmt: skip mkosi-25.3/mkosi/tree.py000066400000000000000000000144171474711424400152370ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import errno import logging import shutil import subprocess import tempfile from collections.abc import Iterator from pathlib import Path from mkosi.config import ConfigFeature from mkosi.log import ARG_DEBUG, die from mkosi.run import SandboxProtocol, nosandbox, run, workdir from mkosi.sandbox import BTRFS_SUPER_MAGIC, statfs from mkosi.util import PathString, flatten from mkosi.versioncomp import GenericVersion def is_subvolume(path: Path) -> bool: return path.is_dir() and path.stat().st_ino == 256 and statfs(str(path)) == BTRFS_SUPER_MAGIC def cp_version(*, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: return GenericVersion( run( ["cp", "--version"], sandbox=sandbox(), stdout=subprocess.PIPE, ) .stdout.splitlines()[0] .split()[3] ) def make_tree( path: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: path = path.absolute() if statfs(str(path.parent)) != BTRFS_SUPER_MAGIC: if use_subvolumes == ConfigFeature.enabled: die(f"Subvolumes requested but {path} is not located on a btrfs filesystem") path.mkdir() return path if use_subvolumes != ConfigFeature.disabled: result = run( ["btrfs", "subvolume", "create", workdir(path, sandbox)], sandbox=sandbox(options=["--bind", path.parent, workdir(path.parent, sandbox)]), check=use_subvolumes == ConfigFeature.enabled, ).returncode else: result = 1 if result != 0: path.mkdir() return path @contextlib.contextmanager def preserve_target_directories_stat(src: Path, dst: Path) -> Iterator[None]: dirs = [p for d in src.glob("**/") if (dst / (p := d.relative_to(src))).exists()] with tempfile.TemporaryDirectory() as tmp: for d in dirs: (tmp / d).mkdir(exist_ok=True) shutil.copystat(dst / d, tmp / d) yield for d in dirs: shutil.copystat(tmp / d, dst / d) def copy_tree( src: Path, dst: Path, *, preserve: bool = True, dereference: bool = False, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: src = src.absolute() dst = dst.absolute() options: list[PathString] = [ "--ro-bind", src, workdir(src, sandbox), "--bind", dst.parent, workdir(dst.parent, sandbox), ] # fmt: skip def copy() -> None: cmdline: list[PathString] = [ "cp", "--recursive", "--dereference" if dereference else "--no-dereference", f"--preserve=mode,links{',timestamps,ownership,xattr' if preserve else ''}", "--reflink=auto", "--copy-contents", workdir(src, sandbox), workdir(dst, sandbox), ] if dst.exists() and dst.is_dir() and any(dst.iterdir()) and cp_version(sandbox=sandbox) >= "9.5": cmdline += ["--keep-directory-symlink"] # If the source and destination are both directories, we want to merge the source directory with the # destination directory. If the source if a file and the destination is a directory, we want to copy # the source inside the directory. if src.is_dir(): cmdline += ["--no-target-directory"] run(cmdline, sandbox=sandbox(options=options)) # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume. 
if ( use_subvolumes == ConfigFeature.disabled or not preserve or not is_subvolume(src) or (dst.exists() and (not dst.is_dir() or any(dst.iterdir()))) ): with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext(): copy() return dst # btrfs can't snapshot to an existing directory so make sure the destination does not exist. if dst.exists(): dst.rmdir() result = run( ["btrfs", "subvolume", "snapshot", workdir(src, sandbox), workdir(dst, sandbox)], check=use_subvolumes == ConfigFeature.enabled, sandbox=sandbox(options=options), ).returncode if result != 0: with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext(): copy() return dst def rmtree(*paths: Path, sandbox: SandboxProtocol = nosandbox) -> None: if not paths: return paths = tuple(p.absolute() for p in paths) if subvolumes := sorted({p for p in paths if p.exists() and is_subvolume(p)}): # Silence and ignore failures since when not running as root, this will fail with a permission error # unless the btrfs filesystem is mounted with user_subvol_rm_allowed. run( ["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)], check=False, sandbox=sandbox( options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes), ), stdout=subprocess.DEVNULL if not ARG_DEBUG.get() else None, stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None, ) filtered = sorted({p for p in paths if p.exists() or p.is_symlink()}) if filtered: run( ["rm", "-rf", "--", *(workdir(p, sandbox) for p in filtered)], sandbox=sandbox( options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in filtered), ), ) def move_tree( src: Path, dst: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, sandbox: SandboxProtocol = nosandbox, ) -> Path: src = src.absolute() dst = dst.absolute() if src == dst: return dst if dst.is_dir(): dst = dst / src.name try: src.rename(dst) except OSError as e: if e.errno != errno.EXDEV: raise e logging.info( f"Could not rename {src} to {dst} as they are located on different devices, " "falling back to copying" ) copy_tree(src, dst, use_subvolumes=use_subvolumes, sandbox=sandbox) rmtree(src, sandbox=sandbox) return dst mkosi-25.3/mkosi/user.py000066400000000000000000000143411474711424400152520ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import fcntl import os import pwd import tempfile from pathlib import Path from mkosi.log import die from mkosi.run import find_binary, spawn from mkosi.sandbox import CLONE_NEWUSER, unshare from mkosi.util import flock, parents_below SUBRANGE = 65536 class INVOKING_USER: @classmethod def name(cls) -> str: try: return pwd.getpwuid(os.getuid()).pw_name except KeyError: if os.getuid() == 0: return "root" if not (user := os.getenv("USER")): die(f"Could not find user name for UID {os.getuid()}") return user @classmethod def home(cls) -> Path: if os.getuid() == 0 and Path.cwd().is_relative_to("/home") and len(Path.cwd().parents) > 2: return list(Path.cwd().parents)[-3] try: return Path(pwd.getpwuid(os.getuid()).pw_dir or "/") except KeyError: if not (home := os.getenv("HOME")): die(f"Could not find home directory for UID {os.getuid()}") return Path(home) @classmethod def is_regular_user(cls, uid: int) -> bool: return uid >= 1000 @classmethod def cache_dir(cls) -> Path: if (env := os.getenv("XDG_CACHE_HOME")) or (env := os.getenv("CACHE_DIRECTORY")): cache = Path(env) elif cls.is_regular_user(os.getuid()) and cls.home() != Path("/"): cache = cls.home() / 
".cache" elif os.getuid() == 0 and Path.cwd().is_relative_to("/root") and "XDG_SESSION_ID" in os.environ: cache = Path("/root/.cache") else: cache = Path("/var/cache") return cache / "mkosi" @classmethod def runtime_dir(cls) -> Path: if (env := os.getenv("XDG_RUNTIME_DIR")) or (env := os.getenv("RUNTIME_DIRECTORY")): d = Path(env) elif cls.is_regular_user(os.getuid()): d = Path(f"/run/user/{os.getuid()}") else: d = Path("/run") return d / "mkosi" @classmethod def chown(cls, path: Path) -> None: # If we created a file/directory in a parent directory owned by a regular user, make sure the path # and any parent directories are owned by the invoking user as well. if q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None): st = q.stat() os.chown(path, st.st_uid, st.st_gid) for parent in parents_below(path, q): os.chown(parent, st.st_uid, st.st_gid) def read_subrange(path: Path) -> int: if not path.exists(): die(f"{path} does not exist, cannot allocate subuid/subgid user namespace") uid = str(os.getuid()) try: user = pwd.getpwuid(os.getuid()).pw_name except KeyError: user = None for line in path.read_text().splitlines(): name, start, count = line.split(":") if name == uid or name == user: break else: die(f"No mapping found for {user or uid} in {path}") if int(count) < SUBRANGE: die( f"subuid/subgid range length must be at least {SUBRANGE}, " f"got {count} for {user or uid} from line '{line}'" ) return int(start) def become_root_in_subuid_range() -> None: """ Set up a new user namespace mapping using /etc/subuid and /etc/subgid. The current user is mapped to root and the current process becomes the root user in the new user namespace. The other IDs will be mapped through. """ if os.getuid() == 0: return subuid = read_subrange(Path("/etc/subuid")) subgid = read_subrange(Path("/etc/subgid")) pid = os.getpid() with tempfile.NamedTemporaryFile(prefix="mkosi-uidmap-lock-") as lockfile: lock = Path(lockfile.name) # We map the private UID range configured in /etc/subuid and /etc/subgid into the user namespace # using newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi # to root so that we can access files and directories from the current user from within the user # namespace. newuidmap = [ "flock", "--exclusive", "--close", lock, "newuidmap", pid, 0, os.getuid(), 1, 1, subuid + 1, SUBRANGE - 1, ] # fmt: skip newgidmap = [ "flock", "--exclusive", "--close", lock, "newgidmap", pid, 0, os.getgid(), 1, 1, subgid + 1, SUBRANGE - 1, ] # fmt: skip # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid # mapping to the process in the user namespace. The mapping can only be assigned after the user # namespace has been unshared. To make this work, we first lock a temporary file, then spawn the # newuidmap and newgidmap processes, which we execute using flock so they don't execute before they # can get a lock on the same temporary file, then we unshare the user namespace and finally we unlock # the temporary file, which allows the newuidmap and newgidmap processes to execute. we then wait for # the processes to finish before continuing. 
with ( flock(lock) as fd, spawn([str(x) for x in newuidmap]) as uidmap, spawn([str(x) for x in newgidmap]) as gidmap, ): unshare(CLONE_NEWUSER) fcntl.flock(fd, fcntl.LOCK_UN) uidmap.wait() gidmap.wait() os.setresuid(0, 0, 0) os.setresgid(0, 0, 0) os.setgroups([0]) def become_root_in_subuid_range_cmd() -> list[str]: if os.getuid() == 0: return [] subuid = read_subrange(Path("/etc/subuid")) subgid = read_subrange(Path("/etc/subgid")) cmd = [ "unshare", "--setuid", "0", "--setgid", "0", "--map-users", f"0:{os.getuid()}:1", "--map-users", f"1:{subuid + 1}:{SUBRANGE - 1}", "--map-groups", f"0:{os.getgid()}:1", "--map-groups", f"1:{subgid + 1}:{SUBRANGE - 1}", "--keep-caps", ] # fmt: skip return [str(x) for x in cmd] def become_root_cmd() -> list[str]: if os.getuid() == 0: return [] return ["run0"] if find_binary("run0") else ["sudo"] mkosi-25.3/mkosi/util.py000066400000000000000000000172501474711424400152530ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import ast import contextlib import copy import enum import errno import fcntl import functools import hashlib import importlib import importlib.resources import itertools import logging import os import re import resource import stat import tempfile from collections.abc import Hashable, Iterable, Iterator, Mapping, Sequence from pathlib import Path from types import ModuleType from typing import IO, Any, Callable, Optional, Protocol, TypeVar, Union from mkosi.backport import as_file from mkosi.log import die T = TypeVar("T") V = TypeVar("V") S = TypeVar("S", bound=Hashable) # Borrowed from https://github.com/python/typeshed/blob/3d14016085aed8bcf0cf67e9e5a70790ce1ad8ea/stdlib/3/subprocess.pyi#L24 _FILE = Union[None, int, IO[Any]] PathString = Union[Path, str] # Borrowed from # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L19 # and # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224 _T_co = TypeVar("_T_co", covariant=True) class SupportsRead(Protocol[_T_co]): def read(self, __length: int = ...) -> _T_co: ... 
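# Illustrative sketch (hypothetical helper, not used anywhere in mkosi): a Protocol is
# matched structurally, so binary file objects and io.BytesIO both satisfy
# SupportsRead[bytes] without inheriting from any common base class:
#
#     def read_all(src: SupportsRead[bytes]) -> bytes:
#         chunks = []
#         while chunk := src.read(65536):
#             chunks.append(chunk)
#         return b"".join(chunks)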
def dictify(f: Callable[..., Iterator[tuple[T, V]]]) -> Callable[..., dict[T, V]]: def wrapper(*args: Any, **kwargs: Any) -> dict[T, V]: return dict(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def tuplify(f: Callable[..., Iterable[T]]) -> Callable[..., tuple[T, ...]]: def wrapper(*args: Any, **kwargs: Any) -> tuple[T, ...]: return tuple(f(*args, **kwargs)) return functools.update_wrapper(wrapper, f) def one_zero(b: bool) -> str: return "1" if b else "0" def is_power_of_2(x: int) -> bool: return x > 0 and (x & x - 1 == 0) def round_up(x: int, blocksize: int = 4096) -> int: return (x + blocksize - 1) // blocksize * blocksize def startswith(s: str, prefix: str) -> Optional[str]: if s.startswith(prefix): return s.removeprefix(prefix) return None @dictify def read_env_file(path: PathString) -> Iterator[tuple[str, str]]: with Path(path).open() as f: for line_number, line in enumerate(f, start=1): line = line.rstrip() if not line or line.startswith("#"): continue if m := re.match(r"([A-Z][A-Z_0-9]+)=(.*)", line): name, val = m.groups() if val and val[0] in "\"'": val = ast.literal_eval(val) yield name, val else: logging.info(f"{path}:{line_number}: bad line {line!r}") def format_rlimit(rlimit: int) -> str: limits = resource.getrlimit(rlimit) soft = "infinity" if limits[0] == resource.RLIM_INFINITY else str(limits[0]) hard = "infinity" if limits[1] == resource.RLIM_INFINITY else str(limits[1]) return f"{soft}:{hard}" def sort_packages(packages: Iterable[str]) -> list[str]: """Sorts packages: normal first, paths second, conditional third""" m = {"(": 2, "/": 1} return sorted(packages, key=lambda name: (m.get(name[0], 0), name)) def flatten(lists: Iterable[Iterable[T]]) -> list[T]: """Flatten a sequence of sequences into a single list.""" return list(itertools.chain.from_iterable(lists)) @contextlib.contextmanager def chdir(directory: PathString) -> Iterator[None]: old = Path.cwd() if old == directory: yield return try: os.chdir(directory) yield finally: os.chdir(old) def make_executable(*paths: Path) -> None: for path in paths: st = path.stat() os.chmod(path, st.st_mode | stat.S_IEXEC) @contextlib.contextmanager def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]: fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY) try: fcntl.fcntl(fd, fcntl.FD_CLOEXEC) logging.debug(f"Acquiring lock on {path}") fcntl.flock(fd, flags) logging.debug(f"Acquired lock on {path}") yield fd finally: os.close(fd) @contextlib.contextmanager def flock_or_die(path: Path) -> Iterator[Path]: try: with flock(path, fcntl.LOCK_EX | fcntl.LOCK_NB): yield path except OSError as e: if e.errno != errno.EWOULDBLOCK: raise e die( f"Cannot lock {path} as it is locked by another process", hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting " "multiple instances of the same image", ) @contextlib.contextmanager def scopedenv(env: Mapping[str, Any]) -> Iterator[None]: old = copy.deepcopy(os.environ) os.environ |= env # python caches the default temporary directory so when we might modify TMPDIR we have to make sure it # gets recalculated (see https://docs.python.org/3/library/tempfile.html#tempfile.tempdir). tempfile.tempdir = None try: yield finally: os.environ = old tempfile.tempdir = None class StrEnum(enum.Enum): def __str__(self) -> str: assert isinstance(self.value, str) return self.value # Used by enum.auto() to get the next value. 
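    # For example (hypothetical subclass): in `class Compression(StrEnum): zstd = enum.auto()`
    # the generated value is "zstd", and a member named `some_value` would become "some-value".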
@staticmethod def _generate_next_value_(name: str, start: int, count: int, last_values: Sequence[str]) -> str: return name.replace("_", "-") @classmethod def values(cls) -> list[str]: return list(s.replace("_", "-") for s in map(str, cls.__members__)) @classmethod def choices(cls) -> list[str]: return [*cls.values(), ""] def parents_below(path: Path, below: Path) -> list[Path]: parents = list(path.parents) return parents[: parents.index(below)] @contextlib.contextmanager def resource_path(mod: ModuleType) -> Iterator[Path]: t = importlib.resources.files(mod) with as_file(t) as p: # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking # user so that any commands executed as the invoking user can access files within it. if ( p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and stat.S_IMODE(p.parent.stat().st_mode) == 0o700 ): p.parent.chmod(0o755) yield p def hash_file(path: Path) -> str: # TODO Replace with hashlib.file_digest after dropping support for Python 3.10. h = hashlib.sha256() b = bytearray(16 * 1024**2) mv = memoryview(b) with path.open("rb", buffering=0) as f: while n := f.readinto(mv): h.update(mv[:n]) return h.hexdigest() def try_or(fn: Callable[..., T], exception: type[Exception], default: T) -> T: try: return fn() except exception: return default def groupby(seq: Sequence[T], key: Callable[[T], S]) -> list[tuple[S, list[T]]]: grouped: dict[S, list[T]] = {} for i in seq: k = key(i) if k not in grouped: grouped[k] = [] grouped[k].append(i) return [(key, group) for key, group in grouped.items()] def current_home_dir() -> Optional[Path]: home = Path(h) if (h := os.getenv("HOME")) else None if Path.cwd() in (Path("/"), Path("/home")): return home if Path.cwd().is_relative_to("/root"): return Path("/root") if Path.cwd().is_relative_to("/home"): # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards. # TODO: Remove list() when we depend on Python 3.10 or newer. return list(Path.cwd().parents)[-3] return home def unique(seq: Sequence[T]) -> list[T]: return list(dict.fromkeys(seq)) def mandatory_variable(name: str) -> str: try: return os.environ[name] except KeyError: die(f"${name} must be set in the environment") mkosi-25.3/mkosi/versioncomp.py000066400000000000000000000155631474711424400166470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import functools import itertools import string from typing import Final @functools.total_ordering class GenericVersion: # These constants follow the convention of the return value of rpmdev-vercmp that are followed # by systemd-analyze compare-versions when called with only two arguments (without a comparison # operator), recreated in the compare_versions method. 
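    # For example, per the UAPI group version format specification implemented below:
    # GenericVersion("252~rc1") < GenericVersion("252") < GenericVersion("252.1").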
_EQUAL: Final[int] = 0 _RIGHT_SMALLER: Final[int] = 1 _LEFT_SMALLER: Final[int] = -1 def __init__(self, version: str): self._version = version @classmethod def compare_versions(cls, v1: str, v2: str) -> int: """Implements comparison according to UAPI Group Version Format Specification""" def rstrip_invalid_version_chars(s: str) -> str: valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."} for i, c in enumerate(s): if c in valid_version_chars: return s[i:] return "" def digit_prefix(s: str) -> str: return "".join(itertools.takewhile(lambda c: c in string.digits, s)) def letter_prefix(s: str) -> str: return "".join(itertools.takewhile(lambda c: c in string.ascii_letters, s)) while True: # Any characters which are outside of the set of listed above (a-z, A-Z, 0-9, -, ., ~, # ^) are skipped in both strings. In particular, this means that non-ASCII characters # that are Unicode digits or letters are skipped too. v1 = rstrip_invalid_version_chars(v1) v2 = rstrip_invalid_version_chars(v2) # If the remaining part of one of strings starts with "~": if other remaining part does # not start with ~, the string with ~ compares lower. Otherwise, both tilde characters # are skipped. if v1.startswith("~") and v2.startswith("~"): v1 = v1.removeprefix("~") v2 = v2.removeprefix("~") elif v1.startswith("~"): return cls._LEFT_SMALLER elif v2.startswith("~"): return cls._RIGHT_SMALLER # If one of the strings has ended: if the other string hasn’t, the string that has # remaining characters compares higher. Otherwise, the strings compare equal. if not v1 and not v2: return cls._EQUAL elif not v1 and v2: return cls._LEFT_SMALLER elif v1 and not v2: return cls._RIGHT_SMALLER # If the remaining part of one of strings starts with "-": if the other remaining part # does not start with -, the string with - compares lower. Otherwise, both minus # characters are skipped. if v1.startswith("-") and v2.startswith("-"): v1 = v1.removeprefix("-") v2 = v2.removeprefix("-") elif v1.startswith("-"): return cls._LEFT_SMALLER elif v2.startswith("-"): return cls._RIGHT_SMALLER # If the remaining part of one of strings starts with "^": if the other remaining part # does not start with ^, the string with ^ compares higher. Otherwise, both caret # characters are skipped. if v1.startswith("^") and v2.startswith("^"): v1 = v1.removeprefix("^") v2 = v2.removeprefix("^") elif v1.startswith("^"): # TODO: bug? return cls._LEFT_SMALLER # cls._RIGHT_SMALLER elif v2.startswith("^"): return cls._RIGHT_SMALLER # cls._LEFT_SMALLER # If the remaining part of one of strings starts with ".": if the other remaining part # does not start with ., the string with . compares lower. Otherwise, both dot # characters are skipped. if v1.startswith(".") and v2.startswith("."): v1 = v1.removeprefix(".") v2 = v2.removeprefix(".") elif v1.startswith("."): return cls._LEFT_SMALLER elif v2.startswith("."): return cls._RIGHT_SMALLER # If either of the remaining parts starts with a digit: numerical prefixes are compared # numerically. Any leading zeroes are skipped. The numerical prefixes (until the first # non-digit character) are evaluated as numbers. If one of the prefixes is empty, it # evaluates as 0. If the numbers are different, the string with the bigger number # compares higher. Otherwise, the comparison continues at the following characters at # point 1. 
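            # (So, e.g., "0010" and "10" compare equal at this step: the leading zeroes
            # are dropped by the int() conversion below.)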
v1_digit_prefix = digit_prefix(v1) v2_digit_prefix = digit_prefix(v2) if v1_digit_prefix or v2_digit_prefix: v1_digits = int(v1_digit_prefix) if v1_digit_prefix else 0 v2_digits = int(v2_digit_prefix) if v2_digit_prefix else 0 if v1_digits < v2_digits: return cls._LEFT_SMALLER elif v1_digits > v2_digits: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_digit_prefix) v2 = v2.removeprefix(v2_digit_prefix) continue # Leading alphabetical prefixes are compared alphabetically. The substrings are # compared letter-by-letter. If both letters are the same, the comparison continues # with the next letter. Capital letters compare lower than lower-case letters (A < # a). When the end of one substring has been reached (a non-letter character or the end # of the whole string), if the other substring has remaining letters, it compares # higher. Otherwise, the comparison continues at the following characters at point 1. v1_letter_prefix = letter_prefix(v1) v2_letter_prefix = letter_prefix(v2) if v1_letter_prefix < v2_letter_prefix: return cls._LEFT_SMALLER elif v1_letter_prefix > v2_letter_prefix: return cls._RIGHT_SMALLER v1 = v1.removeprefix(v1_letter_prefix) v2 = v2.removeprefix(v2_letter_prefix) def __eq__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._EQUAL def __lt__(self, other: object) -> bool: if isinstance(other, (str, int)): other = GenericVersion(str(other)) elif not isinstance(other, GenericVersion): return False return self.compare_versions(self._version, other._version) == self._LEFT_SMALLER def __str__(self) -> str: return self._version mkosi-25.3/mkosi/vmspawn.py000066400000000000000000000075021474711424400157700ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import sys from pathlib import Path from mkosi.config import ( Args, Config, Firmware, Network, OutputFormat, yes_no, ) from mkosi.log import die from mkosi.qemu import ( apply_runtime_size, copy_ephemeral, finalize_credentials, finalize_firmware, finalize_kernel_command_line_extra, finalize_register, ) from mkosi.run import run from mkosi.util import PathString, current_home_dir def run_vmspawn(args: Args, config: Config) -> None: if config.output_format not in (OutputFormat.disk, OutputFormat.esp, OutputFormat.directory): die(f"{config.output_format} images cannot be booted in systemd-vmspawn") if config.firmware == Firmware.bios: die("systemd-vmspawn cannot boot BIOS firmware images") if config.cdrom: die("systemd-vmspawn does not support CD-ROM images") if config.firmware_variables and config.firmware_variables != Path("microsoft"): die("mkosi vmspawn does not support FirmwareVariables=") kernel = config.linux firmware = finalize_firmware(config, kernel) if not kernel and firmware == Firmware.linux: kernel = config.output_dir_or_cwd() / config.output_split_kernel if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}", hint="Please install a kernel in the image or provide a --linux argument to mkosi vmspawn", ) cmdline: list[PathString] = [ "systemd-vmspawn", "--cpus", str(config.cpus or os.cpu_count()), "--ram", str(config.ram), "--kvm", config.kvm.to_tristate(), "--vsock", config.vsock.to_tristate(), "--tpm", config.tpm.to_tristate(), "--secure-boot", yes_no(config.secure_boot), "--register", yes_no(finalize_register(config)), "--console", str(config.console), ] # fmt: skip if 
config.runtime_network == Network.user: cmdline += ["--network-user-mode"] elif config.runtime_network == Network.interface: cmdline += ["--network-tap"] cmdline += [f"--set-credential={k}:{v}" for k, v in finalize_credentials(config).items()] with contextlib.ExitStack() as stack: fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) apply_runtime_size(config, fname) if config.runtime_build_sources: for t in config.build_sources: src, dst = t.with_prefix("/work/src") cmdline += ["--bind", f"{src}:{dst}"] if config.build_dir: cmdline += ["--bind", f"{config.build_dir}:/work/build"] for tree in config.runtime_trees: target = Path("/root/src") / (tree.target or "") cmdline += ["--bind", f"{tree.source}:{target}"] if config.runtime_home and (p := current_home_dir()): cmdline += ["--bind", f"{p}:/root"] if kernel: cmdline += ["--linux", kernel] if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] owner = os.stat(fname).st_uid if owner != 0: cmdline += [f"--private-users={str(owner)}"] else: cmdline += ["--image", fname] if config.forward_journal: cmdline += ["--forward-journal", config.forward_journal] cmdline += [*args.cmdline, *finalize_kernel_command_line_extra(config)] run( cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ | config.environment, log=False, sandbox=config.sandbox( network=True, devices=True, relaxed=True, options=["--same-dir"], ), ) mkosi-25.3/pyproject.toml000066400000000000000000000035141474711424400155140ustar00rootroot00000000000000[build-system] requires = ["setuptools", "setuptools-scm"] build-backend = "setuptools.build_meta" [project] name = "mkosi" authors = [ {name = "mkosi contributors", email = "systemd-devel@lists.freedesktop.org"}, ] version = "25.3" description = "Build Bespoke OS Images" readme = "README.md" requires-python = ">=3.9" license = {file = "LICENSE"} [project.optional-dependencies] bootable = [ "pefile >= 2021.9.3", ] [project.scripts] mkosi = "mkosi.__main__:main" mkosi-initrd = "mkosi.initrd:main" mkosi-sandbox = "mkosi.sandbox:main" mkosi-addon = "mkosi.addon:main" [tool.setuptools] packages = [ "mkosi", "mkosi.distributions", "mkosi.installer", "mkosi.resources", ] [tool.setuptools.package-data] "mkosi.resources" = [ "completion.*", "man/*", "mkosi-addon/**/*", "mkosi-initrd/**/*", "mkosi-tools/**/*", "mkosi-vm/**/*", "repart/**/*", ] [tool.isort] profile = "black" include_trailing_comma = true multi_line_output = 3 py_version = "39" [tool.pyright] pythonVersion = "3.9" [tool.mypy] python_version = 3.9 # belonging to --strict warn_unused_configs = true disallow_any_generics = true disallow_subclassing_any = true disallow_untyped_calls = true disallow_untyped_defs = true disallow_untyped_decorators = true disallow_incomplete_defs = true check_untyped_defs = true no_implicit_optional = true warn_redundant_casts = true warn_unused_ignores = false warn_return_any = true no_implicit_reexport = true # extra options not in --strict pretty = true show_error_codes = true show_column_numbers = true warn_unreachable = true strict_equality = true scripts_are_modules = true [tool.ruff] target-version = "py39" line-length = 109 lint.select = ["E", "F", "I", "UP"] [tool.pytest.ini_options] markers = [ "integration: mark a test as an integration test." 
] addopts = "-m \"not integration\"" mkosi-25.3/tests/000077500000000000000000000000001474711424400137375ustar00rootroot00000000000000mkosi-25.3/tests/.gitignore000066400000000000000000000000071474711424400157240ustar00rootroot00000000000000/*.pyc mkosi-25.3/tests/__init__.py000066400000000000000000000132551474711424400160560ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import dataclasses import os import subprocess import sys import uuid from collections.abc import Iterator, Mapping, Sequence from pathlib import Path from types import TracebackType from typing import Any, Optional import pytest from mkosi.distributions import Distribution from mkosi.run import CompletedProcess, fork_and_wait, run from mkosi.sandbox import acquire_privileges from mkosi.tree import rmtree from mkosi.user import INVOKING_USER from mkosi.util import _FILE, PathString @dataclasses.dataclass(frozen=True) class ImageConfig: distribution: Distribution release: str debug_shell: bool tools: Optional[Path] class Image: def __init__(self, config: ImageConfig) -> None: self.config = config def __enter__(self) -> "Image": if (cache := INVOKING_USER.cache_dir()) and os.access(cache, os.W_OK): tmpdir = cache else: tmpdir = Path("/var/tmp") self.output_dir = Path(os.getenv("TMPDIR", tmpdir)) / uuid.uuid4().hex[:16] return self def __exit__( self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: def clean() -> None: acquire_privileges() rmtree(self.output_dir) fork_and_wait(clean) def mkosi( self, verb: str, options: Sequence[PathString] = (), args: Sequence[str] = (), stdin: _FILE = None, user: Optional[int] = None, group: Optional[int] = None, check: bool = True, env: Mapping[str, str] = {}, ) -> CompletedProcess: return run( [ "python3", "-m", "mkosi", *(["--tools-tree", os.fspath(self.config.tools)] if self.config.tools else []), "--debug", *options, verb, *args, ], check=check, stdin=stdin, stdout=sys.stdout, user=user, group=group, env=os.environ | env, ) # fmt: skip def build( self, options: Sequence[PathString] = (), args: Sequence[str] = (), env: Mapping[str, str] = {}, ) -> CompletedProcess: kcl = [ "loglevel=6", "systemd.log_level=debug", "udev.log_level=info", "systemd.show_status=false", "systemd.journald.forward_to_console", "systemd.journald.max_level_console=info", "systemd.firstboot=no", "systemd.unit=mkosi-check-and-shutdown.service", ] opt: list[PathString] = [ "--distribution", str(self.config.distribution), "--release", self.config.release, *(f"--kernel-command-line={i}" for i in kcl), "--force", "--incremental=strict", "--output-directory", self.output_dir, *(["--debug-shell"] if self.config.debug_shell else []), *options, ] # fmt: skip self.mkosi("summary", opt, env=env) return self.mkosi( "build", opt, args, stdin=sys.stdin if sys.stdin.isatty() else None, env=env, ) def boot(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "boot", [ "--runtime-build-sources=no", "--ephemeral", "--register=no", *options, ], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) if result.returncode != 123: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def vm(self, options: Sequence[str] = (), args: Sequence[str] = ()) -> CompletedProcess: result = self.mkosi( "vm", [ "--runtime-build-sources=no", "--vsock=yes", # TODO: Drop once both Hyper-V bugs are fixed in Github 
Actions. "--qemu-args=-cpu max,pcid=off", "--ram=2G", "--ephemeral", "--register=no", *options, ], args, stdin=sys.stdin if sys.stdin.isatty() else None, check=False, ) if result.returncode != 123: raise subprocess.CalledProcessError(result.returncode, result.args, result.stdout, result.stderr) return result def genkey(self) -> CompletedProcess: return self.mkosi("genkey", ["--force"]) @pytest.fixture(scope="session", autouse=True) def suspend_capture_stdin(pytestconfig: Any) -> Iterator[None]: """ When --capture=no (or -s) is specified, pytest will still intercept stdin. Let's explicitly make it not capture stdin when --capture=no is specified so we can debug image boot failures by logging into the emergency shell. """ capmanager: Any = pytestconfig.pluginmanager.getplugin("capturemanager") if pytestconfig.getoption("capture") == "no": capmanager.suspend_global_capture(in_=True) yield if pytestconfig.getoption("capture") == "no": capmanager.resume_global_capture() @contextlib.contextmanager def ci_group(s: str) -> Iterator[None]: github_actions = os.getenv("GITHUB_ACTIONS") if github_actions: print(f"\n::group::{s}", flush=True) try: yield finally: if github_actions: print("\n::endgroup::", flush=True) mkosi-25.3/tests/conftest.py000066400000000000000000000035161474711424400161430ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from collections.abc import Iterator from pathlib import Path from typing import Any, cast import pytest import mkosi.resources from mkosi.config import parse_config from mkosi.distributions import Distribution, detect_distribution from mkosi.log import log_setup from mkosi.util import resource_path from . import ImageConfig, ci_group def pytest_addoption(parser: Any) -> None: parser.addoption( "-D", "--distribution", metavar="DISTRIBUTION", help="Run the integration tests for the given distribution.", default=detect_distribution()[0], type=Distribution, choices=[Distribution(d) for d in Distribution.values()], ) parser.addoption( "-R", "--release", metavar="RELEASE", help="Run the integration tests for the given release.", ) parser.addoption( "--debug-shell", help="Pass --debug-shell when running mkosi", action="store_true", ) @pytest.fixture(scope="session") def config(request: Any) -> ImageConfig: distribution = cast(Distribution, request.config.getoption("--distribution")) with resource_path(mkosi.resources) as resources: release = cast( str, request.config.getoption("--release") or parse_config(["-d", str(distribution)], resources=resources)[1][0].release, ) return ImageConfig( distribution=distribution, release=release, debug_shell=request.config.getoption("--debug-shell"), tools=p if (p := Path("mkosi.tools")).exists() else None, ) @pytest.fixture(autouse=True) def ci_sections(request: Any) -> Iterator[None]: with ci_group(request.node.name): yield @pytest.fixture(scope="session", autouse=True) def logging() -> None: log_setup() mkosi-25.3/tests/test_boot.py000066400000000000000000000061051474711424400163150ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import subprocess import pytest from mkosi.config import Bootloader, Firmware, OutputFormat from mkosi.distributions import Distribution from mkosi.qemu import find_virtiofsd from mkosi.run import find_binary, run from mkosi.sandbox import userns_has_single_user from mkosi.versioncomp import GenericVersion from . 
import Image, ImageConfig pytestmark = pytest.mark.integration def have_vmspawn() -> bool: return find_binary("systemd-vmspawn") is not None and ( GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256 ) @pytest.mark.parametrize("format", [f for f in OutputFormat if not f.is_extension_image()]) def test_format(config: ImageConfig, format: OutputFormat) -> None: with Image(config) as image: if image.config.distribution == Distribution.rhel_ubi and format in ( OutputFormat.esp, OutputFormat.uki, ): pytest.skip("Cannot build RHEL-UBI images with format 'esp' or 'uki'") image.genkey() image.build(options=["--format", str(format)]) if format in (OutputFormat.disk, OutputFormat.directory) and os.getuid() == 0: # systemd-resolved is enabled by default in Arch/Debian/Ubuntu (systemd default preset) but fails # to start in a systemd-nspawn container with --private-users so we mask it out here to avoid CI # failures. # FIXME: Remove when Arch/Debian/Ubuntu ship systemd v253 args = ["systemd.mask=systemd-resolved.service"] if format == OutputFormat.directory else [] image.boot(args=args) if format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp): pytest.skip("Default image is too large to be able to boot in CPIO/UKI/ESP format") if image.config.distribution == Distribution.rhel_ubi: return if format in (OutputFormat.tar, OutputFormat.oci, OutputFormat.none, OutputFormat.portable): return if format == OutputFormat.directory: if not find_virtiofsd(): pytest.skip("virtiofsd is not installed, cannot boot from directory output") if userns_has_single_user(): pytest.skip("Running in user namespace with single user, cannot boot from directory") return image.vm() if have_vmspawn() and format in (OutputFormat.disk, OutputFormat.directory): image.vm(options=["--vmm=vmspawn"]) if format != OutputFormat.disk: return image.vm(["--firmware=bios"]) @pytest.mark.parametrize("bootloader", Bootloader) def test_bootloader(config: ImageConfig, bootloader: Bootloader) -> None: if config.distribution == Distribution.rhel_ubi or bootloader.is_signed(): return firmware = Firmware.linux if bootloader == Bootloader.none else Firmware.auto with Image(config) as image: image.genkey() image.build(["--format=disk", "--bootloader", str(bootloader)]) image.vm(["--firmware", str(firmware)]) mkosi-25.3/tests/test_config.py000066400000000000000000001123631474711424400166230ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import argparse import itertools import logging import operator import os from pathlib import Path import pytest from mkosi import expand_kernel_specifiers from mkosi.config import ( Architecture, ArtifactOutput, Compression, Config, ConfigFeature, ConfigTree, OutputFormat, Verb, config_parse_bytes, parse_config, parse_ini, ) from mkosi.distributions import Distribution, detect_distribution from mkosi.util import chdir def test_compression_enum_creation() -> None: assert Compression["none"] == Compression.none assert Compression["zstd"] == Compression.zstd assert Compression["zst"] == Compression.zstd assert Compression["xz"] == Compression.xz assert Compression["bz2"] == Compression.bz2 assert Compression["gz"] == Compression.gz assert Compression["lz4"] == Compression.lz4 assert Compression["lzma"] == Compression.lzma def test_compression_enum_bool() -> None: assert not bool(Compression.none) assert bool(Compression.zstd) assert bool(Compression.xz) assert bool(Compression.bz2) assert bool(Compression.gz) assert bool(Compression.lz4) 
assert bool(Compression.lzma) def test_compression_enum_str() -> None: assert str(Compression.none) == "none" assert str(Compression.zstd) == "zstd" assert str(Compression.zst) == "zstd" assert str(Compression.xz) == "xz" assert str(Compression.bz2) == "bz2" assert str(Compression.gz) == "gz" assert str(Compression.lz4) == "lz4" assert str(Compression.lzma) == "lzma" def test_parse_ini(tmp_path: Path) -> None: p = tmp_path / "ini" p.write_text( """\ [MySection] Value=abc Other=def ALLCAPS=txt # Comment [EmptySection] [AnotherSection] EmptyValue= Multiline=abc def qed ord """ ) g = parse_ini(p) assert next(g) == ("MySection", "Value", "abc") assert next(g) == ("MySection", "Other", "def") assert next(g) == ("MySection", "ALLCAPS", "txt") assert next(g) == ("MySection", "", "") assert next(g) == ("EmptySection", "", "") assert next(g) == ("AnotherSection", "EmptyValue", "") assert next(g) == ("AnotherSection", "Multiline", "abc\ndef\nqed\nord") def test_parse_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Architecture=arm64 Repositories=epel,epel-next [Content] Packages=abc [Build] Environment=MY_KEY=MY_VALUE [Output] Format=cpio ImageId=base [Runtime] Credentials=my.cred=my.value """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.ubuntu assert config.architecture == Architecture.arm64 assert config.packages == ["abc"] assert config.output_format == OutputFormat.cpio assert config.image_id == "base" with chdir(d): _, [config] = parse_config( [ "--distribution", "fedora", "--environment", "MY_KEY=CLI_VALUE", "--credential", "my.cred=cli.value", "--repositories", "universe", ] ) # fmt: skip # Values from the CLI should take priority. assert config.distribution == Distribution.fedora assert config.environment["MY_KEY"] == "CLI_VALUE" assert config.credentials["my.cred"] == "cli.value" assert config.repositories == ["epel", "epel-next", "universe"] with chdir(d): _, [config] = parse_config( [ "--distribution", "", "--environment", "", "--credential", "", "--repositories", "", ] ) # fmt: skip # Empty values on the CLIs resets non-collection based settings to their defaults and collection based # settings to empty collections. assert config.distribution == (detect_distribution()[0] or Distribution.custom) assert "MY_KEY" not in config.environment assert "my.cred" not in config.credentials assert config.repositories == [] (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/d1.conf").write_text( """\ [Distribution] Distribution=debian [Content] Packages=qed def [Output] ImageId=00-dropin ImageVersion=0 @Output=abc """ ) with chdir(d): _, [config] = parse_config(["--package", "last"]) # Setting a value explicitly in a dropin should override the default from mkosi.conf. assert config.distribution == Distribution.debian # Lists should be merged by appending the new values to the existing values. Any values from the CLI # should be appended to the values from the configuration files. assert config.packages == ["abc", "qed", "def", "last"] assert config.output_format == OutputFormat.cpio assert config.image_id == "00-dropin" assert config.image_version == "0" # '@' specifier should be automatically dropped. assert config.output == "abc" (d / "mkosi.version").write_text("1.2.3") (d / "mkosi.conf.d/d2.conf").write_text( """\ [Content] Packages= [Output] ImageId= """ ) with chdir(d): _, [config] = parse_config() # Test that empty assignment resets settings. 
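        # (d2.conf above assigns Packages= and ImageId= with no value, clearing whatever
        # mkosi.conf and the earlier drop-in accumulated for those settings.)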
assert config.packages == [] assert config.image_id is None # mkosi.version should only be used if no version is set explicitly. assert config.image_version == "0" (d / "mkosi.conf.d/d1.conf").unlink() with chdir(d): _, [config] = parse_config() # ImageVersion= is not set explicitly anymore, so now the version from mkosi.version should be used. assert config.image_version == "1.2.3" (d / "abc").mkdir() (d / "abc/mkosi.conf").write_text( """\ [Content] Bootable=yes BuildPackages=abc """ ) (d / "abc/mkosi.conf.d").mkdir() (d / "abc/mkosi.conf.d/abc.conf").write_text( """\ [Output] SplitArtifacts=yes """ ) with chdir(d): _, [config] = parse_config() assert config.bootable == ConfigFeature.auto assert config.split_artifacts == ArtifactOutput.compat_no() # Passing the directory should include both the main config file and the dropin. _, [config] = parse_config(["--include", os.fspath(d / "abc")] * 2) assert config.bootable == ConfigFeature.enabled assert config.split_artifacts == ArtifactOutput.compat_yes() # The same extra config should not be parsed more than once. assert config.build_packages == ["abc"] # Passing the main config file should not include the dropin. _, [config] = parse_config(["--include", os.fspath(d / "abc/mkosi.conf")]) assert config.bootable == ConfigFeature.enabled assert config.split_artifacts == ArtifactOutput.compat_no() (d / "mkosi.images").mkdir() (d / "mkosi.images/one.conf").write_text( """\ [Content] Packages=one """ ) (d / "mkosi.images/two").mkdir() (d / "mkosi.images/two/mkosi.skeleton").mkdir() (d / "mkosi.images/two/mkosi.conf").write_text( """ [Content] Packages=two [Output] ImageVersion=4.5.6 """ ) with chdir(d): _, [one, two, config] = parse_config( ["--package", "qed", "--build-package", "def", "--repositories", "cli"] ) # Universal settings should always come from the main image. assert one.distribution == config.distribution assert two.distribution == config.distribution assert one.release == config.release assert two.release == config.release # Non-universal settings should not be passed to the subimages. assert one.packages == ["one"] assert two.packages == ["two"] assert one.build_packages == [] assert two.build_packages == [] # But should apply to the main image of course. assert config.packages == ["qed"] assert config.build_packages == ["def"] # Inherited settings should be passed down to subimages but overridable by subimages. assert one.image_version == "1.2.3" assert two.image_version == "4.5.6" # Default values from subimages for universal settings should not be picked up. assert len(one.sandbox_trees) == 0 assert len(two.sandbox_trees) == 0 with chdir(d): _, [one, two, config] = parse_config(["--image-version", "7.8.9"]) # Inherited settings specified on the CLI should not override subimages that configure the setting # explicitly. 
assert config.image_version == "7.8.9" assert one.image_version == "7.8.9" assert two.image_version == "4.5.6" def test_parse_includes_once(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Content] Bootable=yes BuildPackages=abc """ ) (d / "abc.conf").write_text( """\ [Content] BuildPackages=def """ ) with chdir(d): _, [config] = parse_config(["--include", "abc.conf", "--include", "abc.conf"]) assert config.build_packages == ["def", "abc"] (d / "mkosi.images").mkdir() for n in ("one", "two"): (d / "mkosi.images" / f"{n}.conf").write_text( """\ [Config] Include=abc.conf """ ) with chdir(d): _, [one, two, config] = parse_config([]) assert one.build_packages == ["def"] assert two.build_packages == ["def"] def test_profiles(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.profiles").mkdir() (d / "mkosi.profiles/profile.conf").write_text( """\ [Distribution] Distribution=fedora [Runtime] KVM=yes """ ) (d / "mkosi.conf").write_text( """\ [Config] Profiles=profile """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Distribution] Distribution=debian """ ) with chdir(d): _, [config] = parse_config() assert config.profiles == ["profile"] # The profile should override mkosi.conf.d/ assert config.distribution == Distribution.fedora assert config.kvm == ConfigFeature.enabled (d / "mkosi.conf").unlink() with chdir(d): _, [config] = parse_config(["--profile", "profile"]) assert config.profiles == ["profile"] # The profile should override mkosi.conf.d/ assert config.distribution == Distribution.fedora assert config.kvm == ConfigFeature.enabled (d / "mkosi.conf").write_text( """\ [Config] Profiles=profile,abc """ ) (d / "mkosi.profiles/abc.conf").write_text( """\ [Match] Profiles=abc [Distribution] Distribution=opensuse """ ) with chdir(d): _, [config] = parse_config() assert config.profiles == ["profile", "abc"] assert config.distribution == Distribution.opensuse # Check that mkosi.profiles/ is parsed in subimages as well. (d / "mkosi.images/subimage/mkosi.profiles").mkdir(parents=True) (d / "mkosi.images/subimage/mkosi.profiles/abc.conf").write_text( """ [Build] Environment=Image=%I """ ) with chdir(d): _, [subimage, config] = parse_config() assert subimage.environment["Image"] == "subimage" def test_override_default(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Build] Environment=MY_KEY=MY_VALUE ToolsTree=default """ ) with chdir(d): _, [config] = parse_config(["--tools-tree", "", "--environment", ""]) assert config.tools_tree is None assert "MY_KEY" not in config.environment def test_local_config(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.local.conf").write_text( """\ [Distribution] Distribution=debian [Content] WithTests=yes """ ) with chdir(d): _, [config] = parse_config() assert config.distribution == Distribution.debian (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=fedora [Content] WithTests=no """ ) with chdir(d): _, [config] = parse_config() # Local config should take precedence over non-local config. 
assert config.distribution == Distribution.debian assert config.with_tests with chdir(d): _, [config] = parse_config(["--distribution", "fedora", "-T"]) assert config.distribution == Distribution.fedora assert not config.with_tests def test_parse_load_verb(tmp_path: Path) -> None: with chdir(tmp_path): assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["clean"])[0].verb == Verb.clean assert parse_config(["genkey"])[0].verb == Verb.genkey assert parse_config(["bump"])[0].verb == Verb.bump assert parse_config(["serve"])[0].verb == Verb.serve assert parse_config(["build"])[0].verb == Verb.build assert parse_config(["shell"])[0].verb == Verb.shell assert parse_config(["boot"])[0].verb == Verb.boot assert parse_config(["qemu"])[0].verb == Verb.qemu assert parse_config(["vm"])[0].verb == Verb.vm assert parse_config(["journalctl"])[0].verb == Verb.journalctl assert parse_config(["coredumpctl"])[0].verb == Verb.coredumpctl with pytest.raises(SystemExit): parse_config(["invalid"]) def test_os_distribution(tmp_path: Path) -> None: with chdir(tmp_path): for dist in Distribution: _, [config] = parse_config(["-d", dist.value]) assert config.distribution == dist with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d", "invalidDistro"]) with pytest.raises(tuple((argparse.ArgumentError, SystemExit))): parse_config(["-d"]) for dist in Distribution: Path("mkosi.conf").write_text(f"[Distribution]\nDistribution={dist}") _, [config] = parse_config() assert config.distribution == dist def test_parse_config_files_filter(tmp_path: Path) -> None: with chdir(tmp_path): confd = Path("mkosi.conf.d") confd.mkdir() (confd / "10-file.conf").write_text("[Content]\nPackages=yes") (confd / "20-file.noconf").write_text("[Content]\nPackages=nope") _, [config] = parse_config() assert config.packages == ["yes"] def test_compression(tmp_path: Path) -> None: with chdir(tmp_path): _, [config] = parse_config(["--format", "disk", "--compress-output", "False"]) assert config.compress_output == Compression.none def test_match_only(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|directory Format=|disk """ ) Path("mkosi.conf.d").mkdir() Path("mkosi.conf.d/10-abc.conf").write_text( """\ [Output] ImageId=abcde """ ) _, [config] = parse_config(["--format", "tar"]) assert config.image_id != "abcde" def test_match_multiple(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Format=|disk Format=|directory [Match] Architecture=|x86-64 Architecture=|arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". _, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # Only a single section is matched, so image ID should not be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "s390x"]) assert config.image_id != "abcde" # Both sections are matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=disk Architecture=x86-64 [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # Both sections are not matched, so image ID should not be "abcde". _, [config] = parse_config(["--format", "tar", "--architecture", "s390x"]) assert config.image_id != "abcde" # The first section is matched, so image ID should be "abcde". 
_, [config] = parse_config(["--format", "disk", "--architecture", "x86-64"]) assert config.image_id == "abcde" # The second section is matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "directory", "--architecture", "arm64"]) assert config.image_id == "abcde" # Parts of all section are matched, but none is matched fully, so image ID should not be "abcde". _, [config] = parse_config(["--format", "disk", "--architecture", "arm64"]) assert config.image_id != "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=|disk Format=|directory [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # The first section is matched, so image ID should be "abcde". _, [config] = parse_config(["--format", "disk"]) assert config.image_id == "abcde" Path("mkosi.conf").write_text( """\ [TriggerMatch] Format=|disk Format=|directory Architecture=x86-64 [TriggerMatch] Format=directory Architecture=arm64 [Output] ImageId=abcde """ ) # No sections are matched, so image ID should be not "abcde". _, [config] = parse_config(["--format", "disk", "--architecture=arm64"]) assert config.image_id != "abcde" # Mixing both [Match] and [TriggerMatch] Path("mkosi.conf").write_text( """\ [Match] Format=disk [TriggerMatch] Architecture=arm64 [TriggerMatch] Architecture=x86-64 [Output] ImageId=abcde """ ) # Match and first TriggerMatch sections match _, [config] = parse_config(["--format", "disk", "--architecture=arm64"]) assert config.image_id == "abcde" # Match section matches, but no TriggerMatch section matches _, [config] = parse_config(["--format", "disk", "--architecture=s390x"]) assert config.image_id != "abcde" # Second TriggerMatch section matches, but the Match section does not _, [config] = parse_config(["--format", "tar", "--architecture=x86-64"]) assert config.image_id != "abcde" def test_match_empty(tmp_path: Path) -> None: with chdir(tmp_path): Path("mkosi.conf").write_text( """\ [Match] Profiles= [Build] Environment=ABC=QED """ ) _, [config] = parse_config([]) assert config.environment.get("ABC") == "QED" _, [config] = parse_config(["--profile", "profile"]) assert config.environment.get("ABC") is None @pytest.mark.parametrize( "dist1,dist2", itertools.combinations_with_replacement([Distribution.debian, Distribution.opensuse], 2), ) def test_match_distribution(tmp_path: Path, dist1: Distribution, dist2: Distribution) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution={dist1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] Distribution={dist1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Distribution={dist2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Distribution=|{dist1} Distribution=|{dist2} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if dist1 == dist2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages @pytest.mark.parametrize("release1,release2", itertools.combinations_with_replacement([36, 37], 2)) def test_match_release(tmp_path: Path, release1: int, release2: int) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora Release={release1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") 
child1.write_text( f"""\ [Match] Release={release1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] Release={release2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] Release=|{release1} Release=|{release2} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if release1 == release2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages def test_match_build_sources(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] BuildSources=kernel BuildSources=/kernel [Output] Output=abc """ ) with chdir(d): _, [config] = parse_config(["--build-sources", ".:kernel"]) assert config.output == "abc" def test_match_repositories(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Repositories=epel [Content] Output=qed """ ) with chdir(d): _, [config] = parse_config(["--repositories", "epel,epel-next"]) assert config.output == "qed" @pytest.mark.parametrize("image1,image2", itertools.combinations_with_replacement(["image_a", "image_b"], 2)) def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( f"""\ [Distribution] Distribution=fedora [Output] ImageId={image1} """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageId={image1} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageId={image2} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageId=|{image1} ImageId=|{image2} [Content] Packages=testpkg3 """ ) child4 = Path("mkosi.conf.d/child4.conf") child4.write_text( """\ [Match] ImageId=image* [Content] Packages=testpkg4 """ ) _, [conf] = parse_config() assert "testpkg1" in conf.packages if image1 == image2: assert "testpkg2" in conf.packages else: assert "testpkg2" not in conf.packages assert "testpkg3" in conf.packages assert "testpkg4" in conf.packages @pytest.mark.parametrize( "op,version", itertools.product( ["", "==", "<", ">", "<=", ">="], [122, 123], ), ) def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: opfunc = { "==": operator.eq, "!=": operator.ne, "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, }.get(op, operator.eq) with chdir(tmp_path): parent = Path("mkosi.conf") parent.write_text( """\ [Output] ImageId=testimage ImageVersion=123 """ ) Path("mkosi.conf.d").mkdir() child1 = Path("mkosi.conf.d/child1.conf") child1.write_text( f"""\ [Match] ImageVersion={op}{version} [Content] Packages=testpkg1 """ ) child2 = Path("mkosi.conf.d/child2.conf") child2.write_text( f"""\ [Match] ImageVersion=<200 ImageVersion={op}{version} [Content] Packages=testpkg2 """ ) child3 = Path("mkosi.conf.d/child3.conf") child3.write_text( f"""\ [Match] ImageVersion=>9000 ImageVersion={op}{version} [Content] Packages=testpkg3 """ ) _, [conf] = parse_config() assert ("testpkg1" in conf.packages) == opfunc(123, version) assert ("testpkg2" in conf.packages) == opfunc(123, version) assert "testpkg3" not in conf.packages def test_match_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Match] Environment=MYENV=abc [Content] ImageId=matched """ ) with chdir(d): _, [conf] = 
parse_config(["--environment", "MYENV=abc"]) assert conf.image_id == "matched" _, [conf] = parse_config(["--environment", "MYENV=bad"]) assert conf.image_id != "matched" _, [conf] = parse_config(["--environment", "MYEN=abc"]) assert conf.image_id != "matched" _, [conf] = parse_config(["--environment", "MYEN=bad"]) assert conf.image_id != "matched" (d / "mkosi.conf").write_text( """\ [Match] Environment=MYENV [Content] ImageId=matched """ ) with chdir(d): _, [conf] = parse_config(["--environment", "MYENV=abc"]) assert conf.image_id == "matched" _, [conf] = parse_config(["--environment", "MYENV=bad"]) assert conf.image_id == "matched" _, [conf] = parse_config(["--environment", "MYEN=abc"]) assert conf.image_id != "matched" def test_paths_with_default_factory(tmp_path: Path) -> None: """ If both paths= and default_factory= are defined, default_factory= should not be used when at least one of the files/directories from paths= has been found. """ with chdir(tmp_path): Path("mkosi.sandbox.tar").touch() _, [config] = parse_config() assert config.sandbox_trees == [ ConfigTree(Path.cwd() / "mkosi.sandbox.tar", None), ] @pytest.mark.parametrize( "sections,args,warning_count", [ (["Output"], [], 0), (["Content"], [], 1), (["Content", "Output"], [], 1), (["Output", "Content"], [], 1), (["Output", "Content", "Distribution"], [], 2), (["Content"], ["--image-id=testimage"], 1), ], ) def test_wrong_section_warning( tmp_path: Path, caplog: pytest.LogCaptureFixture, sections: list[str], args: list[str], warning_count: int, ) -> None: with chdir(tmp_path): # Create a config with ImageId in the wrong section, # and sometimes in the correct section Path("mkosi.conf").write_text( "\n".join( f"""\ [{section}] ImageId=testimage """ for section in sections ) ) with caplog.at_level(logging.WARNING): # Parse the config, with --image-id sometimes given on the command line parse_config(args) assert len(caplog.records) == warning_count def test_config_parse_bytes() -> None: assert config_parse_bytes(None) is None assert config_parse_bytes("1") == 4096 assert config_parse_bytes("8000") == 8192 assert config_parse_bytes("8K") == 8192 assert config_parse_bytes("4097") == 8192 assert config_parse_bytes("1M") == 1024**2 assert config_parse_bytes("1.9M") == 1994752 assert config_parse_bytes("1G") == 1024**3 assert config_parse_bytes("7.3G") == 7838318592 with pytest.raises(SystemExit): config_parse_bytes("-1") with pytest.raises(SystemExit): config_parse_bytes("-2K") with pytest.raises(SystemExit): config_parse_bytes("-3M") with pytest.raises(SystemExit): config_parse_bytes("-4G") def test_specifiers(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Distribution] Distribution=ubuntu Release=lunar Architecture=arm64 [Output] ImageId=my-image-id ImageVersion=1.2.3 OutputDirectory=abcde Output=test [Build] Environment=Distribution=%d Release=%r Architecture=%a Image=%I ImageId=%i ImageVersion=%v OutputDirectory=%O Output=%o ConfigRootDirectory=%D ConfigRootConfdir=%C ConfigRootPwd=%P Filesystem=%F """ ) (d / "mkosi.conf.d").mkdir() (d / "mkosi.conf.d/abc.conf").write_text( """\ [Build] Environment=ConfigAbcDirectory=%D ConfigAbcConfdir=%C ConfigAbcPwd=%P """ ) (d / "mkosi.conf.d/qed").mkdir() (d / "mkosi.conf.d/qed/mkosi.conf").write_text( """ [Build] Environment=ConfigQedDirectory=%D ConfigQedConfdir=%C ConfigQedPwd=%P """ ) (d / "mkosi.images").mkdir() (d / "mkosi.images/subimage.conf").write_text( """ [Build] Environment=Image=%I """ ) with chdir(d): _, [subimage, config] = parse_config() expected = 
{ "Distribution": "ubuntu", "Release": "lunar", "Architecture": "arm64", "Image": "", "ImageId": "my-image-id", "ImageVersion": "1.2.3", "OutputDirectory": str(Path.cwd() / "abcde"), "Output": "test", "ConfigRootDirectory": os.fspath(d), "ConfigRootConfdir": os.fspath(d), "ConfigRootPwd": os.fspath(d), "ConfigAbcDirectory": os.fspath(d), "ConfigAbcConfdir": os.fspath(d / "mkosi.conf.d"), "ConfigAbcPwd": os.fspath(d), "ConfigQedDirectory": os.fspath(d), "ConfigQedConfdir": os.fspath(d / "mkosi.conf.d/qed"), "ConfigQedPwd": os.fspath(d / "mkosi.conf.d/qed"), "Filesystem": "ext4", } assert {k: v for k, v in config.environment.items() if k in expected} == expected assert subimage.environment["Image"] == "subimage" def test_kernel_specifiers(tmp_path: Path) -> None: kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 token = "MySystemImage" roothash = "67e893261799236dcf20529115ba9fae4fd7c2269e1e658d42269503e5760d38" boot_count = "3" def test_expand_kernel_specifiers(text: str) -> str: return expand_kernel_specifiers( text, kver=kver, token=token, roothash=roothash, boot_count=boot_count, ) assert test_expand_kernel_specifiers("&&") == "&" assert test_expand_kernel_specifiers("&k") == kver assert test_expand_kernel_specifiers("&e") == token assert test_expand_kernel_specifiers("&h") == roothash assert test_expand_kernel_specifiers("&c") == boot_count assert test_expand_kernel_specifiers("Image_1.0.3") == "Image_1.0.3" assert ( test_expand_kernel_specifiers("Image~&c+&h-&k-&e") == f"Image~{boot_count}+{roothash}-{kver}-{token}" ) def test_output_id_version(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Output] ImageId=output ImageVersion=1.2.3 """ ) with chdir(d): _, [config] = parse_config() assert config.output == "output_1.2.3" def test_deterministic() -> None: assert Config.default() == Config.default() def test_environment(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """\ [Config] PassEnvironment=PassThisEnv [Build] Environment=TestValue2=300 TestValue3=400 PassThisEnv=abc EnvironmentFiles=other.env """ ) (d / "mkosi.env").write_text( """\ TestValue1=90 TestValue4=99 """ ) (d / "other.env").write_text( """\ TestValue1=100 TestValue2=200 """ ) (d / "mkosi.images").mkdir() (d / "mkosi.images/sub.conf").touch() with chdir(d): _, [sub, config] = parse_config() expected = { "TestValue1": "100", # from other.env "TestValue2": "300", # from mkosi.conf "TestValue3": "400", # from mkosi.conf "TestValue4": "99", # from mkosi.env } # Only check values for keys from expected, as config.environment contains other items as well assert {k: config.environment[k] for k in expected.keys()} == expected assert config.environment_files == [Path.cwd() / "mkosi.env", Path.cwd() / "other.env"] assert sub.environment["PassThisEnv"] == "abc" assert "TestValue2" not in sub.environment def test_mkosi_version_executable(tmp_path: Path) -> None: d = tmp_path version = d / "mkosi.version" version.write_text("#!/bin/sh\necho '1.2.3'\n") with chdir(d): with pytest.raises(SystemExit) as error: _, [config] = parse_config() assert error.type is SystemExit assert error.value.code != 0 version.chmod(0o755) with chdir(d): _, [config] = parse_config() assert config.image_version == "1.2.3" def test_split_artifacts(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Output] SplitArtifacts=uki """ ) with chdir(d): _, [config] = parse_config() assert config.split_artifacts == [ArtifactOutput.uki] (d / "mkosi.conf").write_text( """ [Output] 
SplitArtifacts=uki SplitArtifacts=kernel SplitArtifacts=initrd """ ) with chdir(d): _, [config] = parse_config() assert config.split_artifacts == [ ArtifactOutput.uki, ArtifactOutput.kernel, ArtifactOutput.initrd, ] def test_split_artifacts_compat(tmp_path: Path) -> None: d = tmp_path with chdir(d): _, [config] = parse_config() assert config.split_artifacts == ArtifactOutput.compat_no() (d / "mkosi.conf").write_text( """ [Output] SplitArtifacts=yes """ ) with chdir(d): _, [config] = parse_config() assert config.split_artifacts == ArtifactOutput.compat_yes() def test_cli_collection_reset(tmp_path: Path) -> None: d = tmp_path (d / "mkosi.conf").write_text( """ [Content] Packages=abc """ ) with chdir(d): _, [config] = parse_config(["--package", ""]) assert config.packages == [] _, [config] = parse_config(["--package", "", "--package", "foo"]) assert config.packages == ["foo"] _, [config] = parse_config(["--package", "foo", "--package", "", "--package", "bar"]) assert config.packages == ["bar"] _, [config] = parse_config(["--package", "foo", "--package", ""]) assert config.packages == [] mkosi-25.3/tests/test_extension.py000066400000000000000000000015611474711424400173670ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later from pathlib import Path import pytest from mkosi.config import OutputFormat from . import Image, ImageConfig pytestmark = pytest.mark.integration @pytest.mark.parametrize("format", [f for f in OutputFormat if f.is_extension_image()]) def test_extension(config: ImageConfig, format: OutputFormat) -> None: with Image(config) as image: image.build(["--clean-package-metadata=no", "--format=directory"]) with Image(image.config) as sysext: sysext.build( [ "--directory", "", "--incremental=no", "--base-tree", Path(image.output_dir) / "image", "--overlay", "--package=lsof", f"--format={format}", ] ) # fmt: skip mkosi-25.3/tests/test_initrd.py000066400000000000000000000202661474711424400166470ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import contextlib import os import subprocess import tempfile import textwrap from collections.abc import Iterator from pathlib import Path import pytest from mkosi.distributions import Distribution from mkosi.run import run from mkosi.sandbox import umask from mkosi.tree import copy_tree from mkosi.util import PathString from . import Image, ImageConfig pytestmark = pytest.mark.integration @contextlib.contextmanager def mount(what: PathString, where: PathString) -> Iterator[Path]: where = Path(where) if not where.exists(): with umask(~0o755): where.mkdir(parents=True) run(["mount", "--no-mtab", what, where]) try: yield where finally: run(["umount", "--no-mtab", where]) @pytest.fixture(scope="module") def passphrase() -> Iterator[Path]: # We can't use tmp_path fixture because pytest creates it in a nested directory we can't access using our # unprivileged user. # TODO: Use delete_on_close=False and close() instead of flush() when we require Python 3.12 or newer. 
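    # A sketch of that 3.12+ shape (not exercised here): passing delete_on_close=False, i.e.
    # tempfile.NamedTemporaryFile(prefix="mkosi.passphrase", mode="w", delete_on_close=False),
    # would let us close() the file so its contents hit disk while the file itself is only
    # removed when the context manager exits, replacing the flush() below.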
with tempfile.NamedTemporaryFile(prefix="mkosi.passphrase", mode="w") as passphrase: passphrase.write("mkosi") passphrase.flush() st = Path.cwd().stat() os.fchown(passphrase.fileno(), st.st_uid, st.st_gid) os.fchmod(passphrase.fileno(), 0o600) yield Path(passphrase.name) def test_initrd(config: ImageConfig) -> None: with Image(config) as image: image.build(options=["--format=disk"]) image.vm() @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LVM test can only be executed as root") def test_initrd_lvm(config: ImageConfig) -> None: with Image(config) as image, contextlib.ExitStack() as stack: image.build(["--format=disk"]) lvm = Path(image.output_dir) / "lvm.raw" lvm.touch() os.truncate(lvm, 5000 * 1024**2) lodev = run( ["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE ).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run(["lvm", "pvcreate", f"{lodev}p1"]) run(["lvm", "pvs"]) run(["lvm", "vgcreate", "vg_mkosi", f"{lodev}p1"]) run(["lvm", "vgchange", "-ay", "vg_mkosi"]) run(["lvm", "vgs"]) stack.callback(lambda: run(["vgchange", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs"]) run(["udevadm", "wait", "--timeout=30", "/dev/vg_mkosi/lv0"]) run([f"mkfs.{image.config.distribution.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) src = Path(stack.enter_context(tempfile.TemporaryDirectory())) run(["systemd-dissect", "--mount", "--mkdir", Path(image.output_dir) / "image.raw", src]) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", src])) dst = Path(stack.enter_context(tempfile.TemporaryDirectory())) stack.enter_context(mount(Path("/dev/vg_mkosi/lv0"), dst)) copy_tree(src, dst) stack.close() lvm.rename(Path(image.output_dir) / "image.raw") image.vm( [ "--firmware=linux", # LVM confuses systemd-repart so we mask it for this test. "--kernel-command-line-extra=systemd.mask=systemd-repart.service", "--kernel-command-line-extra=root=LABEL=root", ] ) def test_initrd_luks(config: ImageConfig, passphrase: Path) -> None: with tempfile.TemporaryDirectory() as repartd: st = Path.cwd().stat() os.chown(repartd, st.st_uid, st.st_gid) (Path(repartd) / "00-esp.conf").write_text( textwrap.dedent( """\ [Partition] Type=esp Format=vfat CopyFiles=/boot:/ CopyFiles=/efi:/ SizeMinBytes=1G SizeMaxBytes=1G """ ) ) (Path(repartd) / "05-bios.conf").write_text( textwrap.dedent( """\ [Partition] # UUID of the grub BIOS boot partition which grubs needs on GPT to # embed itself into. 
Type=21686148-6449-6e6f-744e-656564454649 SizeMinBytes=1M SizeMaxBytes=1M """ ) ) (Path(repartd) / "10-root.conf").write_text( textwrap.dedent( f"""\ [Partition] Type=root Format={config.distribution.filesystem()} Minimize=guess Encrypt=key-file CopyFiles=/ """ ) ) with Image(config) as image: image.build(["--repart-directory", repartd, "--passphrase", passphrase, "--format=disk"]) image.vm(["--credential=cryptsetup.passphrase=mkosi"]) @pytest.mark.skipif(os.getuid() != 0, reason="mkosi-initrd LUKS+LVM test can only be executed as root") def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: with Image(config) as image, contextlib.ExitStack() as stack: image.build(["--format=disk"]) lvm = Path(image.output_dir) / "lvm.raw" lvm.touch() os.truncate(lvm, 5000 * 1024**2) lodev = run( ["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE ).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run( [ "cryptsetup", "--key-file", passphrase, "--use-random", "--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000", "luksFormat", f"{lodev}p1", ] ) # fmt: skip run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"]) stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"])) luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip() run(["lvm", "pvcreate", "/dev/mapper/lvm_root"]) run(["lvm", "pvs"]) run(["lvm", "vgcreate", "vg_mkosi", "/dev/mapper/lvm_root"]) run(["lvm", "vgchange", "-ay", "vg_mkosi"]) run(["lvm", "vgs"]) stack.callback(lambda: run(["vgchange", "-an", "vg_mkosi"])) run(["lvm", "lvcreate", "-l", "100%FREE", "-n", "lv0", "vg_mkosi"]) run(["lvm", "lvs"]) run(["udevadm", "wait", "--timeout=30", "/dev/vg_mkosi/lv0"]) run([f"mkfs.{image.config.distribution.filesystem()}", "-L", "root", "/dev/vg_mkosi/lv0"]) src = Path(stack.enter_context(tempfile.TemporaryDirectory())) run(["systemd-dissect", "--mount", "--mkdir", Path(image.output_dir) / "image.raw", src]) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", src])) dst = Path(stack.enter_context(tempfile.TemporaryDirectory())) stack.enter_context(mount(Path("/dev/vg_mkosi/lv0"), dst)) copy_tree(src, dst) stack.close() lvm.rename(Path(image.output_dir) / "image.raw") image.vm( [ "--format=disk", "--credential=cryptsetup.passphrase=mkosi", "--firmware=linux", "--kernel-command-line-extra=root=LABEL=root", f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}", ] ) def test_initrd_size(config: ImageConfig) -> None: with Image(config) as image: image.build() # The fallback value is for CentOS and related distributions. 
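        # The values below are per-distribution size ceilings in MiB; dict.get() falls back to
        # 58 MiB for any distribution not listed.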
maxsize = 1024**2 * { Distribution.fedora: 67, Distribution.debian: 62, Distribution.ubuntu: 57, Distribution.arch: 86, Distribution.opensuse: 67, }.get(config.distribution, 58) assert (Path(image.output_dir) / "image.initrd").stat().st_size <= maxsize mkosi-25.3/tests/test_json.py000066400000000000000000000447461474711424400163400ustar00rootroot00000000000000# SPDX-License-Identifier: LGPL-2.1-or-later import os import textwrap import uuid from pathlib import Path from typing import Optional import pytest from mkosi.config import ( Architecture, Args, ArtifactOutput, BiosBootloader, Bootloader, BuildSourcesEphemeral, Cacheonly, CertificateSource, CertificateSourceType, Compression, Config, ConfigFeature, ConfigTree, ConsoleMode, DocFormat, Drive, Firmware, Incremental, KeySource, KeySourceType, ManifestFormat, Network, OutputFormat, SecureBootSignTool, ShimBootloader, UKIProfile, Verb, Vmm, VsockCID, ) from mkosi.distributions import Distribution from mkosi.versioncomp import GenericVersion @pytest.mark.parametrize("path", [None, "/baz/qux"]) def test_args(path: Optional[Path]) -> None: dump = textwrap.dedent( f"""\ {{ "AutoBump": false, "Cmdline": [ "foo", "bar" ], "Debug": false, "DebugSandbox": false, "DebugShell": false, "DebugWorkspace": false, "Directory": {f'"{os.fspath(path)}"' if path is not None else "null"}, "DocFormat": "auto", "Force": 9001, "GenkeyCommonName": "test", "GenkeyValidDays": "100", "Json": false, "Pager": true, "Verb": "build", "WipeBuildDir": true }} """ ) args = Args( auto_bump=False, cmdline=["foo", "bar"], debug=False, debug_sandbox=False, debug_shell=False, debug_workspace=False, directory=Path(path) if path is not None else None, doc_format=DocFormat.auto, force=9001, genkey_common_name="test", genkey_valid_days="100", json=False, pager=True, verb=Verb.build, wipe_build_dir=True, ) assert args.to_json(indent=4, sort_keys=True) == dump.rstrip() assert Args.from_json(dump) == args def test_config() -> None: dump = textwrap.dedent( """\ { "Architecture": "ia64", "Autologin": false, "BaseTrees": [ "/hello/world" ], "BiosBootloader": "none", "Bootable": "disabled", "Bootloader": "grub", "BuildDirectory": null, "BuildPackages": [ "pkg1", "pkg2" ], "BuildScripts": [ "/path/to/buildscript" ], "BuildSources": [ { "Source": "/qux", "Target": "/frob" } ], "BuildSourcesEphemeral": "yes", "CDROM": false, "CPUs": 2, "CacheDirectory": "/is/this/the/cachedir", "CacheOnly": "always", "Checksum": false, "CleanPackageMetadata": "auto", "CleanScripts": [ "/clean" ], "CompressLevel": 3, "CompressOutput": "bz2", "ConfigureScripts": [ "/configure" ], "Console": "gui", "Credentials": { "credkey": "credval" }, "Dependencies": [ "dep1" ], "Devicetree": "freescale/imx8mm-verdin-nonwifi-dev.dtb", "Distribution": "fedora", "Drives": [ { "Directory": "/foo/bar", "FileId": "red", "Id": "abc", "Options": "abc,qed", "Size": 200 }, { "Directory": null, "FileId": "wcd", "Id": "abc", "Options": "", "Size": 200 } ], "Environment": { "BAR": "BAR", "Qux": "Qux", "foo": "foo" }, "EnvironmentFiles": [], "Ephemeral": true, "ExtraSearchPaths": [], "ExtraTrees": [], "Files": [], "FinalizeScripts": [], "Firmware": "linux", "FirmwareVariables": "/foo/bar", "Format": "uki", "ForwardJournal": "/mkosi.journal", "History": true, "Hostname": null, "Image": "default", "ImageId": "myimage", "ImageVersion": "5", "Incremental": "no", "InitrdPackages": [ "clevis" ], "InitrdVolatilePackages": [ "abc" ], "Initrds": [ "/efi/initrd1", "/efi/initrd2" ], "KVM": "auto", "KernelCommandLine": [], 
"KernelCommandLineExtra": [ "look", "im", "on", "the", "kernel", "command", "line" ], "KernelModulesExclude": [ "nvidia" ], "KernelModulesInclude": [ "loop" ], "KernelModulesIncludeHost": true, "KernelModulesInitrd": true, "KernelModulesInitrdExclude": [], "KernelModulesInitrdInclude": [], "KernelModulesInitrdIncludeHost": true, "Key": null, "Keymap": "wow, so much keymap", "Linux": null, "LocalMirror": null, "Locale": "en_C.UTF-8", "LocaleMessages": "", "Machine": "machine", "MachineId": "b58253b0-cc92-4a34-8782-bcd99b20d07f", "MakeInitrd": false, "ManifestFormat": [ "json", "changelog" ], "MicrocodeHost": true, "MinimumVersion": "123", "Mirror": null, "NSpawnSettings": null, "OpenPGPTool": "gpg", "Output": "outfile", "OutputDirectory": "/your/output/here", "OutputMode": 83, "Overlay": true, "PackageCacheDirectory": "/a/b/c", "PackageDirectories": [], "Packages": [], "PassEnvironment": [ "abc" ], "Passphrase": null, "PostInstallationScripts": [ "/bar/qux" ], "PostOutputScripts": [ "/foo/src" ], "PrepareScripts": [ "/run/foo" ], "Profiles": [ "profile" ], "ProxyClientCertificate": "/my/client/cert", "ProxyClientKey": "/my/client/key", "ProxyExclude": [ "www.example.com" ], "ProxyPeerCertificate": "/my/peer/cert", "ProxyUrl": "https://my/proxy", "QemuArgs": [], "RAM": 123, "Register": "enabled", "Release": "53", "Removable": false, "RemoveFiles": [], "RemovePackages": [ "all" ], "RepartDirectories": [], "RepartOffline": true, "Repositories": [], "RepositoryKeyCheck": false, "RepositoryKeyFetch": true, "RootPassword": [ "test1234", false ], "RootShell": "/bin/tcsh", "RuntimeBuildSources": true, "RuntimeHome": true, "RuntimeNetwork": "interface", "RuntimeScratch": "enabled", "RuntimeSize": 8589934592, "RuntimeTrees": [ { "Source": "/foo/bar", "Target": "/baz" }, { "Source": "/bar/baz", "Target": "/qux" } ], "SELinuxRelabel": "disabled", "SandboxTrees": [ { "Source": "/foo/bar", "Target": null } ], "SectorSize": null, "SecureBoot": true, "SecureBootAutoEnroll": true, "SecureBootCertificate": null, "SecureBootCertificateSource": { "Source": "", "Type": "file" }, "SecureBootKey": "/path/to/keyfile", "SecureBootKeySource": { "Source": "", "Type": "file" }, "SecureBootSignTool": "systemd-sbsign", "Seed": "7496d7d8-7f08-4a2b-96c6-ec8c43791b60", "ShimBootloader": "none", "Sign": false, "SignExpectedPcr": "disabled", "SignExpectedPcrCertificate": "/my/cert", "SignExpectedPcrCertificateSource": { "Source": "", "Type": "file" }, "SignExpectedPcrKey": "/my/key", "SignExpectedPcrKeySource": { "Source": "", "Type": "file" }, "SkeletonTrees": [ { "Source": "/foo/bar", "Target": "/" }, { "Source": "/bar/baz", "Target": "/qux" } ], "SourceDateEpoch": 12345, "SplitArtifacts": [ "uki", "kernel" ], "Ssh": false, "SshCertificate": "/path/to/cert", "SshKey": null, "SyncScripts": [ "/sync" ], "SysupdateDirectory": "/sysupdate", "TPM": "auto", "Timezone": null, "ToolsTree": null, "ToolsTreeCertificates": true, "ToolsTreeDistribution": "fedora", "ToolsTreeMirror": null, "ToolsTreePackageDirectories": [ "/abc" ], "ToolsTreePackages": [], "ToolsTreeRelease": null, "ToolsTreeRepositories": [ "abc" ], "ToolsTreeSandboxTrees": [ { "Source": "/a/b/c", "Target": "/" } ], "UnifiedKernelImageFormat": "myuki", "UnifiedKernelImageProfiles": [ { "Cmdline": [ "key=value" ], "Profile": { "key": "value" } } ], "UnifiedKernelImages": "auto", "UnitProperties": [ "PROPERTY=VALUE" ], "UseSubvolumes": "auto", "VSock": "enabled", "VSockCID": -2, "Verity": "enabled", "VerityCertificate": "/path/to/cert", "VerityCertificateSource": { 
"Source": "", "Type": "file" }, "VerityKey": null, "VerityKeySource": { "Source": "", "Type": "file" }, "VirtualMachineMonitor": "qemu", "VolatilePackageDirectories": [ "def" ], "VolatilePackages": [ "abc" ], "WithDocs": true, "WithNetwork": false, "WithRecommends": true, "WithTests": true, "WorkspaceDirectory": "/cwd" } """ ) args = Config( architecture=Architecture.ia64, autologin=False, base_trees=[Path("/hello/world")], bios_bootloader=BiosBootloader.none, bootable=ConfigFeature.disabled, bootloader=Bootloader.grub, build_dir=None, build_packages=["pkg1", "pkg2"], build_scripts=[Path("/path/to/buildscript")], build_sources_ephemeral=BuildSourcesEphemeral.yes, build_sources=[ConfigTree(Path("/qux"), Path("/frob"))], cache_dir=Path("/is/this/the/cachedir"), cacheonly=Cacheonly.always, cdrom=False, checksum=False, clean_package_metadata=ConfigFeature.auto, clean_scripts=[Path("/clean")], compress_level=3, compress_output=Compression.bz2, configure_scripts=[Path("/configure")], console=ConsoleMode.gui, cpus=2, credentials={"credkey": "credval"}, dependencies=["dep1"], distribution=Distribution.fedora, drives=[Drive("abc", 200, Path("/foo/bar"), "abc,qed", "red"), Drive("abc", 200, None, "", "wcd")], environment_files=[], environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"}, ephemeral=True, extra_search_paths=[], extra_trees=[], files=[], finalize_scripts=[], firmware_variables=Path("/foo/bar"), firmware=Firmware.linux, forward_journal=Path("/mkosi.journal"), history=True, hostname=None, image_id="myimage", image_version="5", image="default", incremental=Incremental.no, initrd_packages=["clevis"], initrd_volatile_packages=["abc"], initrds=[Path("/efi/initrd1"), Path("/efi/initrd2")], kernel_command_line_extra=["look", "im", "on", "the", "kernel", "command", "line"], kernel_command_line=[], kernel_modules_exclude=["nvidia"], kernel_modules_include_host=True, kernel_modules_include=["loop"], kernel_modules_initrd_exclude=[], kernel_modules_initrd_include_host=True, kernel_modules_initrd_include=[], kernel_modules_initrd=True, key=None, keymap="wow, so much keymap", kvm=ConfigFeature.auto, linux=None, local_mirror=None, locale_messages="", locale="en_C.UTF-8", machine_id=uuid.UUID("b58253b0cc924a348782bcd99b20d07f"), machine="machine", make_initrd=False, manifest_format=[ManifestFormat.json, ManifestFormat.changelog], microcode_host=True, devicetree=Path("freescale/imx8mm-verdin-nonwifi-dev.dtb"), minimum_version=GenericVersion("123"), mirror=None, nspawn_settings=None, openpgp_tool="gpg", output_dir=Path("/your/output/here"), output_format=OutputFormat.uki, output_mode=0o123, output="outfile", overlay=True, package_cache_dir=Path("/a/b/c"), package_directories=[], packages=[], pass_environment=["abc"], passphrase=None, postinst_scripts=[Path("/bar/qux")], postoutput_scripts=[Path("/foo/src")], prepare_scripts=[Path("/run/foo")], profiles=["profile"], proxy_client_certificate=Path("/my/client/cert"), proxy_client_key=Path("/my/client/key"), proxy_exclude=["www.example.com"], proxy_peer_certificate=Path("/my/peer/cert"), proxy_url="https://my/proxy", qemu_args=[], ram=123, register=ConfigFeature.enabled, release="53", removable=False, remove_files=[], remove_packages=["all"], repart_dirs=[], repart_offline=True, repositories=[], repository_key_check=False, repository_key_fetch=True, root_password=("test1234", False), root_shell="/bin/tcsh", runtime_build_sources=True, runtime_home=True, runtime_network=Network.interface, runtime_scratch=ConfigFeature.enabled, runtime_size=8589934592, 
args = Config(
    architecture=Architecture.ia64,
    autologin=False,
    base_trees=[Path("/hello/world")],
    bios_bootloader=BiosBootloader.none,
    bootable=ConfigFeature.disabled,
    bootloader=Bootloader.grub,
    build_dir=None,
    build_packages=["pkg1", "pkg2"],
    build_scripts=[Path("/path/to/buildscript")],
    build_sources_ephemeral=BuildSourcesEphemeral.yes,
    build_sources=[ConfigTree(Path("/qux"), Path("/frob"))],
    cache_dir=Path("/is/this/the/cachedir"),
    cacheonly=Cacheonly.always,
    cdrom=False,
    checksum=False,
    clean_package_metadata=ConfigFeature.auto,
    clean_scripts=[Path("/clean")],
    compress_level=3,
    compress_output=Compression.bz2,
    configure_scripts=[Path("/configure")],
    console=ConsoleMode.gui,
    cpus=2,
    credentials={"credkey": "credval"},
    dependencies=["dep1"],
    distribution=Distribution.fedora,
    drives=[Drive("abc", 200, Path("/foo/bar"), "abc,qed", "red"), Drive("abc", 200, None, "", "wcd")],
    environment_files=[],
    environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"},
    ephemeral=True,
    extra_search_paths=[],
    extra_trees=[],
    files=[],
    finalize_scripts=[],
    firmware_variables=Path("/foo/bar"),
    firmware=Firmware.linux,
    forward_journal=Path("/mkosi.journal"),
    history=True,
    hostname=None,
    image_id="myimage",
    image_version="5",
    image="default",
    incremental=Incremental.no,
    initrd_packages=["clevis"],
    initrd_volatile_packages=["abc"],
    initrds=[Path("/efi/initrd1"), Path("/efi/initrd2")],
    kernel_command_line_extra=["look", "im", "on", "the", "kernel", "command", "line"],
    kernel_command_line=[],
    kernel_modules_exclude=["nvidia"],
    kernel_modules_include_host=True,
    kernel_modules_include=["loop"],
    kernel_modules_initrd_exclude=[],
    kernel_modules_initrd_include_host=True,
    kernel_modules_initrd_include=[],
    kernel_modules_initrd=True,
    key=None,
    keymap="wow, so much keymap",
    kvm=ConfigFeature.auto,
    linux=None,
    local_mirror=None,
    locale_messages="",
    locale="en_C.UTF-8",
    machine_id=uuid.UUID("b58253b0cc924a348782bcd99b20d07f"),
    machine="machine",
    make_initrd=False,
    manifest_format=[ManifestFormat.json, ManifestFormat.changelog],
    microcode_host=True,
    devicetree=Path("freescale/imx8mm-verdin-nonwifi-dev.dtb"),
    minimum_version=GenericVersion("123"),
    mirror=None,
    nspawn_settings=None,
    openpgp_tool="gpg",
    output_dir=Path("/your/output/here"),
    output_format=OutputFormat.uki,
    output_mode=0o123,
    output="outfile",
    overlay=True,
    package_cache_dir=Path("/a/b/c"),
    package_directories=[],
    packages=[],
    pass_environment=["abc"],
    passphrase=None,
    postinst_scripts=[Path("/bar/qux")],
    postoutput_scripts=[Path("/foo/src")],
    prepare_scripts=[Path("/run/foo")],
    profiles=["profile"],
    proxy_client_certificate=Path("/my/client/cert"),
    proxy_client_key=Path("/my/client/key"),
    proxy_exclude=["www.example.com"],
    proxy_peer_certificate=Path("/my/peer/cert"),
    proxy_url="https://my/proxy",
    qemu_args=[],
    ram=123,
    register=ConfigFeature.enabled,
    release="53",
    removable=False,
    remove_files=[],
    remove_packages=["all"],
    repart_dirs=[],
    repart_offline=True,
    repositories=[],
    repository_key_check=False,
    repository_key_fetch=True,
    root_password=("test1234", False),
    root_shell="/bin/tcsh",
    runtime_build_sources=True,
    runtime_home=True,
    runtime_network=Network.interface,
    runtime_scratch=ConfigFeature.enabled,
    runtime_size=8589934592,
    runtime_trees=[
        ConfigTree(Path("/foo/bar"), Path("/baz")),
        ConfigTree(Path("/bar/baz"), Path("/qux")),
    ],
    sandbox_trees=[ConfigTree(Path("/foo/bar"), None)],
    sector_size=None,
    secure_boot_auto_enroll=True,
    secure_boot_certificate_source=CertificateSource(type=CertificateSourceType.file),
    secure_boot_certificate=None,
    secure_boot_key_source=KeySource(type=KeySourceType.file),
    secure_boot_key=Path("/path/to/keyfile"),
    secure_boot_sign_tool=SecureBootSignTool.systemd_sbsign,
    secure_boot=True,
    seed=uuid.UUID("7496d7d8-7f08-4a2b-96c6-ec8c43791b60"),
    selinux_relabel=ConfigFeature.disabled,
    shim_bootloader=ShimBootloader.none,
    sign_expected_pcr_certificate_source=CertificateSource(type=CertificateSourceType.file),
    sign_expected_pcr_certificate=Path("/my/cert"),
    sign_expected_pcr_key_source=KeySource(type=KeySourceType.file),
    sign_expected_pcr_key=Path("/my/key"),
    sign_expected_pcr=ConfigFeature.disabled,
    sign=False,
    skeleton_trees=[ConfigTree(Path("/foo/bar"), Path("/")), ConfigTree(Path("/bar/baz"), Path("/qux"))],
    source_date_epoch=12345,
    split_artifacts=[ArtifactOutput.uki, ArtifactOutput.kernel],
    ssh_certificate=Path("/path/to/cert"),
    ssh_key=None,
    ssh=False,
    sync_scripts=[Path("/sync")],
    sysupdate_dir=Path("/sysupdate"),
    timezone=None,
    tools_tree_certificates=True,
    tools_tree_distribution=Distribution.fedora,
    tools_tree_mirror=None,
    tools_tree_package_directories=[Path("/abc")],
    tools_tree_packages=[],
    tools_tree_release=None,
    tools_tree_repositories=["abc"],
    tools_tree_sandbox_trees=[ConfigTree(Path("/a/b/c"), Path("/"))],
    tools_tree=None,
    tpm=ConfigFeature.auto,
    unified_kernel_image_format="myuki",
    unified_kernel_image_profiles=[UKIProfile(profile={"key": "value"}, cmdline=["key=value"])],
    unified_kernel_images=ConfigFeature.auto,
    unit_properties=["PROPERTY=VALUE"],
    use_subvolumes=ConfigFeature.auto,
    verity_certificate_source=CertificateSource(type=CertificateSourceType.file),
    verity_certificate=Path("/path/to/cert"),
    verity_key_source=KeySource(type=KeySourceType.file),
    verity_key=None,
    verity=ConfigFeature.enabled,
    vmm=Vmm.qemu,
    volatile_package_directories=[Path("def")],
    volatile_packages=["abc"],
    vsock_cid=VsockCID.hash,
    vsock=ConfigFeature.enabled,
    with_docs=True,
    with_network=False,
    with_recommends=True,
    with_tests=True,
    workspace_dir=Path("/cwd"),
)

assert args.to_json() == dump.rstrip()
assert Config.from_json(dump) == args


mkosi-25.3/tests/test_signing.py

# SPDX-License-Identifier: LGPL-2.1-or-later

import tempfile
from pathlib import Path

import pytest

from mkosi.run import find_binary, run

from . import Image, ImageConfig

pytestmark = pytest.mark.integration
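# The tests in this module are marked as integration tests (see pytestmark
# above); a usage sketch for running only them, assuming pytest's standard
# -m marker selection:
#
#   python3 -m pytest -sv tests/test_signing.py -m integration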

def test_signing_checksums_with_sop(config: ImageConfig) -> None:
    if find_binary("sqop") is None:
        pytest.skip("Need 'sqop' binary to perform sop tests.")

    with tempfile.TemporaryDirectory() as path, Image(config) as image:
        tmp_path = Path(path)

        signing_key = tmp_path / "signing-key.pgp"
        signing_cert = tmp_path / "signing-cert.pgp"

        # create a brand new signing key
        with open(signing_key, "wb") as o:
            run(cmdline=["sqop", "generate-key", "--signing-only", "Test"], stdout=o)

        # extract public key (certificate)
        with open(signing_key, "rb") as i, open(signing_cert, "wb") as o:
            run(cmdline=["sqop", "extract-cert"], stdin=i, stdout=o)

        image.build(
            options=["--checksum=true", "--openpgp-tool=sqop", "--sign=true", f"--key={signing_key}"]
        )

        signed_file = image.output_dir / "image.SHA256SUMS"
        signature = image.output_dir / "image.SHA256SUMS.gpg"

        with open(signed_file, "rb") as i:
            run(cmdline=["sqop", "verify", signature, signing_cert], stdin=i)


def test_signing_checksums_with_gpg(config: ImageConfig) -> None:
    with tempfile.TemporaryDirectory() as path, Image(config) as image:
        tmp_path = Path(path)

        signing_key = "mkosi-test@example.org"
        signing_cert = tmp_path / "signing-cert.pgp"

        gnupghome = tmp_path / ".gnupg"
        gnupghome.mkdir()
        env = dict(GNUPGHOME=str(gnupghome))

        # create a brand new signing key
        run(
            cmdline=["gpg", "--quick-gen-key", "--batch", "--passphrase", "", signing_key],
            env=env,
        )

        # export public key (certificate)
        with open(signing_cert, "wb") as o:
            run(
                cmdline=["gpg", "--export", signing_key],
                env=env,
                stdout=o,
            )

        image.build(options=["--checksum=true", "--sign=true", f"--key={signing_key}"], env=env)

        signed_file = image.output_dir / "image.SHA256SUMS"
        signature = image.output_dir / "image.SHA256SUMS.gpg"

        run(cmdline=["gpg", "--verify", signature, signed_file], env=env)


mkosi-25.3/tests/test_versioncomp.py

# SPDX-License-Identifier: LGPL-2.1-or-later

import itertools

import pytest

from mkosi.versioncomp import GenericVersion


def test_conversion() -> None:
    assert GenericVersion("1") < 2
    assert GenericVersion("1") < "2"
    assert GenericVersion("2") > 1
    assert GenericVersion("2") > "1"
    assert GenericVersion("1") == "1"


def test_generic_version_systemd() -> None:
    """Same as the first block of systemd/test/test-compare-versions.sh"""
    assert GenericVersion("1") < GenericVersion("2")
    assert GenericVersion("1") <= GenericVersion("2")
    assert GenericVersion("1") != GenericVersion("2")

    assert not (GenericVersion("1") > GenericVersion("2"))
    assert not (GenericVersion("1") == GenericVersion("2"))
    assert not (GenericVersion("1") >= GenericVersion("2"))

    assert GenericVersion.compare_versions("1", "2") == -1
    assert GenericVersion.compare_versions("2", "2") == 0
    assert GenericVersion.compare_versions("2", "1") == 1
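# compare_versions() returns -1/0/1 in the style of strverscmp(); a small
# illustrative sketch (not part of the test suite, result follows from the
# tilde-sorting rules exercised below):
#
#   GenericVersion.compare_versions("1.0~rc1", "1.0")  # -> -1, since "~" sorts low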
GenericVersion("0.") > GenericVersion("0") assert GenericVersion("0.0") > GenericVersion("0") assert GenericVersion("0") > GenericVersion("~") assert GenericVersion("") > GenericVersion("~") assert GenericVersion("1_") == GenericVersion("1") assert GenericVersion("_1") == GenericVersion("1") assert GenericVersion("1_") < GenericVersion("1.2") assert GenericVersion("1_2_3") > GenericVersion("1.3.3") assert GenericVersion("1+") == GenericVersion("1") assert GenericVersion("+1") == GenericVersion("1") assert GenericVersion("1+") < GenericVersion("1.2") assert GenericVersion("1+2+3") > GenericVersion("1.3.3") @pytest.mark.parametrize( "s1,s2", itertools.combinations_with_replacement( enumerate( [ GenericVersion("122.1"), GenericVersion("123~rc1-1"), GenericVersion("123"), GenericVersion("123-a"), GenericVersion("123-a.1"), GenericVersion("123-1"), GenericVersion("123-1.1"), GenericVersion("123^post1"), GenericVersion("123.a-1"), GenericVersion("123.1-1"), GenericVersion("123a-1"), GenericVersion("124-1"), ], ), 2, ), ) def test_generic_version_strverscmp_improved_doc( s1: tuple[int, GenericVersion], s2: tuple[int, GenericVersion], ) -> None: """Example from the doc string of strverscmp_improved. strverscmp_improved can be found in systemd/src/fundamental/string-util-fundamental.c """ i1, v1 = s1 i2, v2 = s2 assert (v1 == v2) == (i1 == i2) assert (v1 < v2) == (i1 < i2) assert (v1 <= v2) == (i1 <= i2) assert (v1 > v2) == (i1 > i2) assert (v1 >= v2) == (i1 >= i2) assert (v1 != v2) == (i1 != i2) def RPMVERCMP(a: str, b: str, expected: int) -> None: assert (GenericVersion(a) > GenericVersion(b)) - (GenericVersion(a) < GenericVersion(b)) == expected def test_generic_version_rpmvercmp() -> None: # Tests copied from rpm's rpmio test suite, under the LGPL license: # https://github.com/rpm-software-management/rpm/blob/master/tests/rpmvercmp.at. # The original form is retained as much as possible for easy comparisons and updates. 
RPMVERCMP("1.0", "1.0", 0) RPMVERCMP("1.0", "2.0", -1) RPMVERCMP("2.0", "1.0", 1) RPMVERCMP("2.0.1", "2.0.1", 0) RPMVERCMP("2.0", "2.0.1", -1) RPMVERCMP("2.0.1", "2.0", 1) RPMVERCMP("2.0.1a", "2.0.1a", 0) RPMVERCMP("2.0.1a", "2.0.1", 1) RPMVERCMP("2.0.1", "2.0.1a", -1) RPMVERCMP("5.5p1", "5.5p1", 0) RPMVERCMP("5.5p1", "5.5p2", -1) RPMVERCMP("5.5p2", "5.5p1", 1) RPMVERCMP("5.5p10", "5.5p10", 0) RPMVERCMP("5.5p1", "5.5p10", -1) RPMVERCMP("5.5p10", "5.5p1", 1) RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("xyz10", "xyz10", 0) RPMVERCMP("xyz10", "xyz10.1", -1) RPMVERCMP("xyz10.1", "xyz10", 1) RPMVERCMP("xyz.4", "xyz.4", 0) RPMVERCMP("xyz.4", "8", -1) RPMVERCMP("8", "xyz.4", 1) RPMVERCMP("xyz.4", "2", -1) RPMVERCMP("2", "xyz.4", 1) RPMVERCMP("5.5p2", "5.6p1", -1) RPMVERCMP("5.6p1", "5.5p2", 1) RPMVERCMP("5.6p1", "6.5p1", -1) RPMVERCMP("6.5p1", "5.6p1", 1) RPMVERCMP("6.0.rc1", "6.0", 1) RPMVERCMP("6.0", "6.0.rc1", -1) RPMVERCMP("10b2", "10a1", 1) RPMVERCMP("10a2", "10b2", -1) RPMVERCMP("1.0aa", "1.0aa", 0) RPMVERCMP("1.0a", "1.0aa", -1) RPMVERCMP("1.0aa", "1.0a", 1) RPMVERCMP("10.0001", "10.0001", 0) RPMVERCMP("10.0001", "10.1", 0) RPMVERCMP("10.1", "10.0001", 0) RPMVERCMP("10.0001", "10.0039", -1) RPMVERCMP("10.0039", "10.0001", 1) RPMVERCMP("4.999.9", "5.0", -1) RPMVERCMP("5.0", "4.999.9", 1) RPMVERCMP("20101121", "20101121", 0) RPMVERCMP("20101121", "20101122", -1) RPMVERCMP("20101122", "20101121", 1) RPMVERCMP("2_0", "2_0", 0) RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal # RhBug:178798 case */ RPMVERCMP("a", "a", 0) RPMVERCMP("a+", "a+", 0) RPMVERCMP("a+", "a_", 0) RPMVERCMP("a_", "a+", 0) RPMVERCMP("+a", "+a", 0) RPMVERCMP("+a", "_a", 0) RPMVERCMP("_a", "+a", 0) RPMVERCMP("+_", "+_", 0) RPMVERCMP("_+", "+_", 0) RPMVERCMP("_+", "_+", 0) RPMVERCMP("+", "_", 0) RPMVERCMP("_", "+", 0) # Basic testcases for tilde sorting RPMVERCMP("1.0~rc1", "1.0~rc1", 0) RPMVERCMP("1.0~rc1", "1.0", -1) RPMVERCMP("1.0", "1.0~rc1", 1) RPMVERCMP("1.0~rc1", "1.0~rc2", -1) RPMVERCMP("1.0~rc2", "1.0~rc1", 1) RPMVERCMP("1.0~rc1~git123", "1.0~rc1~git123", 0) RPMVERCMP("1.0~rc1~git123", "1.0~rc1", -1) RPMVERCMP("1.0~rc1", "1.0~rc1~git123", 1) # Basic testcases for caret sorting RPMVERCMP("1.0^", "1.0^", 0) RPMVERCMP("1.0^", "1.0", 1) RPMVERCMP("1.0", "1.0^", -1) RPMVERCMP("1.0^git1", "1.0^git1", 0) RPMVERCMP("1.0^git1", "1.0", 1) RPMVERCMP("1.0", "1.0^git1", -1) RPMVERCMP("1.0^git1", "1.0^git2", -1) RPMVERCMP("1.0^git2", "1.0^git1", 1) RPMVERCMP("1.0^git1", "1.01", -1) RPMVERCMP("1.01", "1.0^git1", 1) RPMVERCMP("1.0^20160101", "1.0^20160101", 0) RPMVERCMP("1.0^20160101", "1.0.1", -1) RPMVERCMP("1.0.1", "1.0^20160101", 1) RPMVERCMP("1.0^20160101^git1", "1.0^20160101^git1", 0) RPMVERCMP("1.0^20160102", "1.0^20160101^git1", 1) RPMVERCMP("1.0^20160101^git1", "1.0^20160102", -1) # Basic testcases for tilde and caret sorting */ RPMVERCMP("1.0~rc1^git1", "1.0~rc1^git1", 0) RPMVERCMP("1.0~rc1^git1", "1.0~rc1", 1) RPMVERCMP("1.0~rc1", "1.0~rc1^git1", -1) RPMVERCMP("1.0^git1~pre", "1.0^git1~pre", 0) RPMVERCMP("1.0^git1", "1.0^git1~pre", 1) RPMVERCMP("1.0^git1~pre", "1.0^git1", -1) # These are included here to document current, arguably buggy behaviors # for reference purposes and for easy checking against unintended # behavior changes. 
    print("/* RPM version comparison oddities */")
    # RhBug:811992 case
    RPMVERCMP("1b.fc17", "1b.fc17", 0)
    RPMVERCMP("1b.fc17", "1.fc17", 1)  # Note: this is reversed from rpm's vercmp, WAT! */
    RPMVERCMP("1.fc17", "1b.fc17", -1)
    RPMVERCMP("1g.fc17", "1g.fc17", 0)
    RPMVERCMP("1g.fc17", "1.fc17", 1)
    RPMVERCMP("1.fc17", "1g.fc17", -1)

    # Non-ascii characters are considered equal so these are all the same, eh… */
    RPMVERCMP("1.1.α", "1.1.α", 0)
    RPMVERCMP("1.1.α", "1.1.β", 0)
    RPMVERCMP("1.1.β", "1.1.α", 0)
    RPMVERCMP("1.1.αα", "1.1.α", 0)
    RPMVERCMP("1.1.α", "1.1.ββ", 0)
    RPMVERCMP("1.1.ββ", "1.1.αα", 0)


mkosi-25.3/tools/

mkosi-25.3/tools/do-a-release.sh

#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1-or-later

if [ -z "$1" ] ; then
    echo "Version number not specified."
    exit 1
fi

VERSION="$1"

if ! git diff-index --quiet HEAD; then
    echo "Repo has modified files."
    exit 1
fi

sed -r -i "s/^version = \".*\"$/version = \"$VERSION\"/" pyproject.toml
sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$VERSION\"/" mkosi/sandbox.py

git add -p pyproject.toml mkosi
git commit -m "Release $VERSION"
git tag -s "v$VERSION" -m "mkosi $VERSION"

VERSION_MAJOR=${VERSION%%.*}
VERSION="$((VERSION_MAJOR + 1))~devel"

sed -r -i "s/^__version__ = \".*\"$/__version__ = \"$VERSION\"/" mkosi/sandbox.py

git add -p mkosi
git commit -m "Bump version to $VERSION"

mkosi-25.3/tools/generate-zipapp.sh

#!/bin/bash

BUILDDIR=$(mktemp -d -q)

cleanup() {
    rm -rf "$BUILDDIR"
}
trap cleanup EXIT

mkdir -p builddir
cp -r mkosi "${BUILDDIR}/"

python3 -m zipapp \
    -p "/usr/bin/env python3" \
    -o builddir/mkosi \
    -m mkosi.__main__:main \
    "$BUILDDIR"

mkosi-25.3/tools/make-man-page.sh

#!/bin/sh
# SPDX-License-Identifier: LGPL-2.1-or-later
set -ex

pandoc -t man -s -o mkosi/resources/man/mkosi.1 mkosi/resources/man/mkosi.1.md
pandoc -t man -s -o mkosi/resources/man/mkosi-addon.1 mkosi/resources/man/mkosi-addon.1.md
pandoc -t man -s -o mkosi/resources/man/mkosi-initrd.1 mkosi/resources/man/mkosi-initrd.1.md
pandoc -t man -s -o mkosi/resources/man/mkosi-sandbox.1 mkosi/resources/man/mkosi-sandbox.1.md
pandoc -t man -s -o mkosi/resources/man/mkosi.news.7 mkosi/resources/man/mkosi.news.7.md
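# Usage sketches for the three scripts above, run from the repository root (the
# version number is hypothetical):
#
#   ./tools/do-a-release.sh 25.4   # tag a release, then bump to the next ~devel version
#   ./tools/generate-zipapp.sh     # produce a self-contained zipapp at builddir/mkosi
#   ./tools/make-man-page.sh       # regenerate the man pages from their markdown sources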